commit b840047e1936d4a514c356fd2f38c42c763572df Author: Jason Staack Date: Sun Mar 8 17:46:37 2026 -0500 feat: The Other Dude v9.0.1 — full-featured email system ci: add GitHub Pages deployment workflow for docs site Co-Authored-By: Claude Opus 4.6 diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..915179a --- /dev/null +++ b/.env.example @@ -0,0 +1,52 @@ +# .env.example -- Copy to .env for development, .env.prod for production +# DO NOT commit .env or .env.prod to git + +# Environment (dev | staging | production) +ENVIRONMENT=dev +LOG_LEVEL=debug +DEBUG=true + +# Database +POSTGRES_DB=mikrotik +POSTGRES_USER=postgres +POSTGRES_PASSWORD=CHANGE_ME_IN_PRODUCTION +DATABASE_URL=postgresql+asyncpg://postgres:CHANGE_ME_IN_PRODUCTION@postgres:5432/mikrotik +SYNC_DATABASE_URL=postgresql+psycopg2://postgres:CHANGE_ME_IN_PRODUCTION@postgres:5432/mikrotik +APP_USER_DATABASE_URL=postgresql+asyncpg://app_user:CHANGE_ME_IN_PRODUCTION@postgres:5432/mikrotik + +# Poller database (different role, no RLS) +POLLER_DATABASE_URL=postgres://poller_user:poller_password@postgres:5432/mikrotik + +# Redis +REDIS_URL=redis://redis:6379/0 + +# NATS +NATS_URL=nats://nats:4222 + +# Security +JWT_SECRET_KEY=CHANGE_ME_IN_PRODUCTION +CREDENTIAL_ENCRYPTION_KEY=CHANGE_ME_IN_PRODUCTION + +# First admin bootstrap (dev only) +FIRST_ADMIN_EMAIL=admin@mikrotik-portal.dev +FIRST_ADMIN_PASSWORD=changeme-in-production + +# CORS (comma-separated origins) +# Dev: localhost ports for Vite/React dev server +# Prod: set to your actual domain, e.g., https://mikrotik.yourdomain.com +CORS_ORIGINS=http://localhost:3000,http://localhost:5173,http://localhost:8080 + +# Git store path +GIT_STORE_PATH=/data/git-store + +# Firmware +FIRMWARE_CACHE_DIR=/data/firmware-cache + +# SMTP (system emails like password reset) +# For dev: run `docker compose --profile mail-testing up -d` for Mailpit UI at http://localhost:8025 +SMTP_HOST=mailpit +SMTP_PORT=1025 +SMTP_USER= +SMTP_PASSWORD= +SMTP_USE_TLS=false +SMTP_FROM_ADDRESS=noreply@example.com diff --git a/.env.staging.example b/.env.staging.example new file mode 100644 index 0000000..c79573c --- /dev/null +++ b/.env.staging.example @@ -0,0 +1,43 @@ +# .env.staging -- Copy to .env.staging and fill in values +# DO NOT commit this file to git + +ENVIRONMENT=staging +LOG_LEVEL=info +DEBUG=false + +# Database +POSTGRES_DB=mikrotik +POSTGRES_USER=postgres +POSTGRES_PASSWORD=CHANGE_ME_STAGING + +DATABASE_URL=postgresql+asyncpg://postgres:CHANGE_ME_STAGING@postgres:5432/mikrotik +SYNC_DATABASE_URL=postgresql+psycopg2://postgres:CHANGE_ME_STAGING@postgres:5432/mikrotik +APP_USER_DATABASE_URL=postgresql+asyncpg://app_user:CHANGE_ME_STAGING@postgres:5432/mikrotik + +# Poller database (different role, no RLS) +POLLER_DATABASE_URL=postgres://poller_user:poller_password@postgres:5432/mikrotik + +# Redis +REDIS_URL=redis://redis:6379/0 + +# NATS +NATS_URL=nats://nats:4222 + +# Security -- generate unique values for staging +# JWT: python3 -c "import secrets; print(secrets.token_urlsafe(64))" +# Fernet: python3 -c "import secrets, base64; print(base64.b64encode(secrets.token_bytes(32)).decode())" +JWT_SECRET_KEY=CHANGE_ME_STAGING +CREDENTIAL_ENCRYPTION_KEY=CHANGE_ME_STAGING + +# First admin bootstrap +FIRST_ADMIN_EMAIL=admin@mikrotik-portal.staging +FIRST_ADMIN_PASSWORD=CHANGE_ME_STAGING + +# CORS (staging URL) +CORS_ORIGINS=http://localhost:3080 + +# Git store path +GIT_STORE_PATH=/data/git-store + +# Firmware +FIRMWARE_CACHE_DIR=/data/firmware-cache diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..3e2a7b0 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,267 @@ +name: CI + +on: + push: + branches: [main, master] + pull_request: + branches: [main, master] + +# Cancel in-progress runs for the same branch/PR to save runner minutes. +concurrency: + group: ci-${{ github.ref }} + cancel-in-progress: true + +jobs: + # --------------------------------------------------------------------------- + # LINT — parallel linting for all three services + # --------------------------------------------------------------------------- + python-lint: + name: Lint Python (Ruff) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install Ruff + run: pip install ruff + + - name: Ruff check + run: ruff check backend/ + + - name: Ruff format check + run: ruff format --check backend/ + + go-lint: + name: Lint Go (golangci-lint) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-go@v5 + with: + go-version: "1.24" + + - name: golangci-lint + uses: golangci/golangci-lint-action@v6 + with: + working-directory: poller + + frontend-lint: + name: Lint Frontend (ESLint + tsc) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-node@v4 + with: + node-version: "18" + cache: "npm" + cache-dependency-path: frontend/package-lock.json + + - name: Install dependencies + working-directory: frontend + run: npm ci + + - name: ESLint + working-directory: frontend + run: npx eslint . + + - name: TypeScript type check + working-directory: frontend + run: npx tsc --noEmit + + # --------------------------------------------------------------------------- + # TEST — parallel test suites for all three services + # --------------------------------------------------------------------------- + backend-test: + name: Test Backend (pytest) + runs-on: ubuntu-latest + + services: + postgres: + image: timescale/timescaledb:latest-pg17 + env: + POSTGRES_DB: mikrotik_test + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + ports: + - 5432:5432 + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + redis: + image: redis:7-alpine + ports: + - 6379:6379 + options: >- + --health-cmd "redis-cli ping" + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + nats: + image: nats:2-alpine + ports: + - 4222:4222 + options: >- + --health-cmd "true" + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + env: + ENVIRONMENT: dev + DATABASE_URL: "postgresql+asyncpg://postgres:postgres@localhost:5432/mikrotik_test" + SYNC_DATABASE_URL: "postgresql+psycopg2://postgres:postgres@localhost:5432/mikrotik_test" + APP_USER_DATABASE_URL: "postgresql+asyncpg://app_user:app_password@localhost:5432/mikrotik_test" + TEST_DATABASE_URL: "postgresql+asyncpg://postgres:postgres@localhost:5432/mikrotik_test" + TEST_APP_USER_DATABASE_URL: "postgresql+asyncpg://app_user:app_password@localhost:5432/mikrotik_test" + CREDENTIAL_ENCRYPTION_KEY: "LLLjnfBZTSycvL2U07HDSxUeTtLxb9cZzryQl0R9E4w=" + JWT_SECRET_KEY: "change-this-in-production-use-a-long-random-string" + REDIS_URL: "redis://localhost:6379/0" + NATS_URL: "nats://localhost:4222" + + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: pip-${{ hashFiles('backend/pyproject.toml') }} + restore-keys: pip- + + - name: Install backend dependencies + working-directory: backend + run: pip install -e ".[dev]" + + - name: Set up test database roles + env: + PGPASSWORD: postgres + run: | + # Create app_user role for RLS-enforced connections + psql -h localhost -U postgres -d mikrotik_test -c " + CREATE ROLE app_user WITH LOGIN PASSWORD 'app_password' NOSUPERUSER NOCREATEDB NOCREATEROLE; + GRANT CONNECT ON DATABASE mikrotik_test TO app_user; + GRANT USAGE ON SCHEMA public TO app_user; + ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO app_user; + ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO app_user; + " || true + + # Create poller_user role + psql -h localhost -U postgres -d mikrotik_test -c " + DO \$\$ + BEGIN + IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'poller_user') THEN + CREATE ROLE poller_user WITH LOGIN PASSWORD 'poller_password' NOSUPERUSER NOCREATEDB NOCREATEROLE; + END IF; + END + \$\$; + GRANT CONNECT ON DATABASE mikrotik_test TO poller_user; + GRANT USAGE ON SCHEMA public TO poller_user; + " || true + + - name: Run backend tests + working-directory: backend + run: python -m pytest tests/ -x -v --tb=short + + poller-test: + name: Test Go Poller + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-go@v5 + with: + go-version: "1.24" + + - uses: actions/cache@v4 + with: + path: ~/go/pkg/mod + key: go-${{ hashFiles('poller/go.sum') }} + restore-keys: go- + + - name: Run poller tests + working-directory: poller + run: go test ./... -v -count=1 + + frontend-test: + name: Test Frontend (Vitest) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-node@v4 + with: + node-version: "18" + cache: "npm" + cache-dependency-path: frontend/package-lock.json + + - name: Install dependencies + working-directory: frontend + run: npm ci + + - name: Run frontend tests + working-directory: frontend + run: npx vitest run + + # --------------------------------------------------------------------------- + # BUILD — sequential Docker builds + Trivy scans (depends on lint + test) + # --------------------------------------------------------------------------- + build: + name: Build & Scan Docker Images + runs-on: ubuntu-latest + needs: [python-lint, go-lint, frontend-lint, backend-test, poller-test, frontend-test] + + steps: + - uses: actions/checkout@v4 + + # Build and scan each image SEQUENTIALLY to avoid OOM. + # Each multi-stage build (Go, Python/pip, Node/tsc) can peak at 1-2 GB. + # Running them in parallel would exceed typical runner memory. + + - name: Build API image + run: docker build -f infrastructure/docker/Dockerfile.api -t mikrotik-api:ci . + + - name: Scan API image + uses: aquasecurity/trivy-action@0.33.1 + with: + image-ref: "mikrotik-api:ci" + format: "table" + exit-code: "1" + severity: "HIGH,CRITICAL" + trivyignores: ".trivyignore" + + - name: Build Poller image + run: docker build -f poller/Dockerfile -t mikrotik-poller:ci ./poller + + - name: Scan Poller image + uses: aquasecurity/trivy-action@0.33.1 + with: + image-ref: "mikrotik-poller:ci" + format: "table" + exit-code: "1" + severity: "HIGH,CRITICAL" + trivyignores: ".trivyignore" + + - name: Build Frontend image + run: docker build -f infrastructure/docker/Dockerfile.frontend -t mikrotik-frontend:ci . + + - name: Scan Frontend image + uses: aquasecurity/trivy-action@0.33.1 + with: + image-ref: "mikrotik-frontend:ci" + format: "table" + exit-code: "1" + severity: "HIGH,CRITICAL" + trivyignores: ".trivyignore" diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml new file mode 100644 index 0000000..277326a --- /dev/null +++ b/.github/workflows/pages.yml @@ -0,0 +1,39 @@ +name: Deploy Docs to GitHub Pages + +on: + push: + branches: [main] + paths: + - "docs/website/**" + workflow_dispatch: + +permissions: + contents: read + pages: write + id-token: write + +concurrency: + group: pages + cancel-in-progress: false + +jobs: + deploy: + name: Deploy to GitHub Pages + runs-on: ubuntu-latest + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + steps: + - uses: actions/checkout@v4 + + - name: Setup Pages + uses: actions/configure-pages@v5 + + - name: Upload artifact + uses: actions/upload-pages-artifact@v3 + with: + path: docs/website + + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml new file mode 100644 index 0000000..54282de --- /dev/null +++ b/.github/workflows/security-scan.yml @@ -0,0 +1,56 @@ +name: Container Security Scan + +on: + push: + branches: [main, master] + pull_request: + branches: [main, master] + +jobs: + trivy-scan: + name: Trivy Container Scan + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + # Build and scan each container image sequentially to avoid OOM. + # Scans are BLOCKING (exit-code: 1) — HIGH/CRITICAL CVEs fail the pipeline. + # Add base-image CVEs to .trivyignore with justification if needed. + + - name: Build API image + run: docker build -f infrastructure/docker/Dockerfile.api -t mikrotik-api:scan . + + - name: Scan API image + uses: aquasecurity/trivy-action@0.33.1 + with: + image-ref: "mikrotik-api:scan" + format: "table" + exit-code: "1" + severity: "HIGH,CRITICAL" + trivyignores: ".trivyignore" + + - name: Build Poller image + run: docker build -f poller/Dockerfile -t mikrotik-poller:scan ./poller + + - name: Scan Poller image + uses: aquasecurity/trivy-action@0.33.1 + with: + image-ref: "mikrotik-poller:scan" + format: "table" + exit-code: "1" + severity: "HIGH,CRITICAL" + trivyignores: ".trivyignore" + + - name: Build Frontend image + run: docker build -f infrastructure/docker/Dockerfile.frontend -t mikrotik-frontend:scan . + + - name: Scan Frontend image + uses: aquasecurity/trivy-action@0.33.1 + with: + image-ref: "mikrotik-frontend:scan" + format: "table" + exit-code: "1" + severity: "HIGH,CRITICAL" + trivyignores: ".trivyignore" diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..3f4dc20 --- /dev/null +++ b/.gitignore @@ -0,0 +1,40 @@ +# Environment files with secrets +.env +.env.prod +.env.local +.env.*.local + +# Docker data +docker-data/ + +# Python +__pycache__/ +*.pyc +*.pyo +.pytest_cache/ +.coverage +htmlcov/ + +# Node +node_modules/ + +# IDE +.idea/ +.vscode/ +*.swp +*.swo + +# Build caches +.go-cache/ +.npm-cache/ +.tmp/ + +# Git worktrees +.worktrees/ + +# OS +.DS_Store +Thumbs.db + +# Playwright MCP logs +.playwright-mcp/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..cb28c8c --- /dev/null +++ b/LICENSE @@ -0,0 +1,53 @@ +Business Source License 1.1 + +Parameters + +Licensor: The Other Dude +Licensed Work: The Other Dude v9.0.0 + The Licensed Work is (c) 2026 The Other Dude +Additional Use Grant: You may use the Licensed Work for non-production, + personal, educational, and evaluation purposes. +Change Date: March 8, 2030 +Change License: Apache License, Version 2.0 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..3aa7030 --- /dev/null +++ b/README.md @@ -0,0 +1,132 @@ +# The Other Dude + +**Self-hosted MikroTik fleet management for MSPs.** + +TOD is a multi-tenant platform for managing RouterOS devices at scale. It replaces +the chaos of juggling WinBox sessions and SSH terminals across hundreds of routers +with a single, centralized web interface -- fleet visibility, configuration management, +real-time monitoring, and zero-knowledge security, all self-hosted on your infrastructure. + +--- + +## Key Features + +- **Fleet Management** -- Dashboard with device health, uptime sparklines, virtual-scrolled fleet table, geographic map, and subnet discovery. +- **Configuration Push with Panic-Revert** -- Two-phase config deployment ensures you never brick a remote device. Batch config, templates, and git-backed version history with one-click restore. +- **Real-Time Monitoring** -- Live CPU, memory, disk, and interface traffic via Server-Sent Events backed by NATS JetStream. Configurable alert rules with email, webhook, and Slack notifications. +- **Zero-Knowledge Security** -- 1Password-style architecture. SRP-6a authentication (server never sees your password), per-tenant envelope encryption via Transit KMS, Emergency Kit export. +- **Multi-Tenant with PostgreSQL RLS** -- Full organization isolation enforced at the database layer. Four roles: super_admin, admin, operator, viewer. +- **Internal Certificate Authority** -- Issue and deploy TLS certificates to RouterOS devices via SFTP. Three-tier TLS fallback for maximum compatibility. +- **WireGuard VPN Onboarding** -- Create device + VPN peer in one transaction. Generates ready-to-paste RouterOS commands for devices behind NAT. +- **PDF Reports** -- Fleet summary, device detail, security audit, and performance reports generated server-side. +- **Command Palette UX** -- Cmd+K quick navigation, keyboard shortcuts, dark/light mode, smooth page transitions, and skeleton loaders throughout. + +--- + +## Architecture + +``` + +----------------+ + | Frontend | + | React / Vite | + +-------+--------+ + | + /api/ proxy + | + +-------v--------+ + | Backend | + | FastAPI | + +--+----+-----+--+ + | | | + +-------------+ | +--------------+ + | | | + +------v-------+ +------v------+ +----------v----------+ + | PostgreSQL | | Redis | | NATS | + | TimescaleDB | | (locks, | | JetStream | + | (RLS) | | caching) | | (pub/sub) | + +------^-------+ +------^------+ +----------^----------+ + | | | + +------+------------------+--------------------+------+ + | Go Poller | + | RouterOS binary API (port 8729 TLS) | + +---------------------------+-------------------------+ + | + +----------v-----------+ + | RouterOS Fleet | + | (your devices) | + +----------------------+ +``` + +The **Go poller** communicates with RouterOS devices using the binary API over TLS, +publishing metrics to NATS and persisting to PostgreSQL with TimescaleDB hypertables. +The **FastAPI backend** enforces tenant isolation via Row-Level Security and streams +real-time events to the **React frontend** over SSE. **OpenBao** provides Transit +secret engine for per-tenant envelope encryption. + +--- + +## Tech Stack + +| Layer | Technology | +|-------|------------| +| Frontend | React 19, TanStack Router + Query, Tailwind CSS, Vite | +| Backend | Python 3.12, FastAPI, SQLAlchemy 2.0 async, asyncpg | +| Poller | Go 1.24, go-routeros/v3, pgx/v5, nats.go | +| Database | PostgreSQL 17 + TimescaleDB, Row-Level Security | +| Cache / Locks | Redis 7 | +| Message Bus | NATS with JetStream | +| KMS | OpenBao (Transit secret engine) | +| VPN | WireGuard | +| Auth | SRP-6a (zero-knowledge), JWT | +| Reports | Jinja2 + WeasyPrint | + +--- + +## Quick Start + +```bash +# Clone and configure +git clone https://github.com/your-org/tod.git && cd tod +cp .env.example .env +# Edit .env -- set CREDENTIAL_ENCRYPTION_KEY and JWT_SECRET_KEY at minimum + +# Build images sequentially (avoids OOM on low-RAM machines) +docker compose --profile full build api +docker compose --profile full build poller +docker compose --profile full build frontend + +# Start the full stack +docker compose --profile full up -d + +# Open the UI +open http://localhost:3000 +``` + +On first launch, the setup wizard walks you through creating a super admin account, +enrolling your Secret Key, adding your first organization, and onboarding your first device. + +--- + +## Documentation + +Full documentation is available at [theotherdude.net](https://theotherdude.net). + +See the documentation site for screenshots and feature walkthroughs. + +--- + +## License + +[Business Source License 1.1](LICENSE) + +Free for personal and educational use. Commercial use (managing devices for paying +customers or as part of a paid service) requires a commercial license. See the +LICENSE file for full terms. + +--- + +## The Name + +"The Other Dude" -- because every MSP needs one. When the network is down at 2 AM +and someone has to fix it, TOD is the other dude on the job. The Big Lebowski inspired, +the rug really ties the room together. diff --git a/backend/.gitignore b/backend/.gitignore new file mode 100644 index 0000000..b0edf8c --- /dev/null +++ b/backend/.gitignore @@ -0,0 +1,26 @@ +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +.venv/ +venv/ +env/ +.env +*.egg-info/ +dist/ +build/ + +# IDE +.vscode/ +.idea/ +*.swp + +# Testing +.pytest_cache/ +.coverage +htmlcov/ + +# Logs +*.log diff --git a/backend/alembic.ini b/backend/alembic.ini new file mode 100644 index 0000000..f794886 --- /dev/null +++ b/backend/alembic.ini @@ -0,0 +1,114 @@ +# A generic, single database configuration. + +[alembic] +# path to migration scripts +# Use forward slashes (/) also on windows to provide os agnostic paths +script_location = alembic + +# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s +# Uncomment the line below if you want the files to be prepended with date and time +# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s + +# sys.path path, will be prepended to sys.path if present. +# defaults to the current working directory. +prepend_sys_path = . + +# timezone to use when rendering the date within the migration file +# as well as the filename. +# If specified, requires the python>=3.9 or backports.zoneinfo library. +# Any required deps can installed by adding `alembic[tz]` to the pip requirements +# timezone = + +# max length of characters to apply to the +# "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; This defaults +# to alembic/versions. When using multiple version +# directories, initial revisions must be specified with --version-path. +# The path separator used here should be the character specified by +# "version_path_separator" below. +# version_locations = %(here)s/bar:%(here)s/bat:alembic/versions + +# version path separator; As mentioned above, this is the character used to split +# version_locations. The default within new alembic.ini files is "os", which uses +# os.pathsep. Note that this may cause alembic to miss version files if the separator +# character is actually part of the version file path. +# version_path_separator = os # Use os.pathsep. Default configuration used for new projects. +# version_path_separator = ; # Windows +# version_path_separator = : # Unix +version_path_separator = space # No separator in paths. + +# set to 'true' to search source files recursively +# in each "version_locations" directory +# New in Alembic version 1.10 +# recursive_version_locations = false + +# the output encoding used when revision files +# are written from script.mako +# output_encoding = utf-8 + +sqlalchemy.url = postgresql+asyncpg://postgres:postgres@localhost:5432/mikrotik + + +[post_write_hooks] +# post_write_hooks defines scripts or Python functions that are run +# on newly generated revision scripts. See the documentation for further +# detail and examples + +# format using "black" - use the console_scripts runner, +# if available. +# hooks = black +# black.type = console_scripts +# black.entrypoint = black +# black.options = -l 79 REVISION_SCRIPT_FILENAME + +# lint with attempts to fix using "ruff" - use the exec runner, execute a binary +# hooks = ruff +# ruff.type = exec +# ruff.executable = %(here)s/.venv/bin/ruff +# ruff.options = --fix REVISION_SCRIPT_FILENAME + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/backend/alembic/env.py b/backend/alembic/env.py new file mode 100644 index 0000000..3915537 --- /dev/null +++ b/backend/alembic/env.py @@ -0,0 +1,78 @@ +"""Alembic environment configuration for async SQLAlchemy with PostgreSQL.""" + +import asyncio +import os +from logging.config import fileConfig + +from alembic import context +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config + +# Import all models to register them with Base.metadata +from app.database import Base +import app.models.tenant # noqa: F401 +import app.models.user # noqa: F401 +import app.models.device # noqa: F401 +import app.models.config_backup # noqa: F401 + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Override sqlalchemy.url from DATABASE_URL env var if set (for Docker) +if os.environ.get("DATABASE_URL"): + config.set_main_option("sqlalchemy.url", os.environ["DATABASE_URL"]) + +# Interpret the config file for Python logging. +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Add your model's MetaData object here for 'autogenerate' support +target_metadata = Base.metadata + + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + + with context.begin_transaction(): + context.run_migrations() + + +def do_run_migrations(connection: Connection) -> None: + context.configure(connection=connection, target_metadata=target_metadata) + + with context.begin_transaction(): + context.run_migrations() + + +async def run_async_migrations() -> None: + """Run migrations in 'online' mode with async engine.""" + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + asyncio.run(run_async_migrations()) + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/backend/alembic/script.py.mako b/backend/alembic/script.py.mako new file mode 100644 index 0000000..fbc4b07 --- /dev/null +++ b/backend/alembic/script.py.mako @@ -0,0 +1,26 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. +revision: str = ${repr(up_revision)} +down_revision: Union[str, None] = ${repr(down_revision)} +branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} +depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} + + +def upgrade() -> None: + ${upgrades if upgrades else "pass"} + + +def downgrade() -> None: + ${downgrades if downgrades else "pass"} diff --git a/backend/alembic/versions/001_initial_schema.py b/backend/alembic/versions/001_initial_schema.py new file mode 100644 index 0000000..ff1cfe3 --- /dev/null +++ b/backend/alembic/versions/001_initial_schema.py @@ -0,0 +1,376 @@ +"""Initial schema with RLS policies for multi-tenant isolation. + +Revision ID: 001 +Revises: None +Create Date: 2026-02-24 + +This migration creates: +1. All database tables (tenants, users, devices, device_groups, device_tags, + device_group_memberships, device_tag_assignments) +2. Composite unique indexes for tenant-scoped uniqueness +3. Row Level Security (RLS) on all tenant-scoped tables +4. RLS policies using app.current_tenant PostgreSQL setting +5. The app_user role with appropriate grants (cannot bypass RLS) +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision: str = "001" +down_revision: Union[str, None] = None +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # ========================================================================= + # TENANTS TABLE + # ========================================================================= + op.create_table( + "tenants", + sa.Column( + "id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("name", sa.String(255), nullable=False), + sa.Column("description", sa.Text, nullable=True), + sa.Column( + "created_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "updated_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("name"), + ) + op.create_index("ix_tenants_name", "tenants", ["name"], unique=True) + + # ========================================================================= + # USERS TABLE + # ========================================================================= + op.create_table( + "users", + sa.Column( + "id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("email", sa.String(255), nullable=False), + sa.Column("hashed_password", sa.String(255), nullable=False), + sa.Column("name", sa.String(255), nullable=False), + sa.Column("role", sa.String(50), nullable=False, server_default="viewer"), + sa.Column("tenant_id", postgresql.UUID(as_uuid=True), nullable=True), + sa.Column("is_active", sa.Boolean, nullable=False, server_default="true"), + sa.Column("last_login", sa.DateTime(timezone=True), nullable=True), + sa.Column( + "created_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "updated_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("email"), + sa.ForeignKeyConstraint(["tenant_id"], ["tenants.id"], ondelete="CASCADE"), + ) + op.create_index("ix_users_email", "users", ["email"], unique=True) + op.create_index("ix_users_tenant_id", "users", ["tenant_id"]) + + # ========================================================================= + # DEVICES TABLE + # ========================================================================= + op.create_table( + "devices", + sa.Column( + "id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("tenant_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column("hostname", sa.String(255), nullable=False), + sa.Column("ip_address", sa.String(45), nullable=False), + sa.Column("api_port", sa.Integer, nullable=False, server_default="8728"), + sa.Column("api_ssl_port", sa.Integer, nullable=False, server_default="8729"), + sa.Column("model", sa.String(255), nullable=True), + sa.Column("serial_number", sa.String(255), nullable=True), + sa.Column("firmware_version", sa.String(100), nullable=True), + sa.Column("routeros_version", sa.String(100), nullable=True), + sa.Column("uptime_seconds", sa.Integer, nullable=True), + sa.Column("last_seen", sa.DateTime(timezone=True), nullable=True), + sa.Column("encrypted_credentials", sa.LargeBinary, nullable=True), + sa.Column("status", sa.String(20), nullable=False, server_default="unknown"), + sa.Column( + "created_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "updated_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("id"), + sa.ForeignKeyConstraint(["tenant_id"], ["tenants.id"], ondelete="CASCADE"), + sa.UniqueConstraint("tenant_id", "hostname", name="uq_devices_tenant_hostname"), + ) + op.create_index("ix_devices_tenant_id", "devices", ["tenant_id"]) + + # ========================================================================= + # DEVICE GROUPS TABLE + # ========================================================================= + op.create_table( + "device_groups", + sa.Column( + "id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("tenant_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column("name", sa.String(255), nullable=False), + sa.Column("description", sa.Text, nullable=True), + sa.Column( + "created_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("id"), + sa.ForeignKeyConstraint(["tenant_id"], ["tenants.id"], ondelete="CASCADE"), + sa.UniqueConstraint("tenant_id", "name", name="uq_device_groups_tenant_name"), + ) + op.create_index("ix_device_groups_tenant_id", "device_groups", ["tenant_id"]) + + # ========================================================================= + # DEVICE TAGS TABLE + # ========================================================================= + op.create_table( + "device_tags", + sa.Column( + "id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("tenant_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column("name", sa.String(100), nullable=False), + sa.Column("color", sa.String(7), nullable=True), + sa.PrimaryKeyConstraint("id"), + sa.ForeignKeyConstraint(["tenant_id"], ["tenants.id"], ondelete="CASCADE"), + sa.UniqueConstraint("tenant_id", "name", name="uq_device_tags_tenant_name"), + ) + op.create_index("ix_device_tags_tenant_id", "device_tags", ["tenant_id"]) + + # ========================================================================= + # DEVICE GROUP MEMBERSHIPS TABLE + # ========================================================================= + op.create_table( + "device_group_memberships", + sa.Column("device_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column("group_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.PrimaryKeyConstraint("device_id", "group_id"), + sa.ForeignKeyConstraint(["device_id"], ["devices.id"], ondelete="CASCADE"), + sa.ForeignKeyConstraint(["group_id"], ["device_groups.id"], ondelete="CASCADE"), + ) + + # ========================================================================= + # DEVICE TAG ASSIGNMENTS TABLE + # ========================================================================= + op.create_table( + "device_tag_assignments", + sa.Column("device_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column("tag_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.PrimaryKeyConstraint("device_id", "tag_id"), + sa.ForeignKeyConstraint(["device_id"], ["devices.id"], ondelete="CASCADE"), + sa.ForeignKeyConstraint(["tag_id"], ["device_tags.id"], ondelete="CASCADE"), + ) + + # ========================================================================= + # ROW LEVEL SECURITY (RLS) + # ========================================================================= + # RLS is the core tenant isolation mechanism. The app_user role CANNOT + # bypass RLS (only superusers can). All queries through app_user will + # be filtered by the current_setting('app.current_tenant') value which + # is set per-request by the tenant_context middleware. + + conn = op.get_bind() + + # --- TENANTS RLS --- + # Super admin sees all; tenant users see only their tenant + conn.execute(sa.text("ALTER TABLE tenants ENABLE ROW LEVEL SECURITY")) + conn.execute(sa.text("ALTER TABLE tenants FORCE ROW LEVEL SECURITY")) + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON tenants + USING ( + id::text = current_setting('app.current_tenant', true) + OR current_setting('app.current_tenant', true) = 'super_admin' + ) + WITH CHECK ( + id::text = current_setting('app.current_tenant', true) + OR current_setting('app.current_tenant', true) = 'super_admin' + ) + """)) + + # --- USERS RLS --- + # Users see only other users in their tenant; super_admin sees all + conn.execute(sa.text("ALTER TABLE users ENABLE ROW LEVEL SECURITY")) + conn.execute(sa.text("ALTER TABLE users FORCE ROW LEVEL SECURITY")) + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON users + USING ( + tenant_id::text = current_setting('app.current_tenant', true) + OR current_setting('app.current_tenant', true) = 'super_admin' + ) + WITH CHECK ( + tenant_id::text = current_setting('app.current_tenant', true) + OR current_setting('app.current_tenant', true) = 'super_admin' + ) + """)) + + # --- DEVICES RLS --- + conn.execute(sa.text("ALTER TABLE devices ENABLE ROW LEVEL SECURITY")) + conn.execute(sa.text("ALTER TABLE devices FORCE ROW LEVEL SECURITY")) + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON devices + USING (tenant_id::text = current_setting('app.current_tenant', true)) + WITH CHECK (tenant_id::text = current_setting('app.current_tenant', true)) + """)) + + # --- DEVICE GROUPS RLS --- + conn.execute(sa.text("ALTER TABLE device_groups ENABLE ROW LEVEL SECURITY")) + conn.execute(sa.text("ALTER TABLE device_groups FORCE ROW LEVEL SECURITY")) + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON device_groups + USING (tenant_id::text = current_setting('app.current_tenant', true)) + WITH CHECK (tenant_id::text = current_setting('app.current_tenant', true)) + """)) + + # --- DEVICE TAGS RLS --- + conn.execute(sa.text("ALTER TABLE device_tags ENABLE ROW LEVEL SECURITY")) + conn.execute(sa.text("ALTER TABLE device_tags FORCE ROW LEVEL SECURITY")) + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON device_tags + USING (tenant_id::text = current_setting('app.current_tenant', true)) + WITH CHECK (tenant_id::text = current_setting('app.current_tenant', true)) + """)) + + # --- DEVICE GROUP MEMBERSHIPS RLS --- + # These are filtered by joining through devices/groups (which already have RLS) + # But we also add direct RLS via a join to the devices table + conn.execute(sa.text("ALTER TABLE device_group_memberships ENABLE ROW LEVEL SECURITY")) + conn.execute(sa.text("ALTER TABLE device_group_memberships FORCE ROW LEVEL SECURITY")) + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON device_group_memberships + USING ( + EXISTS ( + SELECT 1 FROM devices d + WHERE d.id = device_id + AND d.tenant_id::text = current_setting('app.current_tenant', true) + ) + ) + WITH CHECK ( + EXISTS ( + SELECT 1 FROM devices d + WHERE d.id = device_id + AND d.tenant_id::text = current_setting('app.current_tenant', true) + ) + ) + """)) + + # --- DEVICE TAG ASSIGNMENTS RLS --- + conn.execute(sa.text("ALTER TABLE device_tag_assignments ENABLE ROW LEVEL SECURITY")) + conn.execute(sa.text("ALTER TABLE device_tag_assignments FORCE ROW LEVEL SECURITY")) + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON device_tag_assignments + USING ( + EXISTS ( + SELECT 1 FROM devices d + WHERE d.id = device_id + AND d.tenant_id::text = current_setting('app.current_tenant', true) + ) + ) + WITH CHECK ( + EXISTS ( + SELECT 1 FROM devices d + WHERE d.id = device_id + AND d.tenant_id::text = current_setting('app.current_tenant', true) + ) + ) + """)) + + # ========================================================================= + # GRANT PERMISSIONS TO app_user (RLS-enforcing application role) + # ========================================================================= + # app_user is a non-superuser role — it CANNOT bypass RLS policies. + # All API queries use this role to ensure tenant isolation. + + tables = [ + "tenants", + "users", + "devices", + "device_groups", + "device_tags", + "device_group_memberships", + "device_tag_assignments", + ] + + for table in tables: + conn.execute(sa.text( + f"GRANT SELECT, INSERT, UPDATE, DELETE ON {table} TO app_user" + )) + + # Grant sequence usage for UUID generation (gen_random_uuid is built-in, but just in case) + conn.execute(sa.text("GRANT USAGE ON SCHEMA public TO app_user")) + + # Allow app_user to set the tenant context variable + conn.execute(sa.text("GRANT SET ON PARAMETER app.current_tenant TO app_user")) + + +def downgrade() -> None: + conn = op.get_bind() + + # Revoke grants + tables = [ + "tenants", + "users", + "devices", + "device_groups", + "device_tags", + "device_group_memberships", + "device_tag_assignments", + ] + for table in tables: + try: + conn.execute(sa.text(f"REVOKE ALL ON {table} FROM app_user")) + except Exception: + pass + + # Drop tables (in reverse dependency order) + op.drop_table("device_tag_assignments") + op.drop_table("device_group_memberships") + op.drop_table("device_tags") + op.drop_table("device_groups") + op.drop_table("devices") + op.drop_table("users") + op.drop_table("tenants") diff --git a/backend/alembic/versions/002_add_routeros_major_version_and_poller_role.py b/backend/alembic/versions/002_add_routeros_major_version_and_poller_role.py new file mode 100644 index 0000000..d50c021 --- /dev/null +++ b/backend/alembic/versions/002_add_routeros_major_version_and_poller_role.py @@ -0,0 +1,92 @@ +"""Add routeros_major_version column and poller_user PostgreSQL role. + +Revision ID: 002 +Revises: 001 +Create Date: 2026-02-24 + +This migration: +1. Adds routeros_major_version INTEGER column to devices table (nullable). + Stores the detected major version (6 or 7) as populated by the Go poller. +2. Creates the poller_user PostgreSQL role with SELECT-only access to the + devices table. The poller_user bypasses RLS intentionally — it must read + all devices across all tenants to poll them. +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "002" +down_revision: Union[str, None] = "001" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # ========================================================================= + # ADD routeros_major_version COLUMN + # ========================================================================= + # Stores the detected RouterOS major version (6 or 7) as an INTEGER. + # Populated by the Go poller after a successful connection and + # /system/resource/print query. NULL until the poller has connected at + # least once. + op.add_column( + "devices", + sa.Column("routeros_major_version", sa.Integer(), nullable=True), + ) + + # ========================================================================= + # CREATE poller_user ROLE AND GRANT PERMISSIONS + # ========================================================================= + # The poller_user role is used exclusively by the Go poller service. + # It has SELECT-only access to the devices table and does NOT enforce + # RLS policies (RLS is applied to app_user only). This allows the poller + # to read all devices across all tenants, which is required for polling. + conn = op.get_bind() + + conn.execute(sa.text(""" + DO $$ + BEGIN + IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'poller_user') THEN + CREATE ROLE poller_user WITH LOGIN PASSWORD 'poller_password' BYPASSRLS; + END IF; + END + $$ + """)) + + conn.execute(sa.text("GRANT CONNECT ON DATABASE mikrotik TO poller_user")) + conn.execute(sa.text("GRANT USAGE ON SCHEMA public TO poller_user")) + + # SELECT on devices only — poller needs to read encrypted_credentials + # and other device fields. No INSERT/UPDATE/DELETE needed. + conn.execute(sa.text("GRANT SELECT ON devices TO poller_user")) + + +def downgrade() -> None: + conn = op.get_bind() + + # Revoke grants from poller_user + try: + conn.execute(sa.text("REVOKE SELECT ON devices FROM poller_user")) + except Exception: + pass + + try: + conn.execute(sa.text("REVOKE USAGE ON SCHEMA public FROM poller_user")) + except Exception: + pass + + try: + conn.execute(sa.text("REVOKE CONNECT ON DATABASE mikrotik FROM poller_user")) + except Exception: + pass + + try: + conn.execute(sa.text("DROP ROLE IF EXISTS poller_user")) + except Exception: + pass + + # Drop the column + op.drop_column("devices", "routeros_major_version") diff --git a/backend/alembic/versions/003_metrics_hypertables.py b/backend/alembic/versions/003_metrics_hypertables.py new file mode 100644 index 0000000..9ac6c8d --- /dev/null +++ b/backend/alembic/versions/003_metrics_hypertables.py @@ -0,0 +1,174 @@ +"""Add TimescaleDB hypertables for metrics and denormalized columns on devices. + +Revision ID: 003 +Revises: 002 +Create Date: 2026-02-25 + +This migration: +1. Creates interface_metrics hypertable for per-interface traffic counters. +2. Creates health_metrics hypertable for per-device CPU/memory/disk/temperature. +3. Creates wireless_metrics hypertable for per-interface wireless client stats. +4. Adds last_cpu_load and last_memory_used_pct denormalized columns to devices + for efficient fleet table display without joining hypertables. +5. Applies RLS tenant_isolation policies and appropriate GRANTs on all hypertables. +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "003" +down_revision: Union[str, None] = "002" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + conn = op.get_bind() + + # ========================================================================= + # CREATE interface_metrics HYPERTABLE + # ========================================================================= + # Stores per-interface byte counters from /interface/print on every poll cycle. + # rx_bps/tx_bps are stored as NULL — computed at query time via LAG() window + # function to avoid delta state in the poller. + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS interface_metrics ( + time TIMESTAMPTZ NOT NULL, + device_id UUID NOT NULL, + tenant_id UUID NOT NULL, + interface TEXT NOT NULL, + rx_bytes BIGINT, + tx_bytes BIGINT, + rx_bps BIGINT, + tx_bps BIGINT + ) + """)) + + conn.execute(sa.text( + "SELECT create_hypertable('interface_metrics', 'time', if_not_exists => TRUE)" + )) + + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_interface_metrics_device_time " + "ON interface_metrics (device_id, time DESC)" + )) + + conn.execute(sa.text("ALTER TABLE interface_metrics ENABLE ROW LEVEL SECURITY")) + + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON interface_metrics + USING (tenant_id::text = current_setting('app.current_tenant')) + """)) + + conn.execute(sa.text("GRANT SELECT, INSERT ON interface_metrics TO app_user")) + conn.execute(sa.text("GRANT SELECT, INSERT ON interface_metrics TO poller_user")) + + # ========================================================================= + # CREATE health_metrics HYPERTABLE + # ========================================================================= + # Stores per-device system health metrics from /system/resource/print and + # /system/health/print on every poll cycle. + # temperature is nullable — not all RouterOS devices have temperature sensors. + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS health_metrics ( + time TIMESTAMPTZ NOT NULL, + device_id UUID NOT NULL, + tenant_id UUID NOT NULL, + cpu_load SMALLINT, + free_memory BIGINT, + total_memory BIGINT, + free_disk BIGINT, + total_disk BIGINT, + temperature SMALLINT + ) + """)) + + conn.execute(sa.text( + "SELECT create_hypertable('health_metrics', 'time', if_not_exists => TRUE)" + )) + + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_health_metrics_device_time " + "ON health_metrics (device_id, time DESC)" + )) + + conn.execute(sa.text("ALTER TABLE health_metrics ENABLE ROW LEVEL SECURITY")) + + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON health_metrics + USING (tenant_id::text = current_setting('app.current_tenant')) + """)) + + conn.execute(sa.text("GRANT SELECT, INSERT ON health_metrics TO app_user")) + conn.execute(sa.text("GRANT SELECT, INSERT ON health_metrics TO poller_user")) + + # ========================================================================= + # CREATE wireless_metrics HYPERTABLE + # ========================================================================= + # Stores per-wireless-interface aggregated client stats from + # /interface/wireless/registration-table/print (v6) or + # /interface/wifi/registration-table/print (v7). + # ccq may be 0 on RouterOS v7 (not available in the WiFi API path). + # avg_signal is dBm (negative integer, e.g. -67). + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS wireless_metrics ( + time TIMESTAMPTZ NOT NULL, + device_id UUID NOT NULL, + tenant_id UUID NOT NULL, + interface TEXT NOT NULL, + client_count SMALLINT, + avg_signal SMALLINT, + ccq SMALLINT, + frequency INTEGER + ) + """)) + + conn.execute(sa.text( + "SELECT create_hypertable('wireless_metrics', 'time', if_not_exists => TRUE)" + )) + + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_wireless_metrics_device_time " + "ON wireless_metrics (device_id, time DESC)" + )) + + conn.execute(sa.text("ALTER TABLE wireless_metrics ENABLE ROW LEVEL SECURITY")) + + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON wireless_metrics + USING (tenant_id::text = current_setting('app.current_tenant')) + """)) + + conn.execute(sa.text("GRANT SELECT, INSERT ON wireless_metrics TO app_user")) + conn.execute(sa.text("GRANT SELECT, INSERT ON wireless_metrics TO poller_user")) + + # ========================================================================= + # ADD DENORMALIZED COLUMNS TO devices TABLE + # ========================================================================= + # These columns are updated by the metrics subscriber alongside each + # health_metrics insert, enabling the fleet table to display CPU and memory + # usage without a JOIN to the hypertable. + op.add_column( + "devices", + sa.Column("last_cpu_load", sa.SmallInteger(), nullable=True), + ) + op.add_column( + "devices", + sa.Column("last_memory_used_pct", sa.SmallInteger(), nullable=True), + ) + + +def downgrade() -> None: + # Remove denormalized columns from devices first + op.drop_column("devices", "last_memory_used_pct") + op.drop_column("devices", "last_cpu_load") + + conn = op.get_bind() + + # Drop hypertables (CASCADE handles indexes, policies, and chunks) + conn.execute(sa.text("DROP TABLE IF EXISTS wireless_metrics CASCADE")) + conn.execute(sa.text("DROP TABLE IF EXISTS health_metrics CASCADE")) + conn.execute(sa.text("DROP TABLE IF EXISTS interface_metrics CASCADE")) diff --git a/backend/alembic/versions/004_config_management.py b/backend/alembic/versions/004_config_management.py new file mode 100644 index 0000000..20032e4 --- /dev/null +++ b/backend/alembic/versions/004_config_management.py @@ -0,0 +1,128 @@ +"""Add config management tables: config_backup_runs, config_backup_schedules, config_push_operations. + +Revision ID: 004 +Revises: 003 +Create Date: 2026-02-25 + +This migration: +1. Creates config_backup_runs table for backup metadata (content lives in git). +2. Creates config_backup_schedules table for per-tenant/per-device schedule config. +3. Creates config_push_operations table for panic-revert recovery (API-restart safety). +4. Applies RLS tenant_isolation policies and appropriate GRANTs on all tables. +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "004" +down_revision: Union[str, None] = "003" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + conn = op.get_bind() + + # ========================================================================= + # CREATE config_backup_runs TABLE + # ========================================================================= + # Stores metadata for each backup run. The actual config content lives in + # the tenant's bare git repository (GIT_STORE_PATH). This table provides + # the timeline view and change tracking without duplicating file content. + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS config_backup_runs ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + device_id UUID NOT NULL REFERENCES devices(id) ON DELETE CASCADE, + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + commit_sha TEXT NOT NULL, + trigger_type TEXT NOT NULL, + lines_added INT, + lines_removed INT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + ) + """)) + + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_config_backup_runs_device_created " + "ON config_backup_runs (device_id, created_at DESC)" + )) + + conn.execute(sa.text("ALTER TABLE config_backup_runs ENABLE ROW LEVEL SECURITY")) + + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON config_backup_runs + USING (tenant_id::text = current_setting('app.current_tenant')) + """)) + + conn.execute(sa.text("GRANT SELECT, INSERT ON config_backup_runs TO app_user")) + conn.execute(sa.text("GRANT SELECT ON config_backup_runs TO poller_user")) + + # ========================================================================= + # CREATE config_backup_schedules TABLE + # ========================================================================= + # Stores per-tenant default and per-device override schedules. + # device_id = NULL means tenant default (applies to all devices in tenant). + # A per-device row with a specific device_id overrides the tenant default. + # UNIQUE(tenant_id, device_id) allows one entry per (tenant, device) pair + # where device_id NULL is the tenant-level default. + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS config_backup_schedules ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + device_id UUID REFERENCES devices(id) ON DELETE CASCADE, + cron_expression TEXT NOT NULL DEFAULT '0 2 * * *', + enabled BOOL NOT NULL DEFAULT TRUE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(tenant_id, device_id) + ) + """)) + + conn.execute(sa.text("ALTER TABLE config_backup_schedules ENABLE ROW LEVEL SECURITY")) + + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON config_backup_schedules + USING (tenant_id::text = current_setting('app.current_tenant')) + """)) + + conn.execute(sa.text("GRANT SELECT, INSERT, UPDATE ON config_backup_schedules TO app_user")) + + # ========================================================================= + # CREATE config_push_operations TABLE + # ========================================================================= + # Tracks pending two-phase config push operations for panic-revert recovery. + # If the API pod restarts during the 60-second verification window, the + # startup handler checks for 'pending_verification' rows and either verifies + # connectivity (clean up the RouterOS scheduler job) or marks as failed. + # See Pitfall 6 in 04-RESEARCH.md. + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS config_push_operations ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + device_id UUID NOT NULL REFERENCES devices(id) ON DELETE CASCADE, + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + pre_push_commit_sha TEXT NOT NULL, + scheduler_name TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending_verification', + started_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + completed_at TIMESTAMPTZ + ) + """)) + + conn.execute(sa.text("ALTER TABLE config_push_operations ENABLE ROW LEVEL SECURITY")) + + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON config_push_operations + USING (tenant_id::text = current_setting('app.current_tenant')) + """)) + + conn.execute(sa.text("GRANT SELECT, INSERT, UPDATE ON config_push_operations TO app_user")) + + +def downgrade() -> None: + conn = op.get_bind() + + conn.execute(sa.text("DROP TABLE IF EXISTS config_push_operations CASCADE")) + conn.execute(sa.text("DROP TABLE IF EXISTS config_backup_schedules CASCADE")) + conn.execute(sa.text("DROP TABLE IF EXISTS config_backup_runs CASCADE")) diff --git a/backend/alembic/versions/005_alerting_and_firmware.py b/backend/alembic/versions/005_alerting_and_firmware.py new file mode 100644 index 0000000..1af426a --- /dev/null +++ b/backend/alembic/versions/005_alerting_and_firmware.py @@ -0,0 +1,286 @@ +"""Add alerting and firmware management tables. + +Revision ID: 005 +Revises: 004 +Create Date: 2026-02-25 + +This migration: +1. ALTERs devices table: adds architecture and preferred_channel columns. +2. ALTERs device_groups table: adds preferred_channel column. +3. Creates alert_rules, notification_channels, alert_rule_channels, alert_events tables. +4. Creates firmware_versions, firmware_upgrade_jobs tables. +5. Applies RLS policies on tenant-scoped tables. +6. Seeds default alert rules for all existing tenants. +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "005" +down_revision: Union[str, None] = "004" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + conn = op.get_bind() + + # ========================================================================= + # ALTER devices TABLE — add architecture and preferred_channel columns + # ========================================================================= + conn.execute(sa.text( + "ALTER TABLE devices ADD COLUMN IF NOT EXISTS architecture TEXT" + )) + conn.execute(sa.text( + "ALTER TABLE devices ADD COLUMN IF NOT EXISTS preferred_channel TEXT DEFAULT 'stable' NOT NULL" + )) + + # ========================================================================= + # ALTER device_groups TABLE — add preferred_channel column + # ========================================================================= + conn.execute(sa.text( + "ALTER TABLE device_groups ADD COLUMN IF NOT EXISTS preferred_channel TEXT DEFAULT 'stable' NOT NULL" + )) + + # ========================================================================= + # CREATE alert_rules TABLE + # ========================================================================= + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS alert_rules ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + device_id UUID REFERENCES devices(id) ON DELETE CASCADE, + group_id UUID REFERENCES device_groups(id) ON DELETE SET NULL, + name TEXT NOT NULL, + metric TEXT NOT NULL, + operator TEXT NOT NULL, + threshold NUMERIC NOT NULL, + duration_polls INTEGER NOT NULL DEFAULT 1, + severity TEXT NOT NULL, + enabled BOOLEAN NOT NULL DEFAULT TRUE, + is_default BOOLEAN NOT NULL DEFAULT FALSE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + ) + """)) + + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_alert_rules_tenant_enabled " + "ON alert_rules (tenant_id, enabled)" + )) + + conn.execute(sa.text("ALTER TABLE alert_rules ENABLE ROW LEVEL SECURITY")) + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON alert_rules + USING (tenant_id::text = current_setting('app.current_tenant')) + """)) + conn.execute(sa.text( + "GRANT SELECT, INSERT, UPDATE, DELETE ON alert_rules TO app_user" + )) + conn.execute(sa.text("GRANT ALL ON alert_rules TO poller_user")) + + # ========================================================================= + # CREATE notification_channels TABLE + # ========================================================================= + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS notification_channels ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + name TEXT NOT NULL, + channel_type TEXT NOT NULL, + smtp_host TEXT, + smtp_port INTEGER, + smtp_user TEXT, + smtp_password BYTEA, + smtp_use_tls BOOLEAN DEFAULT FALSE, + from_address TEXT, + to_address TEXT, + webhook_url TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + ) + """)) + + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_notification_channels_tenant " + "ON notification_channels (tenant_id)" + )) + + conn.execute(sa.text("ALTER TABLE notification_channels ENABLE ROW LEVEL SECURITY")) + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON notification_channels + USING (tenant_id::text = current_setting('app.current_tenant')) + """)) + conn.execute(sa.text( + "GRANT SELECT, INSERT, UPDATE, DELETE ON notification_channels TO app_user" + )) + conn.execute(sa.text("GRANT ALL ON notification_channels TO poller_user")) + + # ========================================================================= + # CREATE alert_rule_channels TABLE (M2M association) + # ========================================================================= + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS alert_rule_channels ( + rule_id UUID NOT NULL REFERENCES alert_rules(id) ON DELETE CASCADE, + channel_id UUID NOT NULL REFERENCES notification_channels(id) ON DELETE CASCADE, + PRIMARY KEY (rule_id, channel_id) + ) + """)) + + conn.execute(sa.text("ALTER TABLE alert_rule_channels ENABLE ROW LEVEL SECURITY")) + # RLS for M2M: join through parent table's tenant_id via rule_id + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON alert_rule_channels + USING (rule_id IN ( + SELECT id FROM alert_rules + WHERE tenant_id::text = current_setting('app.current_tenant') + )) + """)) + conn.execute(sa.text( + "GRANT SELECT, INSERT, UPDATE, DELETE ON alert_rule_channels TO app_user" + )) + conn.execute(sa.text("GRANT ALL ON alert_rule_channels TO poller_user")) + + # ========================================================================= + # CREATE alert_events TABLE + # ========================================================================= + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS alert_events ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + rule_id UUID REFERENCES alert_rules(id) ON DELETE SET NULL, + device_id UUID NOT NULL REFERENCES devices(id) ON DELETE CASCADE, + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + status TEXT NOT NULL, + severity TEXT NOT NULL, + metric TEXT, + value NUMERIC, + threshold NUMERIC, + message TEXT, + is_flapping BOOLEAN NOT NULL DEFAULT FALSE, + acknowledged_at TIMESTAMPTZ, + acknowledged_by UUID REFERENCES users(id) ON DELETE SET NULL, + silenced_until TIMESTAMPTZ, + fired_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + resolved_at TIMESTAMPTZ + ) + """)) + + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_alert_events_device_rule_status " + "ON alert_events (device_id, rule_id, status)" + )) + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_alert_events_tenant_fired " + "ON alert_events (tenant_id, fired_at)" + )) + + conn.execute(sa.text("ALTER TABLE alert_events ENABLE ROW LEVEL SECURITY")) + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON alert_events + USING (tenant_id::text = current_setting('app.current_tenant')) + """)) + conn.execute(sa.text( + "GRANT SELECT, INSERT, UPDATE, DELETE ON alert_events TO app_user" + )) + conn.execute(sa.text("GRANT ALL ON alert_events TO poller_user")) + + # ========================================================================= + # CREATE firmware_versions TABLE (global — NOT tenant-scoped) + # ========================================================================= + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS firmware_versions ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + architecture TEXT NOT NULL, + channel TEXT NOT NULL, + version TEXT NOT NULL, + npk_url TEXT NOT NULL, + npk_local_path TEXT, + npk_size_bytes BIGINT, + checked_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(architecture, channel, version) + ) + """)) + + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_firmware_versions_arch_channel " + "ON firmware_versions (architecture, channel)" + )) + + # No RLS on firmware_versions — global cache table + conn.execute(sa.text( + "GRANT SELECT, INSERT, UPDATE ON firmware_versions TO app_user" + )) + conn.execute(sa.text("GRANT ALL ON firmware_versions TO poller_user")) + + # ========================================================================= + # CREATE firmware_upgrade_jobs TABLE + # ========================================================================= + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS firmware_upgrade_jobs ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + device_id UUID NOT NULL REFERENCES devices(id) ON DELETE CASCADE, + rollout_group_id UUID, + target_version TEXT NOT NULL, + architecture TEXT NOT NULL, + channel TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + pre_upgrade_backup_sha TEXT, + scheduled_at TIMESTAMPTZ, + started_at TIMESTAMPTZ, + completed_at TIMESTAMPTZ, + error_message TEXT, + confirmed_major_upgrade BOOLEAN NOT NULL DEFAULT FALSE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + ) + """)) + + conn.execute(sa.text("ALTER TABLE firmware_upgrade_jobs ENABLE ROW LEVEL SECURITY")) + conn.execute(sa.text(""" + CREATE POLICY tenant_isolation ON firmware_upgrade_jobs + USING (tenant_id::text = current_setting('app.current_tenant')) + """)) + conn.execute(sa.text( + "GRANT SELECT, INSERT, UPDATE, DELETE ON firmware_upgrade_jobs TO app_user" + )) + conn.execute(sa.text("GRANT ALL ON firmware_upgrade_jobs TO poller_user")) + + # ========================================================================= + # SEED DEFAULT ALERT RULES for all existing tenants + # ========================================================================= + # Note: New tenant creation (in the tenants API router) should also seed + # these three default rules. A _seed_default_alert_rules(tenant_id) helper + # should be created in the alerts router or a shared service for this. + conn.execute(sa.text(""" + INSERT INTO alert_rules (id, tenant_id, name, metric, operator, threshold, duration_polls, severity, enabled, is_default) + SELECT gen_random_uuid(), t.id, 'High CPU Usage', 'cpu_load', 'gt', 90, 5, 'warning', TRUE, TRUE + FROM tenants t + """)) + conn.execute(sa.text(""" + INSERT INTO alert_rules (id, tenant_id, name, metric, operator, threshold, duration_polls, severity, enabled, is_default) + SELECT gen_random_uuid(), t.id, 'High Memory Usage', 'memory_used_pct', 'gt', 90, 5, 'warning', TRUE, TRUE + FROM tenants t + """)) + conn.execute(sa.text(""" + INSERT INTO alert_rules (id, tenant_id, name, metric, operator, threshold, duration_polls, severity, enabled, is_default) + SELECT gen_random_uuid(), t.id, 'High Disk Usage', 'disk_used_pct', 'gt', 85, 3, 'warning', TRUE, TRUE + FROM tenants t + """)) + + +def downgrade() -> None: + conn = op.get_bind() + + # Drop tables in reverse dependency order + conn.execute(sa.text("DROP TABLE IF EXISTS firmware_upgrade_jobs CASCADE")) + conn.execute(sa.text("DROP TABLE IF EXISTS firmware_versions CASCADE")) + conn.execute(sa.text("DROP TABLE IF EXISTS alert_events CASCADE")) + conn.execute(sa.text("DROP TABLE IF EXISTS alert_rule_channels CASCADE")) + conn.execute(sa.text("DROP TABLE IF EXISTS notification_channels CASCADE")) + conn.execute(sa.text("DROP TABLE IF EXISTS alert_rules CASCADE")) + + # Drop added columns + conn.execute(sa.text("ALTER TABLE devices DROP COLUMN IF EXISTS architecture")) + conn.execute(sa.text("ALTER TABLE devices DROP COLUMN IF EXISTS preferred_channel")) + conn.execute(sa.text("ALTER TABLE device_groups DROP COLUMN IF EXISTS preferred_channel")) diff --git a/backend/alembic/versions/006_advanced_features.py b/backend/alembic/versions/006_advanced_features.py new file mode 100644 index 0000000..af797f2 --- /dev/null +++ b/backend/alembic/versions/006_advanced_features.py @@ -0,0 +1,212 @@ +"""Add config templates, template push jobs, and device location columns. + +Revision ID: 006 +Revises: 005 +Create Date: 2026-02-25 + +This migration: +1. ALTERs devices table: adds latitude and longitude columns. +2. Creates config_templates table. +3. Creates config_template_tags table. +4. Creates template_push_jobs table. +5. Applies RLS policies on all three new tables. +6. Seeds starter templates for all existing tenants. +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "006" +down_revision: Union[str, None] = "005" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + conn = op.get_bind() + + # ========================================================================= + # ALTER devices TABLE — add latitude and longitude columns + # ========================================================================= + conn.execute(sa.text( + "ALTER TABLE devices ADD COLUMN IF NOT EXISTS latitude DOUBLE PRECISION" + )) + conn.execute(sa.text( + "ALTER TABLE devices ADD COLUMN IF NOT EXISTS longitude DOUBLE PRECISION" + )) + + # ========================================================================= + # CREATE config_templates TABLE + # ========================================================================= + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS config_templates ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + name TEXT NOT NULL, + description TEXT, + content TEXT NOT NULL, + variables JSONB NOT NULL DEFAULT '[]'::jsonb, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now(), + UNIQUE(tenant_id, name) + ) + """)) + + # ========================================================================= + # CREATE config_template_tags TABLE + # ========================================================================= + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS config_template_tags ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + name VARCHAR(100) NOT NULL, + template_id UUID NOT NULL REFERENCES config_templates(id) ON DELETE CASCADE, + UNIQUE(template_id, name) + ) + """)) + + # ========================================================================= + # CREATE template_push_jobs TABLE + # ========================================================================= + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS template_push_jobs ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + template_id UUID REFERENCES config_templates(id) ON DELETE SET NULL, + device_id UUID NOT NULL REFERENCES devices(id) ON DELETE CASCADE, + rollout_id UUID, + rendered_content TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + pre_push_backup_sha TEXT, + error_message TEXT, + started_at TIMESTAMPTZ, + completed_at TIMESTAMPTZ, + created_at TIMESTAMPTZ NOT NULL DEFAULT now() + ) + """)) + + # ========================================================================= + # RLS POLICIES + # ========================================================================= + for table in ("config_templates", "config_template_tags", "template_push_jobs"): + conn.execute(sa.text(f"ALTER TABLE {table} ENABLE ROW LEVEL SECURITY")) + conn.execute(sa.text(f""" + CREATE POLICY {table}_tenant_isolation ON {table} + USING (tenant_id = current_setting('app.current_tenant')::uuid) + """)) + conn.execute(sa.text( + f"GRANT SELECT, INSERT, UPDATE, DELETE ON {table} TO app_user" + )) + conn.execute(sa.text(f"GRANT ALL ON {table} TO poller_user")) + + # ========================================================================= + # INDEXES + # ========================================================================= + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_config_templates_tenant " + "ON config_templates (tenant_id)" + )) + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_config_template_tags_template " + "ON config_template_tags (template_id)" + )) + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_template_push_jobs_tenant_rollout " + "ON template_push_jobs (tenant_id, rollout_id)" + )) + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_template_push_jobs_device_status " + "ON template_push_jobs (device_id, status)" + )) + + # ========================================================================= + # SEED STARTER TEMPLATES for all existing tenants + # ========================================================================= + + # 1. Basic Firewall + conn.execute(sa.text(""" + INSERT INTO config_templates (id, tenant_id, name, description, content, variables) + SELECT + gen_random_uuid(), + t.id, + 'Basic Firewall', + 'Standard firewall ruleset with WAN protection and LAN forwarding', + '/ip firewall filter +add chain=input connection-state=established,related action=accept +add chain=input connection-state=invalid action=drop +add chain=input in-interface={{ wan_interface }} protocol=tcp dst-port=8291 action=drop comment="Block Winbox from WAN" +add chain=input in-interface={{ wan_interface }} protocol=tcp dst-port=22 action=drop comment="Block SSH from WAN" +add chain=forward connection-state=established,related action=accept +add chain=forward connection-state=invalid action=drop +add chain=forward src-address={{ allowed_network }} action=accept +add chain=forward action=drop', + '[{"name":"wan_interface","type":"string","default":"ether1","description":"WAN-facing interface"},{"name":"allowed_network","type":"subnet","default":"192.168.1.0/24","description":"Allowed source network"}]'::jsonb + FROM tenants t + ON CONFLICT DO NOTHING + """)) + + # 2. DHCP Server Setup + conn.execute(sa.text(""" + INSERT INTO config_templates (id, tenant_id, name, description, content, variables) + SELECT + gen_random_uuid(), + t.id, + 'DHCP Server Setup', + 'Configure DHCP server with address pool, DNS, and gateway', + '/ip pool add name=dhcp-pool ranges={{ pool_start }}-{{ pool_end }} +/ip dhcp-server network add address={{ gateway }}/24 gateway={{ gateway }} dns-server={{ dns_server }} +/ip dhcp-server add name=dhcp1 interface={{ interface }} address-pool=dhcp-pool disabled=no', + '[{"name":"pool_start","type":"ip","default":"192.168.1.100","description":"DHCP pool start address"},{"name":"pool_end","type":"ip","default":"192.168.1.254","description":"DHCP pool end address"},{"name":"gateway","type":"ip","default":"192.168.1.1","description":"Default gateway"},{"name":"dns_server","type":"ip","default":"8.8.8.8","description":"DNS server address"},{"name":"interface","type":"string","default":"bridge1","description":"Interface to serve DHCP on"}]'::jsonb + FROM tenants t + ON CONFLICT DO NOTHING + """)) + + # 3. Wireless AP Config + conn.execute(sa.text(""" + INSERT INTO config_templates (id, tenant_id, name, description, content, variables) + SELECT + gen_random_uuid(), + t.id, + 'Wireless AP Config', + 'Configure wireless access point with WPA2 security', + '/interface wireless security-profiles add name=portal-wpa2 mode=dynamic-keys authentication-types=wpa2-psk wpa2-pre-shared-key={{ password }} +/interface wireless set wlan1 mode=ap-bridge ssid={{ ssid }} security-profile=portal-wpa2 frequency={{ frequency }} channel-width={{ channel_width }} disabled=no', + '[{"name":"ssid","type":"string","default":"MikroTik-AP","description":"Wireless network name"},{"name":"password","type":"string","default":"","description":"WPA2 pre-shared key (min 8 characters)"},{"name":"frequency","type":"integer","default":"2412","description":"Wireless frequency in MHz"},{"name":"channel_width","type":"string","default":"20/40mhz-XX","description":"Channel width setting"}]'::jsonb + FROM tenants t + ON CONFLICT DO NOTHING + """)) + + # 4. Initial Device Setup + conn.execute(sa.text(""" + INSERT INTO config_templates (id, tenant_id, name, description, content, variables) + SELECT + gen_random_uuid(), + t.id, + 'Initial Device Setup', + 'Set device identity, NTP, DNS, and disable unused services', + '/system identity set name={{ device.hostname }} +/system ntp client set enabled=yes servers={{ ntp_server }} +/ip dns set servers={{ dns_servers }} allow-remote-requests=no +/ip service disable telnet,ftp,www,api-ssl +/ip service set ssh port=22 +/ip service set winbox port=8291', + '[{"name":"ntp_server","type":"ip","default":"pool.ntp.org","description":"NTP server address"},{"name":"dns_servers","type":"string","default":"8.8.8.8,8.8.4.4","description":"Comma-separated DNS servers"}]'::jsonb + FROM tenants t + ON CONFLICT DO NOTHING + """)) + + +def downgrade() -> None: + conn = op.get_bind() + + # Drop tables in reverse dependency order + conn.execute(sa.text("DROP TABLE IF EXISTS template_push_jobs CASCADE")) + conn.execute(sa.text("DROP TABLE IF EXISTS config_template_tags CASCADE")) + conn.execute(sa.text("DROP TABLE IF EXISTS config_templates CASCADE")) + + # Drop location columns from devices + conn.execute(sa.text("ALTER TABLE devices DROP COLUMN IF EXISTS latitude")) + conn.execute(sa.text("ALTER TABLE devices DROP COLUMN IF EXISTS longitude")) diff --git a/backend/alembic/versions/007_audit_logs.py b/backend/alembic/versions/007_audit_logs.py new file mode 100644 index 0000000..6ef33de --- /dev/null +++ b/backend/alembic/versions/007_audit_logs.py @@ -0,0 +1,82 @@ +"""Create audit_logs table with RLS policy. + +Revision ID: 007 +Revises: 006 +Create Date: 2026-03-02 + +This migration: +1. Creates audit_logs table for centralized audit trail. +2. Applies RLS policy for tenant isolation. +3. Creates indexes for fast paginated and filtered queries. +4. Grants SELECT, INSERT to app_user (read and write audit entries). +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "007" +down_revision: Union[str, None] = "006" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + conn = op.get_bind() + + # ========================================================================= + # CREATE audit_logs TABLE + # ========================================================================= + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS audit_logs ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + user_id UUID REFERENCES users(id) ON DELETE SET NULL, + action VARCHAR(100) NOT NULL, + resource_type VARCHAR(50), + resource_id VARCHAR(255), + device_id UUID REFERENCES devices(id) ON DELETE SET NULL, + details JSONB NOT NULL DEFAULT '{}'::jsonb, + ip_address VARCHAR(45), + created_at TIMESTAMPTZ NOT NULL DEFAULT now() + ) + """)) + + # ========================================================================= + # RLS POLICY + # ========================================================================= + conn.execute(sa.text( + "ALTER TABLE audit_logs ENABLE ROW LEVEL SECURITY" + )) + conn.execute(sa.text(""" + CREATE POLICY audit_logs_tenant_isolation ON audit_logs + USING (tenant_id = current_setting('app.current_tenant')::uuid) + """)) + + # Grant SELECT + INSERT to app_user (no UPDATE/DELETE -- audit logs are immutable) + conn.execute(sa.text( + "GRANT SELECT, INSERT ON audit_logs TO app_user" + )) + # Poller user gets full access for cross-tenant audit logging + conn.execute(sa.text( + "GRANT ALL ON audit_logs TO poller_user" + )) + + # ========================================================================= + # INDEXES + # ========================================================================= + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_audit_logs_tenant_created " + "ON audit_logs (tenant_id, created_at DESC)" + )) + conn.execute(sa.text( + "CREATE INDEX IF NOT EXISTS idx_audit_logs_tenant_action " + "ON audit_logs (tenant_id, action)" + )) + + +def downgrade() -> None: + conn = op.get_bind() + conn.execute(sa.text("DROP TABLE IF EXISTS audit_logs CASCADE")) diff --git a/backend/alembic/versions/008_maintenance_windows.py b/backend/alembic/versions/008_maintenance_windows.py new file mode 100644 index 0000000..814cb0f --- /dev/null +++ b/backend/alembic/versions/008_maintenance_windows.py @@ -0,0 +1,86 @@ +"""Add maintenance_windows table with RLS. + +Revision ID: 008 +Revises: 007 +Create Date: 2026-03-02 + +This migration: +1. Creates maintenance_windows table for scheduling maintenance periods. +2. Adds CHECK constraint (end_at > start_at). +3. Creates composite index on (tenant_id, start_at, end_at) for active window queries. +4. Applies RLS policy matching the standard tenant_id isolation pattern. +5. Grants permissions to app_user role. +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "008" +down_revision: Union[str, None] = "007" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + conn = op.get_bind() + + # ── 1. Create maintenance_windows table ──────────────────────────────── + conn.execute(sa.text(""" + CREATE TABLE IF NOT EXISTS maintenance_windows ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + name VARCHAR(200) NOT NULL, + device_ids JSONB NOT NULL DEFAULT '[]'::jsonb, + start_at TIMESTAMPTZ NOT NULL, + end_at TIMESTAMPTZ NOT NULL, + suppress_alerts BOOLEAN NOT NULL DEFAULT true, + notes TEXT, + created_by UUID REFERENCES users(id) ON DELETE SET NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now(), + + CONSTRAINT chk_maintenance_window_dates CHECK (end_at > start_at) + ) + """)) + + # ── 2. Composite index for active window queries ─────────────────────── + conn.execute(sa.text(""" + CREATE INDEX IF NOT EXISTS idx_maintenance_windows_tenant_time + ON maintenance_windows (tenant_id, start_at, end_at) + """)) + + # ── 3. RLS policy ───────────────────────────────────────────────────── + conn.execute(sa.text("ALTER TABLE maintenance_windows ENABLE ROW LEVEL SECURITY")) + + conn.execute(sa.text(""" + DO $$ + BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_policies + WHERE tablename = 'maintenance_windows' AND policyname = 'maintenance_windows_tenant_isolation' + ) THEN + CREATE POLICY maintenance_windows_tenant_isolation ON maintenance_windows + USING (tenant_id = NULLIF(current_setting('app.current_tenant', true), '')::uuid); + END IF; + END + $$ + """)) + + # ── 4. Grant permissions to app_user ─────────────────────────────────── + conn.execute(sa.text(""" + DO $$ + BEGIN + IF EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'app_user') THEN + GRANT SELECT, INSERT, UPDATE, DELETE ON maintenance_windows TO app_user; + END IF; + END + $$ + """)) + + +def downgrade() -> None: + conn = op.get_bind() + conn.execute(sa.text("DROP TABLE IF EXISTS maintenance_windows CASCADE")) diff --git a/backend/alembic/versions/009_api_keys.py b/backend/alembic/versions/009_api_keys.py new file mode 100644 index 0000000..47b18bf --- /dev/null +++ b/backend/alembic/versions/009_api_keys.py @@ -0,0 +1,93 @@ +"""Add api_keys table with RLS for tenant-scoped API key management. + +Revision ID: 009 +Revises: 008 +Create Date: 2026-03-02 + +This migration: +1. Creates api_keys table (UUID PK, tenant_id FK, user_id FK, key_hash, scopes JSONB). +2. Adds unique index on key_hash for O(1) validation lookups. +3. Adds composite index on (tenant_id, revoked_at) for listing active keys. +4. Applies RLS policy on tenant_id. +5. Grants SELECT, INSERT, UPDATE to app_user. +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "009" +down_revision: Union[str, None] = "008" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + conn = op.get_bind() + + # 1. Create api_keys table + conn.execute( + sa.text(""" + CREATE TABLE IF NOT EXISTS api_keys ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + name VARCHAR(200) NOT NULL, + key_prefix VARCHAR(12) NOT NULL, + key_hash VARCHAR(64) NOT NULL, + scopes JSONB NOT NULL DEFAULT '[]'::jsonb, + expires_at TIMESTAMPTZ, + last_used_at TIMESTAMPTZ, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + revoked_at TIMESTAMPTZ + ); + """) + ) + + # 2. Unique index on key_hash for fast validation lookups + conn.execute( + sa.text(""" + CREATE UNIQUE INDEX IF NOT EXISTS ix_api_keys_key_hash + ON api_keys (key_hash); + """) + ) + + # 3. Composite index for listing active keys per tenant + conn.execute( + sa.text(""" + CREATE INDEX IF NOT EXISTS ix_api_keys_tenant_revoked + ON api_keys (tenant_id, revoked_at); + """) + ) + + # 4. Enable RLS and create tenant isolation policy + conn.execute(sa.text("ALTER TABLE api_keys ENABLE ROW LEVEL SECURITY;")) + conn.execute(sa.text("ALTER TABLE api_keys FORCE ROW LEVEL SECURITY;")) + + conn.execute( + sa.text(""" + DO $$ + BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_policies + WHERE tablename = 'api_keys' AND policyname = 'tenant_isolation' + ) THEN + CREATE POLICY tenant_isolation ON api_keys + USING ( + tenant_id::text = current_setting('app.current_tenant', true) + OR current_setting('app.current_tenant', true) = 'super_admin' + ); + END IF; + END $$; + """) + ) + + # 5. Grant permissions to app_user role + conn.execute(sa.text("GRANT SELECT, INSERT, UPDATE ON api_keys TO app_user;")) + + +def downgrade() -> None: + conn = op.get_bind() + conn.execute(sa.text("DROP TABLE IF EXISTS api_keys CASCADE;")) diff --git a/backend/alembic/versions/010_wireguard_vpn.py b/backend/alembic/versions/010_wireguard_vpn.py new file mode 100644 index 0000000..e034d4a --- /dev/null +++ b/backend/alembic/versions/010_wireguard_vpn.py @@ -0,0 +1,90 @@ +"""Add vpn_config and vpn_peers tables for WireGuard VPN management. + +Revision ID: 010 +Revises: 009 +Create Date: 2026-03-02 + +This migration: +1. Creates vpn_config table (one row per tenant — server keys, subnet, port). +2. Creates vpn_peers table (one row per device VPN connection). +3. Applies RLS policies on tenant_id. +4. Grants SELECT, INSERT, UPDATE, DELETE to app_user. +""" + +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import UUID + +# revision identifiers +revision: str = "010" +down_revision: Union[str, None] = "009" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # ── vpn_config: one row per tenant ── + op.create_table( + "vpn_config", + sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True), + sa.Column("tenant_id", UUID(as_uuid=True), sa.ForeignKey("tenants.id", ondelete="CASCADE"), nullable=False, unique=True), + sa.Column("server_private_key", sa.LargeBinary(), nullable=False), # AES-256-GCM encrypted + sa.Column("server_public_key", sa.String(64), nullable=False), + sa.Column("subnet", sa.String(32), nullable=False, server_default="10.10.0.0/24"), + sa.Column("server_port", sa.Integer(), nullable=False, server_default="51820"), + sa.Column("server_address", sa.String(32), nullable=False, server_default="10.10.0.1/24"), + sa.Column("endpoint", sa.String(255), nullable=True), # public hostname:port for devices to connect to + sa.Column("is_enabled", sa.Boolean(), nullable=False, server_default="false"), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + ) + + # ── vpn_peers: one per device VPN connection ── + op.create_table( + "vpn_peers", + sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True), + sa.Column("tenant_id", UUID(as_uuid=True), sa.ForeignKey("tenants.id", ondelete="CASCADE"), nullable=False), + sa.Column("device_id", UUID(as_uuid=True), sa.ForeignKey("devices.id", ondelete="CASCADE"), nullable=False, unique=True), + sa.Column("peer_private_key", sa.LargeBinary(), nullable=False), # AES-256-GCM encrypted + sa.Column("peer_public_key", sa.String(64), nullable=False), + sa.Column("preshared_key", sa.LargeBinary(), nullable=True), # AES-256-GCM encrypted, optional + sa.Column("assigned_ip", sa.String(32), nullable=False), # e.g. 10.10.0.2/24 + sa.Column("additional_allowed_ips", sa.String(512), nullable=True), # comma-separated subnets for site-to-site + sa.Column("is_enabled", sa.Boolean(), nullable=False, server_default="true"), + sa.Column("last_handshake", sa.DateTime(timezone=True), nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + ) + + # Indexes + op.create_index("ix_vpn_peers_tenant_id", "vpn_peers", ["tenant_id"]) + + # ── RLS policies ── + op.execute("ALTER TABLE vpn_config ENABLE ROW LEVEL SECURITY") + op.execute(""" + CREATE POLICY vpn_config_tenant_isolation ON vpn_config + FOR ALL + TO app_user + USING (CAST(tenant_id AS text) = current_setting('app.current_tenant', true)) + """) + + op.execute("ALTER TABLE vpn_peers ENABLE ROW LEVEL SECURITY") + op.execute(""" + CREATE POLICY vpn_peers_tenant_isolation ON vpn_peers + FOR ALL + TO app_user + USING (CAST(tenant_id AS text) = current_setting('app.current_tenant', true)) + """) + + # ── Grants ── + op.execute("GRANT SELECT, INSERT, UPDATE, DELETE ON vpn_config TO app_user") + op.execute("GRANT SELECT, INSERT, UPDATE, DELETE ON vpn_peers TO app_user") + + +def downgrade() -> None: + op.execute("DROP POLICY IF EXISTS vpn_peers_tenant_isolation ON vpn_peers") + op.execute("DROP POLICY IF EXISTS vpn_config_tenant_isolation ON vpn_config") + op.drop_table("vpn_peers") + op.drop_table("vpn_config") diff --git a/backend/alembic/versions/012_seed_starter_templates.py b/backend/alembic/versions/012_seed_starter_templates.py new file mode 100644 index 0000000..375fffe --- /dev/null +++ b/backend/alembic/versions/012_seed_starter_templates.py @@ -0,0 +1,169 @@ +"""Seed starter config templates for tenants missing them. + +Revision ID: 012 +Revises: 010 +Create Date: 2026-03-02 + +Re-seeds the 4 original starter templates from 006 plus a new comprehensive +'Basic Router' template for any tenants created after migration 006 ran. +Uses ON CONFLICT (tenant_id, name) DO NOTHING so existing templates are untouched. +""" + +revision = "012" +down_revision = "010" +branch_labels = None +depends_on = None + +from alembic import op +import sqlalchemy as sa + + +def upgrade() -> None: + conn = op.get_bind() + + # 1. Basic Router — comprehensive starter for a typical SOHO/branch router + conn.execute(sa.text(""" + INSERT INTO config_templates (id, tenant_id, name, description, content, variables) + SELECT + gen_random_uuid(), + t.id, + 'Basic Router', + 'Complete SOHO/branch router setup: WAN on ether1, LAN bridge, DHCP, DNS, NAT, basic firewall', + '/interface bridge add name=bridge-lan comment="LAN bridge" +/interface bridge port add bridge=bridge-lan interface=ether2 +/interface bridge port add bridge=bridge-lan interface=ether3 +/interface bridge port add bridge=bridge-lan interface=ether4 +/interface bridge port add bridge=bridge-lan interface=ether5 + +# WAN — DHCP client on ether1 +/ip dhcp-client add interface={{ wan_interface }} disabled=no comment="WAN uplink" + +# LAN address +/ip address add address={{ lan_gateway }}/{{ lan_cidr }} interface=bridge-lan + +# DNS +/ip dns set servers={{ dns_servers }} allow-remote-requests=yes + +# DHCP server for LAN +/ip pool add name=lan-pool ranges={{ dhcp_start }}-{{ dhcp_end }} +/ip dhcp-server network add address={{ lan_network }}/{{ lan_cidr }} gateway={{ lan_gateway }} dns-server={{ lan_gateway }} +/ip dhcp-server add name=lan-dhcp interface=bridge-lan address-pool=lan-pool disabled=no + +# NAT masquerade +/ip firewall nat add chain=srcnat out-interface={{ wan_interface }} action=masquerade + +# Firewall — input chain +/ip firewall filter +add chain=input connection-state=established,related action=accept +add chain=input connection-state=invalid action=drop +add chain=input in-interface={{ wan_interface }} action=drop comment="Drop all other WAN input" + +# Firewall — forward chain +add chain=forward connection-state=established,related action=accept +add chain=forward connection-state=invalid action=drop +add chain=forward in-interface=bridge-lan out-interface={{ wan_interface }} action=accept comment="Allow LAN to WAN" +add chain=forward action=drop comment="Drop everything else" + +# NTP +/system ntp client set enabled=yes servers={{ ntp_server }} + +# Identity +/system identity set name={{ device.hostname }}', + '[{"name":"wan_interface","type":"string","default":"ether1","description":"WAN-facing interface"},{"name":"lan_gateway","type":"ip","default":"192.168.88.1","description":"LAN gateway IP"},{"name":"lan_cidr","type":"integer","default":"24","description":"LAN subnet mask bits"},{"name":"lan_network","type":"ip","default":"192.168.88.0","description":"LAN network address"},{"name":"dhcp_start","type":"ip","default":"192.168.88.100","description":"DHCP pool start"},{"name":"dhcp_end","type":"ip","default":"192.168.88.254","description":"DHCP pool end"},{"name":"dns_servers","type":"string","default":"8.8.8.8,8.8.4.4","description":"Upstream DNS servers"},{"name":"ntp_server","type":"string","default":"pool.ntp.org","description":"NTP server"}]'::jsonb + FROM tenants t + WHERE NOT EXISTS ( + SELECT 1 FROM config_templates ct + WHERE ct.tenant_id = t.id AND ct.name = 'Basic Router' + ) + """)) + + # 2. Re-seed Basic Firewall (for tenants missing it) + conn.execute(sa.text(""" + INSERT INTO config_templates (id, tenant_id, name, description, content, variables) + SELECT + gen_random_uuid(), + t.id, + 'Basic Firewall', + 'Standard firewall ruleset with WAN protection and LAN forwarding', + '/ip firewall filter +add chain=input connection-state=established,related action=accept +add chain=input connection-state=invalid action=drop +add chain=input in-interface={{ wan_interface }} protocol=tcp dst-port=8291 action=drop comment="Block Winbox from WAN" +add chain=input in-interface={{ wan_interface }} protocol=tcp dst-port=22 action=drop comment="Block SSH from WAN" +add chain=forward connection-state=established,related action=accept +add chain=forward connection-state=invalid action=drop +add chain=forward src-address={{ allowed_network }} action=accept +add chain=forward action=drop', + '[{"name":"wan_interface","type":"string","default":"ether1","description":"WAN-facing interface"},{"name":"allowed_network","type":"subnet","default":"192.168.88.0/24","description":"Allowed source network"}]'::jsonb + FROM tenants t + WHERE NOT EXISTS ( + SELECT 1 FROM config_templates ct + WHERE ct.tenant_id = t.id AND ct.name = 'Basic Firewall' + ) + """)) + + # 3. Re-seed DHCP Server Setup + conn.execute(sa.text(""" + INSERT INTO config_templates (id, tenant_id, name, description, content, variables) + SELECT + gen_random_uuid(), + t.id, + 'DHCP Server Setup', + 'Configure DHCP server with address pool, DNS, and gateway', + '/ip pool add name=dhcp-pool ranges={{ pool_start }}-{{ pool_end }} +/ip dhcp-server network add address={{ gateway }}/24 gateway={{ gateway }} dns-server={{ dns_server }} +/ip dhcp-server add name=dhcp1 interface={{ interface }} address-pool=dhcp-pool disabled=no', + '[{"name":"pool_start","type":"ip","default":"192.168.88.100","description":"DHCP pool start address"},{"name":"pool_end","type":"ip","default":"192.168.88.254","description":"DHCP pool end address"},{"name":"gateway","type":"ip","default":"192.168.88.1","description":"Default gateway"},{"name":"dns_server","type":"ip","default":"8.8.8.8","description":"DNS server address"},{"name":"interface","type":"string","default":"bridge-lan","description":"Interface to serve DHCP on"}]'::jsonb + FROM tenants t + WHERE NOT EXISTS ( + SELECT 1 FROM config_templates ct + WHERE ct.tenant_id = t.id AND ct.name = 'DHCP Server Setup' + ) + """)) + + # 4. Re-seed Wireless AP Config + conn.execute(sa.text(""" + INSERT INTO config_templates (id, tenant_id, name, description, content, variables) + SELECT + gen_random_uuid(), + t.id, + 'Wireless AP Config', + 'Configure wireless access point with WPA2 security', + '/interface wireless security-profiles add name=portal-wpa2 mode=dynamic-keys authentication-types=wpa2-psk wpa2-pre-shared-key={{ password }} +/interface wireless set wlan1 mode=ap-bridge ssid={{ ssid }} security-profile=portal-wpa2 frequency={{ frequency }} channel-width={{ channel_width }} disabled=no', + '[{"name":"ssid","type":"string","default":"MikroTik-AP","description":"Wireless network name"},{"name":"password","type":"string","default":"","description":"WPA2 pre-shared key (min 8 characters)"},{"name":"frequency","type":"integer","default":"2412","description":"Wireless frequency in MHz"},{"name":"channel_width","type":"string","default":"20/40mhz-XX","description":"Channel width setting"}]'::jsonb + FROM tenants t + WHERE NOT EXISTS ( + SELECT 1 FROM config_templates ct + WHERE ct.tenant_id = t.id AND ct.name = 'Wireless AP Config' + ) + """)) + + # 5. Re-seed Initial Device Setup + conn.execute(sa.text(""" + INSERT INTO config_templates (id, tenant_id, name, description, content, variables) + SELECT + gen_random_uuid(), + t.id, + 'Initial Device Setup', + 'Set device identity, NTP, DNS, and disable unused services', + '/system identity set name={{ device.hostname }} +/system ntp client set enabled=yes servers={{ ntp_server }} +/ip dns set servers={{ dns_servers }} allow-remote-requests=no +/ip service disable telnet,ftp,www,api-ssl +/ip service set ssh port=22 +/ip service set winbox port=8291', + '[{"name":"ntp_server","type":"ip","default":"pool.ntp.org","description":"NTP server address"},{"name":"dns_servers","type":"string","default":"8.8.8.8,8.8.4.4","description":"Comma-separated DNS servers"}]'::jsonb + FROM tenants t + WHERE NOT EXISTS ( + SELECT 1 FROM config_templates ct + WHERE ct.tenant_id = t.id AND ct.name = 'Initial Device Setup' + ) + """)) + + +def downgrade() -> None: + conn = op.get_bind() + conn.execute(sa.text( + "DELETE FROM config_templates WHERE name = 'Basic Router'" + )) diff --git a/backend/alembic/versions/013_certificates.py b/backend/alembic/versions/013_certificates.py new file mode 100644 index 0000000..b29f1d0 --- /dev/null +++ b/backend/alembic/versions/013_certificates.py @@ -0,0 +1,203 @@ +"""Add certificate authority and device certificate tables. + +Revision ID: 013 +Revises: 012 +Create Date: 2026-03-03 + +Creates the `certificate_authorities` (one per tenant) and `device_certificates` +(one per device) tables for the Internal Certificate Authority feature. +Also adds a `tls_mode` column to the `devices` table to track per-device +TLS verification mode (insecure vs portal_ca). + +Both tables have RLS policies for tenant isolation, plus poller_user read +access (the poller needs CA cert PEM to verify device TLS connections). +""" + +revision = "013" +down_revision = "012" +branch_labels = None +depends_on = None + +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import UUID + + +def upgrade() -> None: + # --- certificate_authorities table --- + op.create_table( + "certificate_authorities", + sa.Column( + "id", + UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + primary_key=True, + ), + sa.Column( + "tenant_id", + UUID(as_uuid=True), + sa.ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + unique=True, + ), + sa.Column("common_name", sa.String(255), nullable=False), + sa.Column("cert_pem", sa.Text(), nullable=False), + sa.Column("encrypted_private_key", sa.LargeBinary(), nullable=False), + sa.Column("serial_number", sa.String(64), nullable=False), + sa.Column("fingerprint_sha256", sa.String(95), nullable=False), + sa.Column( + "not_valid_before", + sa.DateTime(timezone=True), + nullable=False, + ), + sa.Column( + "not_valid_after", + sa.DateTime(timezone=True), + nullable=False, + ), + sa.Column( + "created_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + ), + ) + + # --- device_certificates table --- + op.create_table( + "device_certificates", + sa.Column( + "id", + UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + primary_key=True, + ), + sa.Column( + "tenant_id", + UUID(as_uuid=True), + sa.ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + ), + sa.Column( + "device_id", + UUID(as_uuid=True), + sa.ForeignKey("devices.id", ondelete="CASCADE"), + nullable=False, + ), + sa.Column( + "ca_id", + UUID(as_uuid=True), + sa.ForeignKey("certificate_authorities.id", ondelete="CASCADE"), + nullable=False, + ), + sa.Column("common_name", sa.String(255), nullable=False), + sa.Column("serial_number", sa.String(64), nullable=False), + sa.Column("fingerprint_sha256", sa.String(95), nullable=False), + sa.Column("cert_pem", sa.Text(), nullable=False), + sa.Column("encrypted_private_key", sa.LargeBinary(), nullable=False), + sa.Column( + "not_valid_before", + sa.DateTime(timezone=True), + nullable=False, + ), + sa.Column( + "not_valid_after", + sa.DateTime(timezone=True), + nullable=False, + ), + sa.Column( + "status", + sa.String(20), + nullable=False, + server_default="issued", + ), + sa.Column("deployed_at", sa.DateTime(timezone=True), nullable=True), + sa.Column( + "created_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + ), + sa.Column( + "updated_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + ), + ) + + # --- Add tls_mode column to devices table --- + op.add_column( + "devices", + sa.Column( + "tls_mode", + sa.String(20), + nullable=False, + server_default="insecure", + ), + ) + + # --- RLS policies --- + conn = op.get_bind() + + # certificate_authorities RLS + conn.execute(sa.text( + "ALTER TABLE certificate_authorities ENABLE ROW LEVEL SECURITY" + )) + conn.execute(sa.text( + "GRANT SELECT, INSERT, UPDATE, DELETE ON certificate_authorities TO app_user" + )) + conn.execute(sa.text( + "CREATE POLICY tenant_isolation ON certificate_authorities FOR ALL " + "USING (tenant_id = NULLIF(current_setting('app.current_tenant', true), '')::uuid) " + "WITH CHECK (tenant_id = NULLIF(current_setting('app.current_tenant', true), '')::uuid)" + )) + conn.execute(sa.text( + "GRANT SELECT ON certificate_authorities TO poller_user" + )) + + # device_certificates RLS + conn.execute(sa.text( + "ALTER TABLE device_certificates ENABLE ROW LEVEL SECURITY" + )) + conn.execute(sa.text( + "GRANT SELECT, INSERT, UPDATE, DELETE ON device_certificates TO app_user" + )) + conn.execute(sa.text( + "CREATE POLICY tenant_isolation ON device_certificates FOR ALL " + "USING (tenant_id = NULLIF(current_setting('app.current_tenant', true), '')::uuid) " + "WITH CHECK (tenant_id = NULLIF(current_setting('app.current_tenant', true), '')::uuid)" + )) + conn.execute(sa.text( + "GRANT SELECT ON device_certificates TO poller_user" + )) + + +def downgrade() -> None: + conn = op.get_bind() + + # Drop RLS policies + conn.execute(sa.text( + "DROP POLICY IF EXISTS tenant_isolation ON device_certificates" + )) + conn.execute(sa.text( + "DROP POLICY IF EXISTS tenant_isolation ON certificate_authorities" + )) + + # Revoke grants + conn.execute(sa.text( + "REVOKE ALL ON device_certificates FROM app_user" + )) + conn.execute(sa.text( + "REVOKE ALL ON device_certificates FROM poller_user" + )) + conn.execute(sa.text( + "REVOKE ALL ON certificate_authorities FROM app_user" + )) + conn.execute(sa.text( + "REVOKE ALL ON certificate_authorities FROM poller_user" + )) + + # Drop tls_mode column from devices + op.drop_column("devices", "tls_mode") + + # Drop tables + op.drop_table("device_certificates") + op.drop_table("certificate_authorities") diff --git a/backend/alembic/versions/014_timescaledb_retention.py b/backend/alembic/versions/014_timescaledb_retention.py new file mode 100644 index 0000000..cb48a97 --- /dev/null +++ b/backend/alembic/versions/014_timescaledb_retention.py @@ -0,0 +1,50 @@ +"""Add TimescaleDB retention policies. + +Revision ID: 014 +Revises: 013 +Create Date: 2026-03-03 + +Adds retention (drop after 90 days) on all three hypertables: +interface_metrics, health_metrics, wireless_metrics. + +Note: Compression is skipped because TimescaleDB 2.17.x does not support +compression on tables with row-level security (RLS) policies. +Compression can be re-added when upgrading to TimescaleDB >= 2.19. + +Without retention policies the database grows ~5 GB/month unbounded. +""" + +revision = "014" +down_revision = "013" +branch_labels = None +depends_on = None + +from alembic import op +import sqlalchemy as sa + + +HYPERTABLES = [ + "interface_metrics", + "health_metrics", + "wireless_metrics", +] + + +def upgrade() -> None: + conn = op.get_bind() + + for table in HYPERTABLES: + # Drop chunks older than 90 days + conn.execute(sa.text( + f"SELECT add_retention_policy('{table}', INTERVAL '90 days')" + )) + + +def downgrade() -> None: + conn = op.get_bind() + + for table in HYPERTABLES: + # Remove retention policy + conn.execute(sa.text( + f"SELECT remove_retention_policy('{table}', if_exists => true)" + )) diff --git a/backend/alembic/versions/015_password_reset_tokens.py b/backend/alembic/versions/015_password_reset_tokens.py new file mode 100644 index 0000000..4fae0ea --- /dev/null +++ b/backend/alembic/versions/015_password_reset_tokens.py @@ -0,0 +1,62 @@ +"""Add password_reset_tokens table. + +Revision ID: 015 +Revises: 014 +Create Date: 2026-03-03 + +Stores one-time password reset tokens with expiry. Tokens are hashed +with SHA-256 so a database leak doesn't expose reset links. +""" + +revision = "015" +down_revision = "014" +branch_labels = None +depends_on = None + +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import UUID + + +def upgrade() -> None: + op.create_table( + "password_reset_tokens", + sa.Column( + "id", + UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + primary_key=True, + ), + sa.Column( + "user_id", + UUID(as_uuid=True), + sa.ForeignKey("users.id", ondelete="CASCADE"), + nullable=False, + ), + sa.Column( + "token_hash", + sa.String(64), + nullable=False, + unique=True, + index=True, + ), + sa.Column( + "expires_at", + sa.DateTime(timezone=True), + nullable=False, + ), + sa.Column( + "used_at", + sa.DateTime(timezone=True), + nullable=True, + ), + sa.Column( + "created_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + ), + ) + + +def downgrade() -> None: + op.drop_table("password_reset_tokens") diff --git a/backend/alembic/versions/016_zero_knowledge_schema.py b/backend/alembic/versions/016_zero_knowledge_schema.py new file mode 100644 index 0000000..38dd56c --- /dev/null +++ b/backend/alembic/versions/016_zero_knowledge_schema.py @@ -0,0 +1,207 @@ +"""Add zero-knowledge authentication schema. + +Revision ID: 016 +Revises: 015 +Create Date: 2026-03-03 + +Adds SRP columns to users, creates user_key_sets table for encrypted +key bundles, creates immutable key_access_log audit trail, and adds +vault key columns to tenants (Phase 29 preparation). + +Both new tables have RLS policies. key_access_log is append-only +(INSERT+SELECT only, no UPDATE/DELETE). +""" + +revision = "016" +down_revision = "015" +branch_labels = None +depends_on = None + +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import UUID + + +def upgrade() -> None: + # --- Add SRP columns to users table --- + op.add_column( + "users", + sa.Column("srp_salt", sa.LargeBinary(), nullable=True), + ) + op.add_column( + "users", + sa.Column("srp_verifier", sa.LargeBinary(), nullable=True), + ) + op.add_column( + "users", + sa.Column( + "auth_version", + sa.SmallInteger(), + server_default=sa.text("1"), + nullable=False, + ), + ) + + # --- Create user_key_sets table --- + op.create_table( + "user_key_sets", + sa.Column( + "id", + UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + primary_key=True, + ), + sa.Column( + "user_id", + UUID(as_uuid=True), + sa.ForeignKey("users.id", ondelete="CASCADE"), + nullable=False, + unique=True, + ), + sa.Column( + "tenant_id", + UUID(as_uuid=True), + sa.ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=True, # NULL for super_admin + ), + sa.Column("encrypted_private_key", sa.LargeBinary(), nullable=False), + sa.Column("private_key_nonce", sa.LargeBinary(), nullable=False), + sa.Column("encrypted_vault_key", sa.LargeBinary(), nullable=False), + sa.Column("vault_key_nonce", sa.LargeBinary(), nullable=False), + sa.Column("public_key", sa.LargeBinary(), nullable=False), + sa.Column( + "pbkdf2_iterations", + sa.Integer(), + server_default=sa.text("650000"), + nullable=False, + ), + sa.Column("pbkdf2_salt", sa.LargeBinary(), nullable=False), + sa.Column("hkdf_salt", sa.LargeBinary(), nullable=False), + sa.Column( + "key_version", + sa.Integer(), + server_default=sa.text("1"), + nullable=False, + ), + sa.Column( + "created_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + ), + sa.Column( + "updated_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + ), + ) + + # --- Create key_access_log table (immutable audit trail) --- + op.create_table( + "key_access_log", + sa.Column( + "id", + UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + primary_key=True, + ), + sa.Column( + "tenant_id", + UUID(as_uuid=True), + sa.ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + ), + sa.Column( + "user_id", + UUID(as_uuid=True), + sa.ForeignKey("users.id", ondelete="SET NULL"), + nullable=True, + ), + sa.Column("action", sa.Text(), nullable=False), + sa.Column("resource_type", sa.Text(), nullable=True), + sa.Column("resource_id", sa.Text(), nullable=True), + sa.Column("key_version", sa.Integer(), nullable=True), + sa.Column("ip_address", sa.Text(), nullable=True), + sa.Column( + "created_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + ) + + # --- Add vault key columns to tenants (Phase 29 preparation) --- + op.add_column( + "tenants", + sa.Column("encrypted_vault_key", sa.LargeBinary(), nullable=True), + ) + op.add_column( + "tenants", + sa.Column( + "vault_key_version", + sa.Integer(), + server_default=sa.text("1"), + ), + ) + + # --- RLS policies --- + conn = op.get_bind() + + # user_key_sets RLS + conn.execute(sa.text( + "ALTER TABLE user_key_sets ENABLE ROW LEVEL SECURITY" + )) + conn.execute(sa.text( + "CREATE POLICY user_key_sets_tenant_isolation ON user_key_sets " + "USING (tenant_id::text = current_setting('app.current_tenant', true) " + "OR current_setting('app.current_tenant', true) = 'super_admin')" + )) + conn.execute(sa.text( + "GRANT SELECT, INSERT, UPDATE ON user_key_sets TO app_user" + )) + + # key_access_log RLS (append-only: INSERT+SELECT only, no UPDATE/DELETE) + conn.execute(sa.text( + "ALTER TABLE key_access_log ENABLE ROW LEVEL SECURITY" + )) + conn.execute(sa.text( + "CREATE POLICY key_access_log_tenant_isolation ON key_access_log " + "USING (tenant_id::text = current_setting('app.current_tenant', true) " + "OR current_setting('app.current_tenant', true) = 'super_admin')" + )) + conn.execute(sa.text( + "GRANT INSERT, SELECT ON key_access_log TO app_user" + )) + # poller_user needs INSERT to log key access events when decrypting credentials + conn.execute(sa.text( + "GRANT INSERT, SELECT ON key_access_log TO poller_user" + )) + + +def downgrade() -> None: + conn = op.get_bind() + + # Drop RLS policies + conn.execute(sa.text( + "DROP POLICY IF EXISTS key_access_log_tenant_isolation ON key_access_log" + )) + conn.execute(sa.text( + "DROP POLICY IF EXISTS user_key_sets_tenant_isolation ON user_key_sets" + )) + + # Revoke grants + conn.execute(sa.text("REVOKE ALL ON key_access_log FROM app_user")) + conn.execute(sa.text("REVOKE ALL ON key_access_log FROM poller_user")) + conn.execute(sa.text("REVOKE ALL ON user_key_sets FROM app_user")) + + # Drop vault key columns from tenants + op.drop_column("tenants", "vault_key_version") + op.drop_column("tenants", "encrypted_vault_key") + + # Drop tables + op.drop_table("key_access_log") + op.drop_table("user_key_sets") + + # Drop SRP columns from users + op.drop_column("users", "auth_version") + op.drop_column("users", "srp_verifier") + op.drop_column("users", "srp_salt") diff --git a/backend/alembic/versions/017_openbao_envelope_encryption.py b/backend/alembic/versions/017_openbao_envelope_encryption.py new file mode 100644 index 0000000..b032ceb --- /dev/null +++ b/backend/alembic/versions/017_openbao_envelope_encryption.py @@ -0,0 +1,90 @@ +"""OpenBao envelope encryption columns and key_access_log extensions. + +Revision ID: 017 +Revises: 016 +Create Date: 2026-03-03 + +Adds Transit ciphertext columns (TEXT) alongside existing BYTEA columns +for dual-write migration strategy. Extends key_access_log with device_id, +justification, and correlation_id for Phase 29 audit trail. +""" + +revision = "017" +down_revision = "016" +branch_labels = None +depends_on = None + +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import UUID + + +def upgrade() -> None: + # --- Transit ciphertext columns (TEXT, alongside existing BYTEA) --- + + # devices: store OpenBao Transit ciphertext for credentials + op.add_column( + "devices", + sa.Column("encrypted_credentials_transit", sa.Text(), nullable=True), + ) + + # certificate_authorities: Transit-encrypted CA private keys + op.add_column( + "certificate_authorities", + sa.Column("encrypted_private_key_transit", sa.Text(), nullable=True), + ) + + # device_certificates: Transit-encrypted device cert private keys + op.add_column( + "device_certificates", + sa.Column("encrypted_private_key_transit", sa.Text(), nullable=True), + ) + + # notification_channels: Transit-encrypted SMTP password + op.add_column( + "notification_channels", + sa.Column("smtp_password_transit", sa.Text(), nullable=True), + ) + + # --- Tenant OpenBao key tracking --- + op.add_column( + "tenants", + sa.Column("openbao_key_name", sa.Text(), nullable=True), + ) + + # --- Extend key_access_log for Phase 29 --- + op.add_column( + "key_access_log", + sa.Column("device_id", UUID(as_uuid=True), nullable=True), + ) + op.add_column( + "key_access_log", + sa.Column("justification", sa.Text(), nullable=True), + ) + op.add_column( + "key_access_log", + sa.Column("correlation_id", sa.Text(), nullable=True), + ) + + # Add FK constraint for device_id -> devices(id) (nullable, so no cascade needed) + op.create_foreign_key( + "fk_key_access_log_device_id", + "key_access_log", + "devices", + ["device_id"], + ["id"], + ) + + +def downgrade() -> None: + op.drop_constraint( + "fk_key_access_log_device_id", "key_access_log", type_="foreignkey" + ) + op.drop_column("key_access_log", "correlation_id") + op.drop_column("key_access_log", "justification") + op.drop_column("key_access_log", "device_id") + op.drop_column("tenants", "openbao_key_name") + op.drop_column("notification_channels", "smtp_password_transit") + op.drop_column("device_certificates", "encrypted_private_key_transit") + op.drop_column("certificate_authorities", "encrypted_private_key_transit") + op.drop_column("devices", "encrypted_credentials_transit") diff --git a/backend/alembic/versions/018_data_encryption.py b/backend/alembic/versions/018_data_encryption.py new file mode 100644 index 0000000..f892213 --- /dev/null +++ b/backend/alembic/versions/018_data_encryption.py @@ -0,0 +1,62 @@ +"""Data encryption columns for config backups and audit logs. + +Revision ID: 018 +Revises: 017 +Create Date: 2026-03-03 + +Adds encryption metadata columns to config_backup_runs (encryption_tier, +encryption_nonce) and encrypted_details TEXT column to audit_logs for +Transit-encrypted audit detail storage. +""" + +revision = "018" +down_revision = "017" +branch_labels = None +depends_on = None + +from alembic import op +import sqlalchemy as sa + + +def upgrade() -> None: + # --- config_backup_runs: encryption metadata --- + + # NULL = plaintext, 1 = client-side AES-GCM, 2 = OpenBao Transit + op.add_column( + "config_backup_runs", + sa.Column( + "encryption_tier", + sa.SmallInteger(), + nullable=True, + comment="NULL=plaintext, 1=client-side AES-GCM, 2=OpenBao Transit", + ), + ) + + # 12-byte AES-GCM nonce for Tier 1 (client-side) backups + op.add_column( + "config_backup_runs", + sa.Column( + "encryption_nonce", + sa.LargeBinary(), + nullable=True, + comment="12-byte AES-GCM nonce for Tier 1 backups", + ), + ) + + # --- audit_logs: Transit-encrypted details --- + + op.add_column( + "audit_logs", + sa.Column( + "encrypted_details", + sa.Text(), + nullable=True, + comment="Transit-encrypted details JSON (vault:v1:...)", + ), + ) + + +def downgrade() -> None: + op.drop_column("audit_logs", "encrypted_details") + op.drop_column("config_backup_runs", "encryption_nonce") + op.drop_column("config_backup_runs", "encryption_tier") diff --git a/backend/alembic/versions/019_deprecate_bcrypt.py b/backend/alembic/versions/019_deprecate_bcrypt.py new file mode 100644 index 0000000..627a7cf --- /dev/null +++ b/backend/alembic/versions/019_deprecate_bcrypt.py @@ -0,0 +1,52 @@ +"""Deprecate bcrypt: add must_upgrade_auth flag and make hashed_password nullable. + +Revision ID: 019 +Revises: 018 +Create Date: 2026-03-03 + +Conservative migration that flags legacy bcrypt users for SRP upgrade +rather than dropping data. hashed_password is made nullable so SRP-only +users no longer need a dummy value. A future migration (post-v6.0) can +drop hashed_password once all users have upgraded. +""" + +import sqlalchemy as sa +from alembic import op + +revision = "019" +down_revision = "018" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # Add must_upgrade_auth flag + op.add_column( + "users", + sa.Column( + "must_upgrade_auth", + sa.Boolean(), + server_default="false", + nullable=False, + ), + ) + + # Flag all bcrypt-only users for upgrade (auth_version=1 and no SRP verifier) + op.execute( + "UPDATE users SET must_upgrade_auth = true " + "WHERE auth_version = 1 AND srp_verifier IS NULL" + ) + + # Make hashed_password nullable (SRP users don't need it) + op.alter_column("users", "hashed_password", nullable=True) + + +def downgrade() -> None: + # Restore NOT NULL (set a dummy value for any NULLs first) + op.execute( + "UPDATE users SET hashed_password = '$2b$12$placeholder' " + "WHERE hashed_password IS NULL" + ) + op.alter_column("users", "hashed_password", nullable=False) + + op.drop_column("users", "must_upgrade_auth") diff --git a/backend/alembic/versions/020_tls_mode_opt_in.py b/backend/alembic/versions/020_tls_mode_opt_in.py new file mode 100644 index 0000000..0d2b82b --- /dev/null +++ b/backend/alembic/versions/020_tls_mode_opt_in.py @@ -0,0 +1,51 @@ +"""Add opt-in plain-text TLS mode and change default from insecure to auto. + +Revision ID: 020 +Revises: 019 +Create Date: 2026-03-04 + +Reclassifies tls_mode values: +- 'auto': CA-verified -> InsecureSkipVerify (NO plain-text fallback) +- 'insecure': Skip directly to InsecureSkipVerify +- 'plain': Explicit opt-in for plain-text API (dangerous) +- 'portal_ca': Existing CA-verified mode (unchanged) + +Existing 'insecure' devices become 'auto' since the old behavior was +an implicit auto-fallback. portal_ca devices keep their mode. +""" + +import sqlalchemy as sa +from alembic import op + +revision = "020" +down_revision = "019" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # Migrate existing 'insecure' devices to 'auto' (the new default). + # 'portal_ca' devices keep their mode (they already have CA verification). + op.execute("UPDATE devices SET tls_mode = 'auto' WHERE tls_mode = 'insecure'") + + # Change the server default from 'insecure' to 'auto' + op.alter_column( + "devices", + "tls_mode", + server_default="auto", + ) + + +def downgrade() -> None: + # Revert 'auto' devices back to 'insecure' + op.execute("UPDATE devices SET tls_mode = 'insecure' WHERE tls_mode = 'auto'") + + # Revert 'plain' devices to 'insecure' (plain didn't exist before) + op.execute("UPDATE devices SET tls_mode = 'insecure' WHERE tls_mode = 'plain'") + + # Restore old server default + op.alter_column( + "devices", + "tls_mode", + server_default="insecure", + ) diff --git a/backend/alembic/versions/021_system_tenant_for_audit.py b/backend/alembic/versions/021_system_tenant_for_audit.py new file mode 100644 index 0000000..0483f77 --- /dev/null +++ b/backend/alembic/versions/021_system_tenant_for_audit.py @@ -0,0 +1,44 @@ +"""Add system tenant for super_admin audit log entries. + +Revision ID: 021 +Revises: 020 +Create Date: 2026-03-04 + +The super_admin has NULL tenant_id, but audit_logs.tenant_id has a FK +to tenants and is NOT NULL. Code was using uuid.UUID(int=0) as a +substitute, but that row didn't exist — causing FK violations that +silently dropped every super_admin audit entry. + +This migration inserts a sentinel 'System (Internal)' tenant so +audit_logs can reference it. +""" + +from alembic import op + +revision = "021" +down_revision = "020" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + op.execute( + """ + INSERT INTO tenants (id, name, description) + VALUES ( + '00000000-0000-0000-0000-000000000000', + 'System (Internal)', + 'Internal tenant for super_admin audit entries' + ) + ON CONFLICT (id) DO NOTHING + """ + ) + + +def downgrade() -> None: + op.execute( + """ + DELETE FROM tenants + WHERE id = '00000000-0000-0000-0000-000000000000' + """ + ) diff --git a/backend/alembic/versions/022_rls_super_admin_devices.py b/backend/alembic/versions/022_rls_super_admin_devices.py new file mode 100644 index 0000000..6a4bfbb --- /dev/null +++ b/backend/alembic/versions/022_rls_super_admin_devices.py @@ -0,0 +1,49 @@ +"""Add super_admin bypass to devices, device_groups, device_tags RLS policies. + +Previously these tables only matched tenant_id, so super_admin context +('super_admin') returned zero rows. Users/tenants tables already had +the bypass — this brings device tables in line. + +Revision ID: 022 +Revises: 021 +Create Date: 2026-03-07 +""" + +import sqlalchemy as sa +from alembic import op + +revision = "022" +down_revision = "021" +branch_labels = None +depends_on = None + +# Tables that need super_admin bypass added to their RLS policy +_TABLES = ["devices", "device_groups", "device_tags"] + + +def upgrade() -> None: + conn = op.get_bind() + for table in _TABLES: + conn.execute(sa.text(f"DROP POLICY IF EXISTS tenant_isolation ON {table}")) + conn.execute(sa.text(f""" + CREATE POLICY tenant_isolation ON {table} + USING ( + tenant_id::text = current_setting('app.current_tenant', true) + OR current_setting('app.current_tenant', true) = 'super_admin' + ) + WITH CHECK ( + tenant_id::text = current_setting('app.current_tenant', true) + OR current_setting('app.current_tenant', true) = 'super_admin' + ) + """)) + + +def downgrade() -> None: + conn = op.get_bind() + for table in _TABLES: + conn.execute(sa.text(f"DROP POLICY IF EXISTS tenant_isolation ON {table}")) + conn.execute(sa.text(f""" + CREATE POLICY tenant_isolation ON {table} + USING (tenant_id::text = current_setting('app.current_tenant', true)) + WITH CHECK (tenant_id::text = current_setting('app.current_tenant', true)) + """)) diff --git a/backend/alembic/versions/023_slack_notification_channel.py b/backend/alembic/versions/023_slack_notification_channel.py new file mode 100644 index 0000000..b0c8d9f --- /dev/null +++ b/backend/alembic/versions/023_slack_notification_channel.py @@ -0,0 +1,21 @@ +"""Add Slack notification channel support. + +Revision ID: 023 +Revises: 022 +""" + +from alembic import op +import sqlalchemy as sa + +revision = "023" +down_revision = "022" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + op.add_column("notification_channels", sa.Column("slack_webhook_url", sa.Text(), nullable=True)) + + +def downgrade() -> None: + op.drop_column("notification_channels", "slack_webhook_url") diff --git a/backend/alembic/versions/024_contact_email_and_offline_rule.py b/backend/alembic/versions/024_contact_email_and_offline_rule.py new file mode 100644 index 0000000..6c5e035 --- /dev/null +++ b/backend/alembic/versions/024_contact_email_and_offline_rule.py @@ -0,0 +1,41 @@ +"""Add contact_email to tenants and seed device_offline default alert rule. + +Revision ID: 024 +Revises: 023 +""" + +from alembic import op +import sqlalchemy as sa + + +revision = "024" +down_revision = "023" + + +def upgrade() -> None: + conn = op.get_bind() + + # 1. Add contact_email column to tenants + op.add_column("tenants", sa.Column("contact_email", sa.String(255), nullable=True)) + + # 2. Seed device_offline default alert rule for all existing tenants + conn.execute(sa.text(""" + INSERT INTO alert_rules (id, tenant_id, name, metric, operator, threshold, duration_polls, severity, enabled, is_default) + SELECT gen_random_uuid(), t.id, 'Device Offline', 'device_offline', 'eq', 1, 1, 'critical', TRUE, TRUE + FROM tenants t + WHERE t.id != '00000000-0000-0000-0000-000000000000' + AND NOT EXISTS ( + SELECT 1 FROM alert_rules ar + WHERE ar.tenant_id = t.id AND ar.metric = 'device_offline' AND ar.is_default = TRUE + ) + """)) + + +def downgrade() -> None: + conn = op.get_bind() + + conn.execute(sa.text(""" + DELETE FROM alert_rules WHERE metric = 'device_offline' AND is_default = TRUE + """)) + + op.drop_column("tenants", "contact_email") diff --git a/backend/alembic/versions/025_fix_key_access_log_device_fk.py b/backend/alembic/versions/025_fix_key_access_log_device_fk.py new file mode 100644 index 0000000..818c3b6 --- /dev/null +++ b/backend/alembic/versions/025_fix_key_access_log_device_fk.py @@ -0,0 +1,37 @@ +"""Fix key_access_log device_id FK to SET NULL on delete. + +Revision ID: 025 +Revises: 024 +""" + +from alembic import op + +revision = "025" +down_revision = "024" + + +def upgrade() -> None: + op.drop_constraint( + "fk_key_access_log_device_id", "key_access_log", type_="foreignkey" + ) + op.create_foreign_key( + "fk_key_access_log_device_id", + "key_access_log", + "devices", + ["device_id"], + ["id"], + ondelete="SET NULL", + ) + + +def downgrade() -> None: + op.drop_constraint( + "fk_key_access_log_device_id", "key_access_log", type_="foreignkey" + ) + op.create_foreign_key( + "fk_key_access_log_device_id", + "key_access_log", + "devices", + ["device_id"], + ["id"], + ) diff --git a/backend/alembic/versions/026_system_settings.py b/backend/alembic/versions/026_system_settings.py new file mode 100644 index 0000000..aca01f8 --- /dev/null +++ b/backend/alembic/versions/026_system_settings.py @@ -0,0 +1,41 @@ +"""Add system_settings table for instance-wide configuration. + +Revision ID: 026 +Revises: 025 +Create Date: 2026-03-08 +""" + +revision = "026" +down_revision = "025" +branch_labels = None +depends_on = None + +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import UUID + + +def upgrade() -> None: + op.create_table( + "system_settings", + sa.Column("key", sa.String(255), primary_key=True), + sa.Column("value", sa.Text, nullable=True), + sa.Column("encrypted_value", sa.LargeBinary, nullable=True), + sa.Column("encrypted_value_transit", sa.Text, nullable=True), + sa.Column( + "updated_at", + sa.DateTime(timezone=True), + server_default=sa.func.now(), + nullable=False, + ), + sa.Column( + "updated_by", + UUID(as_uuid=True), + sa.ForeignKey("users.id", ondelete="SET NULL"), + nullable=True, + ), + ) + + +def downgrade() -> None: + op.drop_table("system_settings") diff --git a/backend/app/__init__.py b/backend/app/__init__.py new file mode 100644 index 0000000..6d6bd0f --- /dev/null +++ b/backend/app/__init__.py @@ -0,0 +1 @@ +# TOD Backend diff --git a/backend/app/config.py b/backend/app/config.py new file mode 100644 index 0000000..de358ad --- /dev/null +++ b/backend/app/config.py @@ -0,0 +1,177 @@ +"""Application configuration using Pydantic Settings.""" + +import base64 +import sys +from functools import lru_cache +from typing import Optional + +from pydantic import field_validator +from pydantic_settings import BaseSettings, SettingsConfigDict + +# Known insecure default values that MUST NOT be used in non-dev environments. +# If any of these are detected in production/staging, the app refuses to start. +KNOWN_INSECURE_DEFAULTS: dict[str, list[str]] = { + "JWT_SECRET_KEY": [ + "change-this-in-production-use-a-long-random-string", + "dev-jwt-secret-change-in-production", + "CHANGE_ME_IN_PRODUCTION", + ], + "CREDENTIAL_ENCRYPTION_KEY": [ + "LLLjnfBZTSycvL2U07HDSxUeTtLxb9cZzryQl0R9E4w=", + "CHANGE_ME_IN_PRODUCTION", + ], + "OPENBAO_TOKEN": [ + "dev-openbao-token", + "CHANGE_ME_IN_PRODUCTION", + ], +} + + +def validate_production_settings(settings: "Settings") -> None: + """Reject known-insecure defaults in non-dev environments. + + Called during app startup. Exits with code 1 and clear error message + if production is running with dev secrets. + """ + if settings.ENVIRONMENT == "dev": + return + + for field, insecure_values in KNOWN_INSECURE_DEFAULTS.items(): + actual = getattr(settings, field, None) + if actual in insecure_values: + print( + f"FATAL: {field} uses a known insecure default in '{settings.ENVIRONMENT}' environment.\n" + f"Generate a secure value and set it in your .env.prod file.\n" + f"For JWT_SECRET_KEY: python -c \"import secrets; print(secrets.token_urlsafe(64))\"\n" + f"For CREDENTIAL_ENCRYPTION_KEY: python -c \"import secrets, base64; print(base64.b64encode(secrets.token_bytes(32)).decode())\"", + file=sys.stderr, + ) + sys.exit(1) + + +class Settings(BaseSettings): + model_config = SettingsConfigDict( + env_file=".env", + env_file_encoding="utf-8", + case_sensitive=False, + extra="ignore", + ) + + # Environment (dev | staging | production) + ENVIRONMENT: str = "dev" + + # Database + DATABASE_URL: str = "postgresql+asyncpg://postgres:postgres@localhost:5432/mikrotik" + # Sync URL used by Alembic only + SYNC_DATABASE_URL: str = "postgresql+psycopg2://postgres:postgres@localhost:5432/mikrotik" + + # App user for RLS enforcement (cannot bypass RLS) + APP_USER_DATABASE_URL: str = "postgresql+asyncpg://app_user:app_password@localhost:5432/mikrotik" + + # Database connection pool + DB_POOL_SIZE: int = 20 + DB_MAX_OVERFLOW: int = 40 + DB_ADMIN_POOL_SIZE: int = 10 + DB_ADMIN_MAX_OVERFLOW: int = 20 + + # Redis + REDIS_URL: str = "redis://localhost:6379/0" + + # NATS JetStream + NATS_URL: str = "nats://localhost:4222" + + # JWT configuration + JWT_SECRET_KEY: str = "change-this-in-production-use-a-long-random-string" + JWT_ALGORITHM: str = "HS256" + JWT_ACCESS_TOKEN_EXPIRE_MINUTES: int = 15 + JWT_REFRESH_TOKEN_EXPIRE_DAYS: int = 7 + + # Credential encryption key — must be 32 bytes, base64-encoded in env + # Generate with: python -c "import secrets, base64; print(base64.b64encode(secrets.token_bytes(32)).decode())" + CREDENTIAL_ENCRYPTION_KEY: str = "LLLjnfBZTSycvL2U07HDSxUeTtLxb9cZzryQl0R9E4w=" + + # OpenBao Transit (KMS for per-tenant credential encryption) + OPENBAO_ADDR: str = "http://localhost:8200" + OPENBAO_TOKEN: str = "dev-openbao-token" + + # First admin bootstrap + FIRST_ADMIN_EMAIL: Optional[str] = None + FIRST_ADMIN_PASSWORD: Optional[str] = None + + # CORS origins (comma-separated) + CORS_ORIGINS: str = "http://localhost:3000,http://localhost:5173,http://localhost:8080" + + # Git store — PVC mount for bare git repos (one per tenant). + # In production: /data/git-store (Kubernetes PVC ReadWriteMany). + # In local dev: ./git-store (relative to cwd, created on first use). + GIT_STORE_PATH: str = "./git-store" + + # WireGuard config path — shared volume with the WireGuard container + WIREGUARD_CONFIG_PATH: str = "/data/wireguard" + + # Firmware cache + FIRMWARE_CACHE_DIR: str = "/data/firmware-cache" # PVC mount path + FIRMWARE_CHECK_INTERVAL_HOURS: int = 24 # How often to check for new versions + + # SMTP settings for transactional email (password reset, etc.) + SMTP_HOST: str = "localhost" + SMTP_PORT: int = 587 + SMTP_USER: Optional[str] = None + SMTP_PASSWORD: Optional[str] = None + SMTP_USE_TLS: bool = False + SMTP_FROM_ADDRESS: str = "noreply@mikrotik-portal.local" + + # Password reset + PASSWORD_RESET_TOKEN_EXPIRE_MINUTES: int = 30 + APP_BASE_URL: str = "http://localhost:5173" + + # App settings + APP_NAME: str = "TOD - The Other Dude" + APP_VERSION: str = "0.1.0" + DEBUG: bool = False + + @field_validator("CREDENTIAL_ENCRYPTION_KEY") + @classmethod + def validate_encryption_key(cls, v: str) -> str: + """Ensure the key decodes to exactly 32 bytes. + + Note: CHANGE_ME_IN_PRODUCTION is allowed through this validator + because it fails the base64 length check. The production safety + check in validate_production_settings() catches it separately. + """ + if v == "CHANGE_ME_IN_PRODUCTION": + # Allow the placeholder through field validation -- the production + # safety check will reject it in non-dev environments. + return v + try: + key_bytes = base64.b64decode(v) + if len(key_bytes) != 32: + raise ValueError( + f"CREDENTIAL_ENCRYPTION_KEY must decode to exactly 32 bytes, got {len(key_bytes)}" + ) + except Exception as e: + raise ValueError(f"Invalid CREDENTIAL_ENCRYPTION_KEY: {e}") from e + return v + + def get_encryption_key_bytes(self) -> bytes: + """Return the encryption key as raw bytes.""" + return base64.b64decode(self.CREDENTIAL_ENCRYPTION_KEY) + + def get_cors_origins(self) -> list[str]: + """Return CORS origins as a list.""" + return [origin.strip() for origin in self.CORS_ORIGINS.split(",") if origin.strip()] + + +@lru_cache() +def get_settings() -> Settings: + """Return cached settings instance. + + Validates that production environments do not use insecure defaults. + This runs once (cached) at startup before the app accepts requests. + """ + s = Settings() + validate_production_settings(s) + return s + + +settings = get_settings() diff --git a/backend/app/database.py b/backend/app/database.py new file mode 100644 index 0000000..321aca4 --- /dev/null +++ b/backend/app/database.py @@ -0,0 +1,114 @@ +"""Database engine, session factory, and dependency injection.""" + +import uuid +from collections.abc import AsyncGenerator +from typing import Optional + +from sqlalchemy import text +from sqlalchemy.ext.asyncio import ( + AsyncSession, + async_sessionmaker, + create_async_engine, +) +from sqlalchemy.orm import DeclarativeBase + +from app.config import settings + + +class Base(DeclarativeBase): + """Base class for all SQLAlchemy ORM models.""" + pass + + +# Primary engine using postgres superuser (for migrations/admin) +engine = create_async_engine( + settings.DATABASE_URL, + echo=settings.DEBUG, + pool_pre_ping=True, + pool_size=settings.DB_ADMIN_POOL_SIZE, + max_overflow=settings.DB_ADMIN_MAX_OVERFLOW, +) + +# App user engine (enforces RLS — no superuser bypass) +app_engine = create_async_engine( + settings.APP_USER_DATABASE_URL, + echo=settings.DEBUG, + pool_pre_ping=True, + pool_size=settings.DB_POOL_SIZE, + max_overflow=settings.DB_MAX_OVERFLOW, +) + +# Session factory for the app_user connection (RLS enforced) +AsyncSessionLocal = async_sessionmaker( + app_engine, + class_=AsyncSession, + expire_on_commit=False, + autocommit=False, + autoflush=False, +) + +# Admin session factory (for bootstrap/migrations only) +AdminAsyncSessionLocal = async_sessionmaker( + engine, + class_=AsyncSession, + expire_on_commit=False, + autocommit=False, + autoflush=False, +) + + +async def get_db() -> AsyncGenerator[AsyncSession, None]: + """ + Dependency that yields an async database session using app_user (RLS enforced). + + The tenant context (SET LOCAL app.current_tenant) must be set by + tenant_context middleware before any tenant-scoped queries. + """ + async with AsyncSessionLocal() as session: + try: + yield session + await session.commit() + except Exception: + await session.rollback() + raise + finally: + await session.close() + + +async def get_admin_db() -> AsyncGenerator[AsyncSession, None]: + """ + Dependency that yields an admin database session (bypasses RLS). + USE ONLY for bootstrap operations and internal system tasks. + """ + async with AdminAsyncSessionLocal() as session: + try: + yield session + await session.commit() + except Exception: + await session.rollback() + raise + finally: + await session.close() + + +async def set_tenant_context(session: AsyncSession, tenant_id: Optional[str]) -> None: + """ + Set the PostgreSQL session variable for RLS enforcement. + + This MUST be called before any tenant-scoped query to activate RLS policies. + Uses SET LOCAL so the context resets at transaction end. + """ + if tenant_id: + # Allow 'super_admin' as a special RLS context value for cross-tenant access. + # Otherwise validate tenant_id is a valid UUID to prevent SQL injection. + # SET LOCAL cannot use parameterized queries in PostgreSQL. + if tenant_id != "super_admin": + try: + uuid.UUID(tenant_id) + except ValueError: + raise ValueError(f"Invalid tenant_id format: {tenant_id!r}") + await session.execute(text(f"SET LOCAL app.current_tenant = '{tenant_id}'")) + else: + # For super_admin users: set empty string which will not match any tenant + # The super_admin uses the admin engine which bypasses RLS + await session.execute(text("SET LOCAL app.current_tenant = ''")) diff --git a/backend/app/logging_config.py b/backend/app/logging_config.py new file mode 100644 index 0000000..d6b8bf8 --- /dev/null +++ b/backend/app/logging_config.py @@ -0,0 +1,81 @@ +"""Structured logging configuration for the FastAPI backend. + +Uses structlog with two rendering modes: +- Dev mode (ENVIRONMENT=dev or DEBUG=true): colored console output +- Prod mode: machine-parseable JSON output + +Must be called once during app startup (in lifespan), NOT at module import time, +so tests can override the configuration. +""" + +import logging +import os + +import structlog + + +def configure_logging() -> None: + """Configure structlog for the FastAPI application. + + Dev mode: colored console output with human-readable formatting. + Prod mode: JSON output with machine-parseable fields. + + Must be called once during app startup (in lifespan), NOT at module import time, + so tests can override the configuration. + """ + is_dev = os.getenv("ENVIRONMENT", "dev") == "dev" + log_level_name = os.getenv("LOG_LEVEL", "debug" if is_dev else "info").upper() + log_level = getattr(logging, log_level_name, logging.INFO) + + shared_processors: list[structlog.types.Processor] = [ + structlog.contextvars.merge_contextvars, + structlog.stdlib.add_logger_name, + structlog.stdlib.add_log_level, + structlog.stdlib.PositionalArgumentsFormatter(), + structlog.processors.TimeStamper(fmt="iso"), + structlog.processors.StackInfoRenderer(), + structlog.processors.UnicodeDecoder(), + ] + + if is_dev: + renderer = structlog.dev.ConsoleRenderer() + else: + renderer = structlog.processors.JSONRenderer() + + structlog.configure( + processors=[ + *shared_processors, + structlog.stdlib.ProcessorFormatter.wrap_for_formatter, + ], + logger_factory=structlog.stdlib.LoggerFactory(), + wrapper_class=structlog.stdlib.BoundLogger, + cache_logger_on_first_use=True, + ) + + # Capture stdlib loggers (uvicorn, SQLAlchemy, alembic) into structlog pipeline + formatter = structlog.stdlib.ProcessorFormatter( + processors=[ + structlog.stdlib.ProcessorFormatter.remove_processors_meta, + renderer, + ], + ) + + handler = logging.StreamHandler() + handler.setFormatter(formatter) + + root_logger = logging.getLogger() + root_logger.handlers.clear() + root_logger.addHandler(handler) + root_logger.setLevel(log_level) + + # Quiet down noisy libraries in dev + if is_dev: + logging.getLogger("uvicorn.access").setLevel(logging.WARNING) + + +def get_logger(name: str | None = None) -> structlog.stdlib.BoundLogger: + """Get a structlog bound logger. + + Use this instead of logging.getLogger() throughout the application. + """ + return structlog.get_logger(name) diff --git a/backend/app/main.py b/backend/app/main.py new file mode 100644 index 0000000..c5c26a3 --- /dev/null +++ b/backend/app/main.py @@ -0,0 +1,330 @@ +"""FastAPI application entry point.""" + +from contextlib import asynccontextmanager +from typing import AsyncGenerator + +import structlog +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware +from starlette.responses import JSONResponse + +from app.config import settings +from app.logging_config import configure_logging +from app.middleware.rate_limit import setup_rate_limiting +from app.middleware.request_id import RequestIDMiddleware +from app.middleware.security_headers import SecurityHeadersMiddleware +from app.observability import check_health_ready, setup_instrumentator + +logger = structlog.get_logger(__name__) + + +async def run_migrations() -> None: + """Run Alembic migrations on startup.""" + import os + import subprocess + import sys + + result = subprocess.run( + [sys.executable, "-m", "alembic", "upgrade", "head"], + capture_output=True, + text=True, + cwd=os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + ) + if result.returncode != 0: + logger.error("migration failed", stderr=result.stderr) + raise RuntimeError(f"Database migration failed: {result.stderr}") + logger.info("migrations applied successfully") + + +async def bootstrap_first_admin() -> None: + """Create the first super_admin user if no users exist.""" + if not settings.FIRST_ADMIN_EMAIL or not settings.FIRST_ADMIN_PASSWORD: + logger.info("FIRST_ADMIN_EMAIL/PASSWORD not set, skipping bootstrap") + return + + from sqlalchemy import select + + from app.database import AdminAsyncSessionLocal + from app.models.user import User, UserRole + from app.services.auth import hash_password + + async with AdminAsyncSessionLocal() as session: + # Check if any users exist (bypass RLS with admin session) + result = await session.execute(select(User).limit(1)) + existing_user = result.scalar_one_or_none() + + if existing_user: + logger.info("users already exist, skipping first admin bootstrap") + return + + # Create the first super_admin with bcrypt password. + # must_upgrade_auth=True triggers the SRP registration flow on first login. + admin = User( + email=settings.FIRST_ADMIN_EMAIL, + hashed_password=hash_password(settings.FIRST_ADMIN_PASSWORD), + name="Super Admin", + role=UserRole.SUPER_ADMIN.value, + tenant_id=None, # super_admin has no tenant + is_active=True, + must_upgrade_auth=True, + ) + session.add(admin) + await session.commit() + logger.info("created first super_admin", email=settings.FIRST_ADMIN_EMAIL) + + +@asynccontextmanager +async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]: + """Application lifespan: run migrations and bootstrap on startup.""" + from app.services.backup_scheduler import start_backup_scheduler, stop_backup_scheduler + from app.services.firmware_subscriber import start_firmware_subscriber, stop_firmware_subscriber + from app.services.metrics_subscriber import start_metrics_subscriber, stop_metrics_subscriber + from app.services.nats_subscriber import start_nats_subscriber, stop_nats_subscriber + from app.services.sse_manager import ensure_sse_streams + + # Configure structured logging FIRST -- before any other startup work + configure_logging() + + logger.info("starting TOD API") + + # Run database migrations + await run_migrations() + + # Bootstrap first admin user + await bootstrap_first_admin() + + # Start NATS subscriber for device status events. + # Wrapped in try/except so NATS failure doesn't prevent API startup -- + # allows running the API locally without NATS during frontend development. + nats_connection = None + try: + nats_connection = await start_nats_subscriber() + except Exception as exc: + logger.warning( + "NATS status subscriber could not start (API will run without it)", + error=str(exc), + ) + + # Start NATS subscriber for device metrics events (separate NATS connection). + # Same pattern -- failure is non-fatal so the API starts without full NATS stack. + metrics_nc = None + try: + metrics_nc = await start_metrics_subscriber() + except Exception as exc: + logger.warning( + "NATS metrics subscriber could not start (API will run without it)", + error=str(exc), + ) + + # Start NATS subscriber for device firmware events (separate NATS connection). + firmware_nc = None + try: + firmware_nc = await start_firmware_subscriber() + except Exception as exc: + logger.warning( + "NATS firmware subscriber could not start (API will run without it)", + error=str(exc), + ) + + # Ensure NATS streams for SSE event delivery exist (ALERT_EVENTS, OPERATION_EVENTS). + # Non-fatal -- API starts without SSE streams; they'll be created on first SSE connection. + try: + await ensure_sse_streams() + except Exception as exc: + logger.warning( + "SSE NATS streams could not be created (SSE will retry on connection)", + error=str(exc), + ) + + # Start APScheduler for automated nightly config backups. + # Non-fatal -- API starts and serves requests even without the scheduler. + try: + await start_backup_scheduler() + except Exception as exc: + logger.warning("backup scheduler could not start", error=str(exc)) + + # Register daily firmware version check (3am UTC) on the same scheduler. + try: + from app.services.firmware_service import schedule_firmware_checks + + schedule_firmware_checks() + except Exception as exc: + logger.warning("firmware check scheduler could not start", error=str(exc)) + + # Provision OpenBao Transit keys for existing tenants and migrate legacy credentials. + # Non-blocking: if OpenBao is unavailable, the dual-read path handles fallback. + if settings.OPENBAO_ADDR: + try: + from app.database import AdminAsyncSessionLocal + from app.services.key_service import provision_existing_tenants + + async with AdminAsyncSessionLocal() as openbao_session: + counts = await provision_existing_tenants(openbao_session) + logger.info( + "openbao tenant provisioning complete", + **{k: v for k, v in counts.items()}, + ) + except Exception as exc: + logger.warning( + "openbao tenant provisioning failed (will retry on next restart)", + error=str(exc), + ) + + # Recover stale push operations from previous API instance + try: + from app.services.restore_service import recover_stale_push_operations + from app.database import AdminAsyncSessionLocal as _AdminSession + + async with _AdminSession() as session: + await recover_stale_push_operations(session) + logger.info("push operation recovery check complete") + except Exception as e: + logger.error("push operation recovery failed (non-fatal): %s", e) + + # Config change subscriber (event-driven backups) + config_change_nc = None + try: + from app.services.config_change_subscriber import ( + start_config_change_subscriber, + stop_config_change_subscriber, + ) + config_change_nc = await start_config_change_subscriber() + except Exception as e: + logger.error("Config change subscriber failed to start (non-fatal): %s", e) + + # Push rollback/alert subscriber + push_rollback_nc = None + try: + from app.services.push_rollback_subscriber import ( + start_push_rollback_subscriber, + stop_push_rollback_subscriber, + ) + push_rollback_nc = await start_push_rollback_subscriber() + except Exception as e: + logger.error("Push rollback subscriber failed to start (non-fatal): %s", e) + + logger.info("startup complete, ready to serve requests") + yield + + # Shutdown + logger.info("shutting down TOD API") + await stop_backup_scheduler() + await stop_nats_subscriber(nats_connection) + await stop_metrics_subscriber(metrics_nc) + await stop_firmware_subscriber(firmware_nc) + if config_change_nc: + await stop_config_change_subscriber() + if push_rollback_nc: + await stop_push_rollback_subscriber() + + # Dispose database engine connections to release all pooled connections cleanly. + from app.database import app_engine, engine + + await app_engine.dispose() + await engine.dispose() + logger.info("database connections closed") + + +def create_app() -> FastAPI: + """Create and configure the FastAPI application.""" + app = FastAPI( + title=settings.APP_NAME, + version=settings.APP_VERSION, + description="The Other Dude — Fleet Management API", + docs_url="/docs" if settings.ENVIRONMENT == "dev" else None, + redoc_url="/redoc" if settings.ENVIRONMENT == "dev" else None, + lifespan=lifespan, + ) + + # Starlette processes middleware in LIFO order (last added = first to run). + # We want: Request -> RequestID -> CORS -> Route handler + # So add CORS first, then RequestID (it will wrap CORS). + app.add_middleware( + CORSMiddleware, + allow_origins=settings.get_cors_origins(), + allow_credentials=True, + allow_methods=["GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"], + allow_headers=["Authorization", "Content-Type", "X-Request-ID"], + ) + app.add_middleware(SecurityHeadersMiddleware, environment=settings.ENVIRONMENT) + setup_rate_limiting(app) # Register 429 exception handler (no middleware added) + app.add_middleware(RequestIDMiddleware) + + # Include routers + from app.routers.alerts import router as alerts_router + from app.routers.auth import router as auth_router + from app.routers.sse import router as sse_router + from app.routers.config_backups import router as config_router + from app.routers.config_editor import router as config_editor_router + from app.routers.device_groups import router as device_groups_router + from app.routers.device_tags import router as device_tags_router + from app.routers.devices import router as devices_router + from app.routers.firmware import router as firmware_router + from app.routers.metrics import router as metrics_router + from app.routers.events import router as events_router + from app.routers.clients import router as clients_router + from app.routers.device_logs import router as device_logs_router + from app.routers.templates import router as templates_router + from app.routers.tenants import router as tenants_router + from app.routers.reports import router as reports_router + from app.routers.topology import router as topology_router + from app.routers.users import router as users_router + from app.routers.audit_logs import router as audit_logs_router + from app.routers.api_keys import router as api_keys_router + from app.routers.maintenance_windows import router as maintenance_windows_router + from app.routers.vpn import router as vpn_router + from app.routers.certificates import router as certificates_router + from app.routers.transparency import router as transparency_router + from app.routers.settings import router as settings_router + + app.include_router(auth_router, prefix="/api") + app.include_router(tenants_router, prefix="/api") + app.include_router(users_router, prefix="/api") + app.include_router(devices_router, prefix="/api") + app.include_router(device_groups_router, prefix="/api") + app.include_router(device_tags_router, prefix="/api") + app.include_router(metrics_router, prefix="/api") + app.include_router(config_router, prefix="/api") + app.include_router(firmware_router, prefix="/api") + app.include_router(alerts_router, prefix="/api") + app.include_router(config_editor_router, prefix="/api") + app.include_router(events_router, prefix="/api") + app.include_router(device_logs_router, prefix="/api") + app.include_router(templates_router, prefix="/api") + app.include_router(clients_router, prefix="/api") + app.include_router(topology_router, prefix="/api") + app.include_router(sse_router, prefix="/api") + app.include_router(audit_logs_router, prefix="/api") + app.include_router(reports_router, prefix="/api") + app.include_router(api_keys_router, prefix="/api") + app.include_router(maintenance_windows_router, prefix="/api") + app.include_router(vpn_router, prefix="/api") + app.include_router(certificates_router, prefix="/api/certificates", tags=["certificates"]) + app.include_router(transparency_router, prefix="/api") + app.include_router(settings_router, prefix="/api") + + # Health check endpoints + @app.get("/health", tags=["health"]) + async def health_check() -> dict: + """Liveness probe -- returns 200 if the process is alive.""" + return {"status": "ok", "version": settings.APP_VERSION} + + @app.get("/health/ready", tags=["health"]) + async def health_ready() -> JSONResponse: + """Readiness probe -- returns 200 only when PostgreSQL, Redis, and NATS are healthy.""" + result = await check_health_ready() + status_code = 200 if result["status"] == "healthy" else 503 + return JSONResponse(content=result, status_code=status_code) + + @app.get("/api/health", tags=["health"]) + async def api_health_check() -> dict: + """Backward-compatible health endpoint under /api prefix.""" + return {"status": "ok", "version": settings.APP_VERSION} + + # Prometheus metrics instrumentation -- MUST be after routers so all routes are captured + setup_instrumentator(app) + + return app + + +app = create_app() diff --git a/backend/app/middleware/__init__.py b/backend/app/middleware/__init__.py new file mode 100644 index 0000000..b437a94 --- /dev/null +++ b/backend/app/middleware/__init__.py @@ -0,0 +1 @@ +"""FastAPI middleware and dependencies for auth, tenant context, and RBAC.""" diff --git a/backend/app/middleware/rate_limit.py b/backend/app/middleware/rate_limit.py new file mode 100644 index 0000000..184c913 --- /dev/null +++ b/backend/app/middleware/rate_limit.py @@ -0,0 +1,48 @@ +"""Rate limiting middleware using slowapi with Redis backend. + +Per-route rate limits only -- no global limits to avoid blocking the +Go poller, NATS subscribers, and health check endpoints. + +Rate limit data uses Redis DB 1 (separate from app data in DB 0). +""" + +from fastapi import FastAPI +from slowapi import Limiter, _rate_limit_exceeded_handler +from slowapi.errors import RateLimitExceeded +from slowapi.util import get_remote_address + +from app.config import settings + + +def _get_redis_url() -> str: + """Return Redis URL pointing to DB 1 for rate limit storage. + + Keeps rate limit counters separate from application data in DB 0. + """ + url = settings.REDIS_URL + if url.endswith("/0"): + return url[:-2] + "/1" + # If no DB specified or different DB, append /1 + if url.rstrip("/").split("/")[-1].isdigit(): + # Replace existing DB number + parts = url.rsplit("/", 1) + return parts[0] + "/1" + return url.rstrip("/") + "/1" + + +limiter = Limiter( + key_func=get_remote_address, + storage_uri=_get_redis_url(), + default_limits=[], # No global limits -- per-route only +) + + +def setup_rate_limiting(app: FastAPI) -> None: + """Register the rate limiter on the FastAPI app. + + This sets app.state.limiter (required by slowapi) and registers + the 429 exception handler. It does NOT add middleware -- the + @limiter.limit() decorators handle actual limiting per-route. + """ + app.state.limiter = limiter + app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler) diff --git a/backend/app/middleware/rbac.py b/backend/app/middleware/rbac.py new file mode 100644 index 0000000..ca6129a --- /dev/null +++ b/backend/app/middleware/rbac.py @@ -0,0 +1,186 @@ +""" +Role-Based Access Control (RBAC) middleware. + +Provides dependency factories for enforcing role-based access control +on FastAPI routes. Roles are hierarchical: + + super_admin > tenant_admin > operator > viewer + +Role permissions per plan TENANT-04/05/06: + - viewer: GET endpoints only (read-only) + - operator: GET + device/config management endpoints + - tenant_admin: full access within their tenant + - super_admin: full access across all tenants +""" + +from typing import Callable + +from fastapi import Depends, HTTPException, Request, status +from fastapi.params import Depends as DependsClass + +from app.middleware.tenant_context import CurrentUser, get_current_user + +# Role hierarchy (higher index = more privilege) +# api_key is at operator level for RBAC checks; fine-grained access controlled by scopes. +ROLE_HIERARCHY = { + "viewer": 0, + "api_key": 1, + "operator": 1, + "tenant_admin": 2, + "super_admin": 3, +} + + +def _get_role_level(role: str) -> int: + """Return numeric privilege level for a role string.""" + return ROLE_HIERARCHY.get(role, -1) + + +def require_role(*allowed_roles: str) -> Callable: + """ + FastAPI dependency factory that checks the current user's role. + + Usage: + @router.post("/items", dependencies=[Depends(require_role("tenant_admin", "super_admin"))]) + + Args: + *allowed_roles: Role strings that are permitted to access the endpoint + + Returns: + FastAPI dependency that raises 403 if the role is insufficient + """ + async def dependency( + current_user: CurrentUser = Depends(get_current_user), + ) -> CurrentUser: + if current_user.role not in allowed_roles: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail=f"Access denied. Required roles: {', '.join(allowed_roles)}. " + f"Your role: {current_user.role}", + ) + return current_user + + return dependency + + +def require_min_role(min_role: str) -> Callable: + """ + Dependency factory that allows any role at or above the minimum level. + + Usage: + @router.get("/items", dependencies=[Depends(require_min_role("operator"))]) + # Allows: operator, tenant_admin, super_admin + # Denies: viewer + """ + min_level = _get_role_level(min_role) + + async def dependency( + current_user: CurrentUser = Depends(get_current_user), + ) -> CurrentUser: + user_level = _get_role_level(current_user.role) + if user_level < min_level: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail=f"Access denied. Minimum required role: {min_role}. " + f"Your role: {current_user.role}", + ) + return current_user + + return dependency + + +def require_write_access() -> Callable: + """ + Dependency that enforces viewer read-only restriction. + + Viewers are NOT allowed on POST/PUT/PATCH/DELETE endpoints. + Call this on any mutating endpoint to deny viewers. + """ + async def dependency( + request: Request, + current_user: CurrentUser = Depends(get_current_user), + ) -> CurrentUser: + if request.method in ("POST", "PUT", "PATCH", "DELETE"): + if current_user.role == "viewer": + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Viewers have read-only access. " + "Contact your administrator to request elevated permissions.", + ) + return current_user + + return dependency + + +def require_scope(scope: str) -> DependsClass: + """FastAPI dependency that checks API key scopes. + + No-op for regular users (JWT auth) -- scopes only apply to API keys. + For API key users: checks that the required scope is in the key's scope list. + + Returns a Depends() instance so it can be used in dependency lists: + @router.get("/items", dependencies=[require_scope("devices:read")]) + + Args: + scope: Required scope string (e.g. "devices:read", "config:write"). + + Raises: + HTTPException 403 if the API key is missing the required scope. + """ + async def _check_scope( + current_user: CurrentUser = Depends(get_current_user), + ) -> CurrentUser: + if current_user.role == "api_key": + if not current_user.scopes or scope not in current_user.scopes: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail=f"API key missing required scope: {scope}", + ) + return current_user + + return Depends(_check_scope) + + +# Pre-built convenience dependencies + +async def require_super_admin( + current_user: CurrentUser = Depends(get_current_user), +) -> CurrentUser: + """Require super_admin role (portal-wide admin).""" + if current_user.role != "super_admin": + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied. Super admin role required.", + ) + return current_user + + +async def require_tenant_admin_or_above( + current_user: CurrentUser = Depends(get_current_user), +) -> CurrentUser: + """Require tenant_admin or super_admin role.""" + if current_user.role not in ("tenant_admin", "super_admin"): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied. Tenant admin or higher role required.", + ) + return current_user + + +async def require_operator_or_above( + current_user: CurrentUser = Depends(get_current_user), +) -> CurrentUser: + """Require operator, tenant_admin, or super_admin role.""" + if current_user.role not in ("operator", "tenant_admin", "super_admin"): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied. Operator or higher role required.", + ) + return current_user + + +async def require_authenticated( + current_user: CurrentUser = Depends(get_current_user), +) -> CurrentUser: + """Require any authenticated user (viewer and above).""" + return current_user diff --git a/backend/app/middleware/request_id.py b/backend/app/middleware/request_id.py new file mode 100644 index 0000000..1e48300 --- /dev/null +++ b/backend/app/middleware/request_id.py @@ -0,0 +1,67 @@ +"""Request ID middleware for structured logging context. + +Generates or extracts a request ID for every incoming request and binds it +(along with tenant_id from JWT) to structlog's contextvars so that all log +lines emitted during the request include these correlation fields. +""" + +import uuid + +import structlog +from starlette.middleware.base import BaseHTTPMiddleware +from starlette.requests import Request +from starlette.responses import Response + + +class RequestIDMiddleware(BaseHTTPMiddleware): + """Middleware that binds request_id and tenant_id to structlog context.""" + + async def dispatch(self, request: Request, call_next): + # CRITICAL: Clear stale context from previous request to prevent leaks + structlog.contextvars.clear_contextvars() + + # Generate or extract request ID + request_id = request.headers.get("X-Request-ID", str(uuid.uuid4())) + + # Best-effort tenant_id extraction from JWT (does not fail if no token) + tenant_id = self._extract_tenant_id(request) + + # Bind to structlog context -- all subsequent log calls include these fields + structlog.contextvars.bind_contextvars( + request_id=request_id, + tenant_id=tenant_id, + ) + + response: Response = await call_next(request) + response.headers["X-Request-ID"] = request_id + return response + + def _extract_tenant_id(self, request: Request) -> str | None: + """Best-effort extraction of tenant_id from JWT. + + Looks in cookies first (access_token), then Authorization header. + Returns None if no valid token is found -- this is fine for + unauthenticated endpoints like /login. + """ + token = request.cookies.get("access_token") + if not token: + auth_header = request.headers.get("Authorization", "") + if auth_header.startswith("Bearer "): + token = auth_header[7:] + + if not token: + return None + + try: + from jose import jwt as jose_jwt + + from app.config import settings + + payload = jose_jwt.decode( + token, + settings.JWT_SECRET_KEY, + algorithms=[settings.JWT_ALGORITHM], + ) + return payload.get("tenant_id") + except Exception: + return None diff --git a/backend/app/middleware/security_headers.py b/backend/app/middleware/security_headers.py new file mode 100644 index 0000000..c3a0ec3 --- /dev/null +++ b/backend/app/middleware/security_headers.py @@ -0,0 +1,79 @@ +"""Security response headers middleware. + +Adds standard security headers to all API responses: +- X-Content-Type-Options: nosniff (prevent MIME sniffing) +- X-Frame-Options: DENY (prevent clickjacking) +- Referrer-Policy: strict-origin-when-cross-origin +- Cache-Control: no-store (prevent browser caching of API responses) +- Strict-Transport-Security (HSTS, production only -- breaks plain HTTP dev) +- Content-Security-Policy (strict in production, relaxed for dev HMR) + +CSP directives: +- script-src 'self' (production) blocks inline scripts -- XSS mitigation +- style-src 'unsafe-inline' required for Tailwind, Framer Motion, Radix, Sonner +- connect-src includes wss:/ws: for SSE and WebSocket connections +- Dev mode adds 'unsafe-inline' and 'unsafe-eval' for Vite HMR +""" + +from starlette.middleware.base import BaseHTTPMiddleware +from starlette.requests import Request +from starlette.responses import Response + +# Production CSP: strict -- no inline scripts allowed +_CSP_PRODUCTION = "; ".join([ + "default-src 'self'", + "script-src 'self'", + "style-src 'self' 'unsafe-inline'", + "img-src 'self' data: blob:", + "font-src 'self'", + "connect-src 'self' wss: ws:", + "worker-src 'self'", + "frame-ancestors 'none'", + "base-uri 'self'", + "form-action 'self'", +]) + +# Development CSP: relaxed for Vite HMR (hot module replacement) +_CSP_DEV = "; ".join([ + "default-src 'self'", + "script-src 'self' 'unsafe-inline' 'unsafe-eval'", + "style-src 'self' 'unsafe-inline'", + "img-src 'self' data: blob:", + "font-src 'self'", + "connect-src 'self' http://localhost:* ws://localhost:* wss:", + "worker-src 'self' blob:", + "frame-ancestors 'none'", + "base-uri 'self'", + "form-action 'self'", +]) + + +class SecurityHeadersMiddleware(BaseHTTPMiddleware): + """Add security headers to every API response.""" + + def __init__(self, app, environment: str = "dev"): + super().__init__(app) + self.is_production = environment != "dev" + + async def dispatch(self, request: Request, call_next) -> Response: + response = await call_next(request) + + # Always-on security headers + response.headers["X-Content-Type-Options"] = "nosniff" + response.headers["X-Frame-Options"] = "DENY" + response.headers["Referrer-Policy"] = "strict-origin-when-cross-origin" + response.headers["Cache-Control"] = "no-store" + + # Content-Security-Policy (environment-aware) + if self.is_production: + response.headers["Content-Security-Policy"] = _CSP_PRODUCTION + else: + response.headers["Content-Security-Policy"] = _CSP_DEV + + # HSTS only in production (plain HTTP in dev would be blocked) + if self.is_production: + response.headers["Strict-Transport-Security"] = ( + "max-age=31536000; includeSubDomains" + ) + + return response diff --git a/backend/app/middleware/tenant_context.py b/backend/app/middleware/tenant_context.py new file mode 100644 index 0000000..438ccae --- /dev/null +++ b/backend/app/middleware/tenant_context.py @@ -0,0 +1,177 @@ +""" +Tenant context middleware and current user dependency. + +Extracts JWT from Authorization header (Bearer token) or httpOnly cookie, +validates it, and provides current user context for request handlers. + +For tenant-scoped users: sets SET LOCAL app.current_tenant on the DB session. +For super_admin: uses special 'super_admin' context that grants cross-tenant access. +""" + +import uuid +from typing import Annotated, Optional + +from fastapi import Cookie, Depends, HTTPException, Request, status +from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db, set_tenant_context +from app.services.auth import verify_token + +# Optional HTTP Bearer scheme (won't raise 403 automatically — we handle auth ourselves) +bearer_scheme = HTTPBearer(auto_error=False) + + +class CurrentUser: + """Represents the currently authenticated user extracted from JWT or API key.""" + + def __init__( + self, + user_id: uuid.UUID, + tenant_id: Optional[uuid.UUID], + role: str, + scopes: Optional[list[str]] = None, + ) -> None: + self.user_id = user_id + self.tenant_id = tenant_id + self.role = role + self.scopes = scopes + + @property + def is_super_admin(self) -> bool: + return self.role == "super_admin" + + @property + def is_api_key(self) -> bool: + return self.role == "api_key" + + def __repr__(self) -> str: + return f"" + + +def _extract_token( + request: Request, + credentials: Optional[HTTPAuthorizationCredentials], + access_token: Optional[str], +) -> Optional[str]: + """ + Extract JWT token from Authorization header or httpOnly cookie. + + Priority: Authorization header > cookie. + """ + if credentials and credentials.scheme.lower() == "bearer": + return credentials.credentials + + if access_token: + return access_token + + return None + + +async def get_current_user( + request: Request, + credentials: Annotated[Optional[HTTPAuthorizationCredentials], Depends(bearer_scheme)] = None, + access_token: Annotated[Optional[str], Cookie()] = None, + db: AsyncSession = Depends(get_db), +) -> CurrentUser: + """ + FastAPI dependency that extracts and validates the current user from JWT. + + Supports both Bearer token (Authorization header) and httpOnly cookie. + Sets the tenant context on the database session for RLS enforcement. + + Raises: + HTTPException 401: If no token provided or token is invalid + """ + token = _extract_token(request, credentials, access_token) + + if not token: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Not authenticated", + headers={"WWW-Authenticate": "Bearer"}, + ) + + # API key authentication: detect mktp_ prefix and validate via api_key_service + if token.startswith("mktp_"): + from app.services.api_key_service import validate_api_key + + key_data = await validate_api_key(token) + if not key_data: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid, expired, or revoked API key", + headers={"WWW-Authenticate": "Bearer"}, + ) + + tenant_id = key_data["tenant_id"] + # Set tenant context on the request-scoped DB session for RLS + await set_tenant_context(db, str(tenant_id)) + + return CurrentUser( + user_id=key_data["user_id"], + tenant_id=tenant_id, + role="api_key", + scopes=key_data["scopes"], + ) + + # Decode and validate the JWT + payload = verify_token(token, expected_type="access") + + user_id_str = payload.get("sub") + tenant_id_str = payload.get("tenant_id") + role = payload.get("role") + + if not user_id_str or not role: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid token payload", + headers={"WWW-Authenticate": "Bearer"}, + ) + + try: + user_id = uuid.UUID(user_id_str) + except ValueError: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid token payload", + ) + + tenant_id: Optional[uuid.UUID] = None + if tenant_id_str: + try: + tenant_id = uuid.UUID(tenant_id_str) + except ValueError: + pass + + # Set the tenant context on the database session for RLS enforcement + if role == "super_admin": + # super_admin uses special context that grants cross-tenant access + await set_tenant_context(db, "super_admin") + elif tenant_id: + await set_tenant_context(db, str(tenant_id)) + else: + # Non-super_admin without tenant — deny access + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid token: no tenant context", + ) + + return CurrentUser( + user_id=user_id, + tenant_id=tenant_id, + role=role, + ) + + +async def get_optional_current_user( + request: Request, + credentials: Annotated[Optional[HTTPAuthorizationCredentials], Depends(bearer_scheme)] = None, + access_token: Annotated[Optional[str], Cookie()] = None, + db: AsyncSession = Depends(get_db), +) -> Optional[CurrentUser]: + """Same as get_current_user but returns None instead of raising 401.""" + try: + return await get_current_user(request, credentials, access_token, db) + except HTTPException: + return None diff --git a/backend/app/models/__init__.py b/backend/app/models/__init__.py new file mode 100644 index 0000000..3f79d00 --- /dev/null +++ b/backend/app/models/__init__.py @@ -0,0 +1,35 @@ +"""SQLAlchemy ORM models.""" + +from app.models.tenant import Tenant +from app.models.user import User, UserRole +from app.models.device import Device, DeviceGroup, DeviceTag, DeviceGroupMembership, DeviceTagAssignment, DeviceStatus +from app.models.alert import AlertRule, NotificationChannel, AlertRuleChannel, AlertEvent +from app.models.firmware import FirmwareVersion, FirmwareUpgradeJob +from app.models.config_template import ConfigTemplate, ConfigTemplateTag, TemplatePushJob +from app.models.audit_log import AuditLog +from app.models.maintenance_window import MaintenanceWindow +from app.models.api_key import ApiKey + +__all__ = [ + "Tenant", + "User", + "UserRole", + "Device", + "DeviceGroup", + "DeviceTag", + "DeviceGroupMembership", + "DeviceTagAssignment", + "DeviceStatus", + "AlertRule", + "NotificationChannel", + "AlertRuleChannel", + "AlertEvent", + "FirmwareVersion", + "FirmwareUpgradeJob", + "ConfigTemplate", + "ConfigTemplateTag", + "TemplatePushJob", + "AuditLog", + "MaintenanceWindow", + "ApiKey", +] diff --git a/backend/app/models/alert.py b/backend/app/models/alert.py new file mode 100644 index 0000000..cd798f8 --- /dev/null +++ b/backend/app/models/alert.py @@ -0,0 +1,177 @@ +"""Alert system ORM models: rules, notification channels, and alert events.""" + +import uuid +from datetime import datetime + +from sqlalchemy import ( + Boolean, + DateTime, + ForeignKey, + Integer, + LargeBinary, + Numeric, + Text, + func, +) +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import Mapped, mapped_column + +from app.database import Base + + +class AlertRule(Base): + """Configurable alert threshold rule. + + Rules can be tenant-wide (device_id=NULL), device-specific, or group-scoped. + When a metric breaches the threshold for duration_polls consecutive polls, + an alert fires. + """ + __tablename__ = "alert_rules" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + ) + device_id: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + ForeignKey("devices.id", ondelete="CASCADE"), + nullable=True, + ) + group_id: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + ForeignKey("device_groups.id", ondelete="SET NULL"), + nullable=True, + ) + name: Mapped[str] = mapped_column(Text, nullable=False) + metric: Mapped[str] = mapped_column(Text, nullable=False) + operator: Mapped[str] = mapped_column(Text, nullable=False) + threshold: Mapped[float] = mapped_column(Numeric, nullable=False) + duration_polls: Mapped[int] = mapped_column(Integer, nullable=False, default=1, server_default="1") + severity: Mapped[str] = mapped_column(Text, nullable=False) + enabled: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True, server_default="true") + is_default: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False, server_default="false") + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + + def __repr__(self) -> str: + return f"" + + +class NotificationChannel(Base): + """Email, webhook, or Slack notification destination.""" + __tablename__ = "notification_channels" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + ) + name: Mapped[str] = mapped_column(Text, nullable=False) + channel_type: Mapped[str] = mapped_column(Text, nullable=False) # "email", "webhook", or "slack" + # SMTP fields (email channels) + smtp_host: Mapped[str | None] = mapped_column(Text, nullable=True) + smtp_port: Mapped[int | None] = mapped_column(Integer, nullable=True) + smtp_user: Mapped[str | None] = mapped_column(Text, nullable=True) + smtp_password: Mapped[bytes | None] = mapped_column(LargeBinary, nullable=True) # AES-256-GCM encrypted + smtp_use_tls: Mapped[bool] = mapped_column(Boolean, default=False, server_default="false") + from_address: Mapped[str | None] = mapped_column(Text, nullable=True) + to_address: Mapped[str | None] = mapped_column(Text, nullable=True) + # Webhook fields + webhook_url: Mapped[str | None] = mapped_column(Text, nullable=True) + # Slack fields + slack_webhook_url: Mapped[str | None] = mapped_column(Text, nullable=True) + # OpenBao Transit ciphertext (dual-write migration) + smtp_password_transit: Mapped[str | None] = mapped_column(Text, nullable=True) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + + def __repr__(self) -> str: + return f"" + + +class AlertRuleChannel(Base): + """Many-to-many association between alert rules and notification channels.""" + __tablename__ = "alert_rule_channels" + + rule_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("alert_rules.id", ondelete="CASCADE"), + primary_key=True, + ) + channel_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("notification_channels.id", ondelete="CASCADE"), + primary_key=True, + ) + + +class AlertEvent(Base): + """Record of an alert firing, resolving, or flapping. + + rule_id is NULL for system-level alerts (e.g., device offline). + """ + __tablename__ = "alert_events" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + rule_id: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + ForeignKey("alert_rules.id", ondelete="SET NULL"), + nullable=True, + ) + device_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("devices.id", ondelete="CASCADE"), + nullable=False, + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + ) + status: Mapped[str] = mapped_column(Text, nullable=False) # "firing", "resolved", "flapping" + severity: Mapped[str] = mapped_column(Text, nullable=False) + metric: Mapped[str | None] = mapped_column(Text, nullable=True) + value: Mapped[float | None] = mapped_column(Numeric, nullable=True) + threshold: Mapped[float | None] = mapped_column(Numeric, nullable=True) + message: Mapped[str | None] = mapped_column(Text, nullable=True) + is_flapping: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False, server_default="false") + acknowledged_at: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True) + acknowledged_by: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + ForeignKey("users.id", ondelete="SET NULL"), + nullable=True, + ) + silenced_until: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True) + fired_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + resolved_at: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True) + + def __repr__(self) -> str: + return f"" diff --git a/backend/app/models/api_key.py b/backend/app/models/api_key.py new file mode 100644 index 0000000..bef874a --- /dev/null +++ b/backend/app/models/api_key.py @@ -0,0 +1,60 @@ +"""API key ORM model for tenant-scoped programmatic access.""" + +import uuid +from datetime import datetime +from typing import Optional + +from sqlalchemy import DateTime, ForeignKey, Text, func +from sqlalchemy.dialects.postgresql import JSONB, UUID +from sqlalchemy.orm import Mapped, mapped_column + +from app.database import Base + + +class ApiKey(Base): + """Tracks API keys for programmatic access to the portal. + + Keys are stored as SHA-256 hashes (never plaintext). + Scoped permissions limit what each key can do. + Revocation is soft-delete (sets revoked_at, row preserved for audit). + """ + + __tablename__ = "api_keys" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + ) + user_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("users.id", ondelete="CASCADE"), + nullable=False, + ) + name: Mapped[str] = mapped_column(Text, nullable=False) + key_prefix: Mapped[str] = mapped_column(Text, nullable=False) + key_hash: Mapped[str] = mapped_column(Text, nullable=False, unique=True) + scopes: Mapped[list] = mapped_column(JSONB, nullable=False, server_default="'[]'::jsonb") + expires_at: Mapped[Optional[datetime]] = mapped_column( + DateTime(timezone=True), nullable=True + ) + last_used_at: Mapped[Optional[datetime]] = mapped_column( + DateTime(timezone=True), nullable=True + ) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + revoked_at: Mapped[Optional[datetime]] = mapped_column( + DateTime(timezone=True), nullable=True + ) + + def __repr__(self) -> str: + return f"" diff --git a/backend/app/models/audit_log.py b/backend/app/models/audit_log.py new file mode 100644 index 0000000..e58f1f2 --- /dev/null +++ b/backend/app/models/audit_log.py @@ -0,0 +1,59 @@ +"""Audit log model for centralized audit trail.""" + +import uuid +from datetime import datetime +from typing import Any + +from sqlalchemy import DateTime, ForeignKey, String, Text, func +from sqlalchemy.dialects.postgresql import JSONB, UUID +from sqlalchemy.orm import Mapped, mapped_column + +from app.database import Base + + +class AuditLog(Base): + """Records all auditable actions in the system (config changes, CRUD, auth events).""" + + __tablename__ = "audit_logs" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + user_id: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + ForeignKey("users.id", ondelete="SET NULL"), + nullable=True, + ) + action: Mapped[str] = mapped_column(String(100), nullable=False) + resource_type: Mapped[str | None] = mapped_column(String(50), nullable=True) + resource_id: Mapped[str | None] = mapped_column(String(255), nullable=True) + device_id: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + ForeignKey("devices.id", ondelete="SET NULL"), + nullable=True, + ) + details: Mapped[dict[str, Any]] = mapped_column( + JSONB, + nullable=False, + server_default="{}", + ) + # Transit-encrypted details JSON (vault:v1:...) — set when details are encrypted + encrypted_details: Mapped[str | None] = mapped_column(Text, nullable=True) + ip_address: Mapped[str | None] = mapped_column(String(45), nullable=True) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + + def __repr__(self) -> str: + return f"" diff --git a/backend/app/models/certificate.py b/backend/app/models/certificate.py new file mode 100644 index 0000000..98149f8 --- /dev/null +++ b/backend/app/models/certificate.py @@ -0,0 +1,140 @@ +"""Certificate Authority and Device Certificate ORM models. + +Supports the Internal Certificate Authority feature: +- CertificateAuthority: one per tenant, stores encrypted CA private key + public cert +- DeviceCertificate: per-device signed certificate with lifecycle status tracking +""" + +import uuid +from datetime import datetime + +from sqlalchemy import DateTime, ForeignKey, LargeBinary, String, Text, func +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import Mapped, mapped_column + +from app.database import Base + + +class CertificateAuthority(Base): + """Per-tenant root Certificate Authority. + + Each tenant has at most one CA. The CA private key is encrypted with + AES-256-GCM before storage (using the same pattern as device credentials). + The public cert_pem is not sensitive and can be distributed freely. + """ + + __tablename__ = "certificate_authorities" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + unique=True, + ) + common_name: Mapped[str] = mapped_column(String(255), nullable=False) + cert_pem: Mapped[str] = mapped_column(Text, nullable=False) + encrypted_private_key: Mapped[bytes] = mapped_column( + LargeBinary, nullable=False + ) + serial_number: Mapped[str] = mapped_column(String(64), nullable=False) + fingerprint_sha256: Mapped[str] = mapped_column(String(95), nullable=False) + not_valid_before: Mapped[datetime] = mapped_column( + DateTime(timezone=True), nullable=False + ) + not_valid_after: Mapped[datetime] = mapped_column( + DateTime(timezone=True), nullable=False + ) + # OpenBao Transit ciphertext (dual-write migration) + encrypted_private_key_transit: Mapped[str | None] = mapped_column( + Text, nullable=True + ) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + + def __repr__(self) -> str: + return ( + f"" + ) + + +class DeviceCertificate(Base): + """Per-device TLS certificate signed by the tenant's CA. + + Status lifecycle: + issued -> deploying -> deployed -> expiring -> expired + \\-> revoked + \\-> superseded (when rotated) + """ + + __tablename__ = "device_certificates" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + ) + device_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("devices.id", ondelete="CASCADE"), + nullable=False, + ) + ca_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("certificate_authorities.id", ondelete="CASCADE"), + nullable=False, + ) + common_name: Mapped[str] = mapped_column(String(255), nullable=False) + serial_number: Mapped[str] = mapped_column(String(64), nullable=False) + fingerprint_sha256: Mapped[str] = mapped_column(String(95), nullable=False) + cert_pem: Mapped[str] = mapped_column(Text, nullable=False) + encrypted_private_key: Mapped[bytes] = mapped_column( + LargeBinary, nullable=False + ) + not_valid_before: Mapped[datetime] = mapped_column( + DateTime(timezone=True), nullable=False + ) + not_valid_after: Mapped[datetime] = mapped_column( + DateTime(timezone=True), nullable=False + ) + # OpenBao Transit ciphertext (dual-write migration) + encrypted_private_key_transit: Mapped[str | None] = mapped_column( + Text, nullable=True + ) + status: Mapped[str] = mapped_column( + String(20), nullable=False, server_default="issued" + ) + deployed_at: Mapped[datetime | None] = mapped_column( + DateTime(timezone=True), nullable=True + ) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + + def __repr__(self) -> str: + return ( + f"" + ) diff --git a/backend/app/models/config_backup.py b/backend/app/models/config_backup.py new file mode 100644 index 0000000..fe09d3b --- /dev/null +++ b/backend/app/models/config_backup.py @@ -0,0 +1,178 @@ +"""SQLAlchemy models for config backup tables.""" + +import uuid +from datetime import datetime + +from sqlalchemy import Boolean, DateTime, ForeignKey, Integer, LargeBinary, SmallInteger, String, Text, UniqueConstraint, func +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import Mapped, mapped_column, relationship + +from app.database import Base + + +class ConfigBackupRun(Base): + """Metadata for a single config backup run. + + The actual config content (export.rsc and backup.bin) lives in the tenant's + bare git repository at GIT_STORE_PATH/{tenant_id}.git. This table provides + the timeline view and per-run metadata without duplicating file content in + PostgreSQL. + """ + + __tablename__ = "config_backup_runs" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + device_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("devices.id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + # Git commit hash in the tenant's bare repo where this backup is stored. + commit_sha: Mapped[str] = mapped_column(Text, nullable=False) + # Trigger type: 'scheduled' | 'manual' | 'pre-restore' | 'checkpoint' | 'config-change' + trigger_type: Mapped[str] = mapped_column(String(20), nullable=False) + # Lines added/removed vs the prior export.rsc for this device. + # NULL for the first backup (no prior version to diff against). + lines_added: Mapped[int | None] = mapped_column(Integer, nullable=True) + lines_removed: Mapped[int | None] = mapped_column(Integer, nullable=True) + # Encryption metadata: NULL=plaintext, 1=client-side AES-GCM, 2=OpenBao Transit + encryption_tier: Mapped[int | None] = mapped_column(SmallInteger, nullable=True) + # 12-byte AES-GCM nonce for Tier 1 (client-side) backups; NULL for plaintext/Transit + encryption_nonce: Mapped[bytes | None] = mapped_column(LargeBinary, nullable=True) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + + def __repr__(self) -> str: + return ( + f"" + ) + + +class ConfigBackupSchedule(Base): + """Per-tenant default and per-device override backup schedule config. + + A row with device_id=NULL is the tenant-level default (daily at 2am). + A row with a specific device_id overrides the tenant default for that device. + """ + + __tablename__ = "config_backup_schedules" + __table_args__ = ( + UniqueConstraint("tenant_id", "device_id", name="uq_backup_schedule_tenant_device"), + ) + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + # NULL = tenant-level default schedule; non-NULL = device-specific override. + device_id: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + ForeignKey("devices.id", ondelete="CASCADE"), + nullable=True, + ) + # Standard cron expression (5 fields). Default: daily at 2am UTC. + cron_expression: Mapped[str] = mapped_column( + String(100), + nullable=False, + default="0 2 * * *", + server_default="0 2 * * *", + ) + enabled: Mapped[bool] = mapped_column( + Boolean, + nullable=False, + default=True, + server_default="TRUE", + ) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + + def __repr__(self) -> str: + scope = f"device={self.device_id}" if self.device_id else f"tenant={self.tenant_id}" + return f"" + + +class ConfigPushOperation(Base): + """Tracks pending two-phase config push operations for panic-revert recovery. + + Before pushing a config, a row is inserted with status='pending_verification'. + If the API pod restarts during the 60-second verification window, the startup + handler checks this table and either commits (deletes the RouterOS scheduler + job) or marks the operation as 'failed'. This prevents the panic-revert + scheduler from firing and reverting a successful push after an API restart. + + See Pitfall 6 in 04-RESEARCH.md for the full failure scenario. + """ + + __tablename__ = "config_push_operations" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + device_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("devices.id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + # Git commit SHA we'd revert to if the push fails. + pre_push_commit_sha: Mapped[str] = mapped_column(Text, nullable=False) + # RouterOS scheduler job name created on the device for panic-revert. + scheduler_name: Mapped[str] = mapped_column(String(255), nullable=False) + # 'pending_verification' | 'committed' | 'reverted' | 'failed' + status: Mapped[str] = mapped_column( + String(30), + nullable=False, + default="pending_verification", + server_default="pending_verification", + ) + started_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + completed_at: Mapped[datetime | None] = mapped_column( + DateTime(timezone=True), + nullable=True, + ) + + def __repr__(self) -> str: + return ( + f"" + ) diff --git a/backend/app/models/config_template.py b/backend/app/models/config_template.py new file mode 100644 index 0000000..b375181 --- /dev/null +++ b/backend/app/models/config_template.py @@ -0,0 +1,153 @@ +"""Config template, template tag, and template push job models.""" + +import uuid +from datetime import datetime + +from sqlalchemy import ( + DateTime, + Float, + ForeignKey, + String, + Text, + UniqueConstraint, + func, +) +from sqlalchemy.dialects.postgresql import JSON, UUID +from sqlalchemy.orm import Mapped, mapped_column, relationship + +from app.database import Base + + +class ConfigTemplate(Base): + __tablename__ = "config_templates" + __table_args__ = ( + UniqueConstraint("tenant_id", "name", name="uq_config_templates_tenant_name"), + ) + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + name: Mapped[str] = mapped_column(Text, nullable=False) + description: Mapped[str | None] = mapped_column(Text, nullable=True) + content: Mapped[str] = mapped_column(Text, nullable=False) + variables: Mapped[list] = mapped_column(JSON, nullable=False, default=list, server_default="[]") + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + onupdate=func.now(), + nullable=False, + ) + + # Relationships + tenant: Mapped["Tenant"] = relationship("Tenant") # type: ignore[name-defined] + tags: Mapped[list["ConfigTemplateTag"]] = relationship( + "ConfigTemplateTag", back_populates="template", cascade="all, delete-orphan" + ) + + def __repr__(self) -> str: + return f"" + + +class ConfigTemplateTag(Base): + __tablename__ = "config_template_tags" + __table_args__ = ( + UniqueConstraint("template_id", "name", name="uq_config_template_tags_template_name"), + ) + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + ) + name: Mapped[str] = mapped_column(String(100), nullable=False) + template_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("config_templates.id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + + # Relationships + template: Mapped["ConfigTemplate"] = relationship( + "ConfigTemplate", back_populates="tags" + ) + + def __repr__(self) -> str: + return f"" + + +class TemplatePushJob(Base): + __tablename__ = "template_push_jobs" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + ) + template_id: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + ForeignKey("config_templates.id", ondelete="SET NULL"), + nullable=True, + ) + device_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("devices.id", ondelete="CASCADE"), + nullable=False, + ) + rollout_id: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + nullable=True, + ) + rendered_content: Mapped[str] = mapped_column(Text, nullable=False) + status: Mapped[str] = mapped_column( + Text, + nullable=False, + default="pending", + server_default="pending", + ) + pre_push_backup_sha: Mapped[str | None] = mapped_column(Text, nullable=True) + error_message: Mapped[str | None] = mapped_column(Text, nullable=True) + started_at: Mapped[datetime | None] = mapped_column( + DateTime(timezone=True), nullable=True + ) + completed_at: Mapped[datetime | None] = mapped_column( + DateTime(timezone=True), nullable=True + ) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + + # Relationships + template: Mapped["ConfigTemplate | None"] = relationship("ConfigTemplate") + device: Mapped["Device"] = relationship("Device") # type: ignore[name-defined] + + def __repr__(self) -> str: + return f"" diff --git a/backend/app/models/device.py b/backend/app/models/device.py new file mode 100644 index 0000000..f4bfb4d --- /dev/null +++ b/backend/app/models/device.py @@ -0,0 +1,214 @@ +"""Device, DeviceGroup, DeviceTag, and membership models.""" + +import uuid +from datetime import datetime +from enum import Enum + +from sqlalchemy import ( + Boolean, + DateTime, + Float, + ForeignKey, + Integer, + LargeBinary, + String, + Text, + UniqueConstraint, + func, +) +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import Mapped, mapped_column, relationship + +from app.database import Base + + +class DeviceStatus(str, Enum): + """Device connection status.""" + UNKNOWN = "unknown" + ONLINE = "online" + OFFLINE = "offline" + + +class Device(Base): + __tablename__ = "devices" + __table_args__ = ( + UniqueConstraint("tenant_id", "hostname", name="uq_devices_tenant_hostname"), + ) + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + hostname: Mapped[str] = mapped_column(String(255), nullable=False) + ip_address: Mapped[str] = mapped_column(String(45), nullable=False) # IPv4 or IPv6 + api_port: Mapped[int] = mapped_column(Integer, default=8728, nullable=False) + api_ssl_port: Mapped[int] = mapped_column(Integer, default=8729, nullable=False) + model: Mapped[str | None] = mapped_column(String(255), nullable=True) + serial_number: Mapped[str | None] = mapped_column(String(255), nullable=True) + firmware_version: Mapped[str | None] = mapped_column(String(100), nullable=True) + routeros_version: Mapped[str | None] = mapped_column(String(100), nullable=True) + routeros_major_version: Mapped[int | None] = mapped_column(Integer, nullable=True) + uptime_seconds: Mapped[int | None] = mapped_column(Integer, nullable=True) + last_cpu_load: Mapped[int | None] = mapped_column(Integer, nullable=True) + last_memory_used_pct: Mapped[int | None] = mapped_column(Integer, nullable=True) + architecture: Mapped[str | None] = mapped_column(Text, nullable=True) # CPU arch (arm, arm64, mipsbe, etc.) + preferred_channel: Mapped[str] = mapped_column( + Text, default="stable", server_default="stable", nullable=False + ) # Firmware release channel + last_seen: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True) + # AES-256-GCM encrypted credentials (username + password JSON) + encrypted_credentials: Mapped[bytes | None] = mapped_column(LargeBinary, nullable=True) + # OpenBao Transit ciphertext (dual-write migration) + encrypted_credentials_transit: Mapped[str | None] = mapped_column(Text, nullable=True) + latitude: Mapped[float | None] = mapped_column(Float, nullable=True) + longitude: Mapped[float | None] = mapped_column(Float, nullable=True) + status: Mapped[str] = mapped_column( + String(20), + default=DeviceStatus.UNKNOWN.value, + nullable=False, + ) + tls_mode: Mapped[str] = mapped_column( + String(20), + default="auto", + server_default="auto", + nullable=False, + ) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + onupdate=func.now(), + nullable=False, + ) + + # Relationships + tenant: Mapped["Tenant"] = relationship("Tenant", back_populates="devices") # type: ignore[name-defined] + group_memberships: Mapped[list["DeviceGroupMembership"]] = relationship( + "DeviceGroupMembership", back_populates="device", cascade="all, delete-orphan" + ) + tag_assignments: Mapped[list["DeviceTagAssignment"]] = relationship( + "DeviceTagAssignment", back_populates="device", cascade="all, delete-orphan" + ) + + def __repr__(self) -> str: + return f"" + + +class DeviceGroup(Base): + __tablename__ = "device_groups" + __table_args__ = ( + UniqueConstraint("tenant_id", "name", name="uq_device_groups_tenant_name"), + ) + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + name: Mapped[str] = mapped_column(String(255), nullable=False) + description: Mapped[str | None] = mapped_column(Text, nullable=True) + preferred_channel: Mapped[str] = mapped_column( + Text, default="stable", server_default="stable", nullable=False + ) # Firmware release channel for the group + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + + # Relationships + tenant: Mapped["Tenant"] = relationship("Tenant", back_populates="device_groups") # type: ignore[name-defined] + memberships: Mapped[list["DeviceGroupMembership"]] = relationship( + "DeviceGroupMembership", back_populates="group", cascade="all, delete-orphan" + ) + + def __repr__(self) -> str: + return f"" + + +class DeviceTag(Base): + __tablename__ = "device_tags" + __table_args__ = ( + UniqueConstraint("tenant_id", "name", name="uq_device_tags_tenant_name"), + ) + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + name: Mapped[str] = mapped_column(String(100), nullable=False) + color: Mapped[str | None] = mapped_column(String(7), nullable=True) # hex color e.g. #FF5733 + + # Relationships + tenant: Mapped["Tenant"] = relationship("Tenant", back_populates="device_tags") # type: ignore[name-defined] + assignments: Mapped[list["DeviceTagAssignment"]] = relationship( + "DeviceTagAssignment", back_populates="tag", cascade="all, delete-orphan" + ) + + def __repr__(self) -> str: + return f"" + + +class DeviceGroupMembership(Base): + __tablename__ = "device_group_memberships" + + device_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("devices.id", ondelete="CASCADE"), + primary_key=True, + ) + group_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("device_groups.id", ondelete="CASCADE"), + primary_key=True, + ) + + # Relationships + device: Mapped["Device"] = relationship("Device", back_populates="group_memberships") + group: Mapped["DeviceGroup"] = relationship("DeviceGroup", back_populates="memberships") + + +class DeviceTagAssignment(Base): + __tablename__ = "device_tag_assignments" + + device_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("devices.id", ondelete="CASCADE"), + primary_key=True, + ) + tag_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("device_tags.id", ondelete="CASCADE"), + primary_key=True, + ) + + # Relationships + device: Mapped["Device"] = relationship("Device", back_populates="tag_assignments") + tag: Mapped["DeviceTag"] = relationship("DeviceTag", back_populates="assignments") diff --git a/backend/app/models/firmware.py b/backend/app/models/firmware.py new file mode 100644 index 0000000..67385c5 --- /dev/null +++ b/backend/app/models/firmware.py @@ -0,0 +1,102 @@ +"""Firmware version tracking and upgrade job ORM models.""" + +import uuid +from datetime import datetime + +from sqlalchemy import ( + BigInteger, + Boolean, + DateTime, + Integer, + Text, + UniqueConstraint, + func, +) +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import Mapped, mapped_column +from sqlalchemy import ForeignKey + +from app.database import Base + + +class FirmwareVersion(Base): + """Cached firmware version from MikroTik download server or poller discovery. + + Not tenant-scoped — firmware versions are global data shared across all tenants. + """ + __tablename__ = "firmware_versions" + __table_args__ = ( + UniqueConstraint("architecture", "channel", "version"), + ) + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + architecture: Mapped[str] = mapped_column(Text, nullable=False) + channel: Mapped[str] = mapped_column(Text, nullable=False) # "stable", "long-term", "testing" + version: Mapped[str] = mapped_column(Text, nullable=False) + npk_url: Mapped[str] = mapped_column(Text, nullable=False) + npk_local_path: Mapped[str | None] = mapped_column(Text, nullable=True) + npk_size_bytes: Mapped[int | None] = mapped_column(BigInteger, nullable=True) + checked_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + + def __repr__(self) -> str: + return f"" + + +class FirmwareUpgradeJob(Base): + """Tracks a firmware upgrade operation for a single device. + + Multiple jobs can share a rollout_group_id for mass upgrades. + """ + __tablename__ = "firmware_upgrade_jobs" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + ) + device_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("devices.id", ondelete="CASCADE"), + nullable=False, + ) + rollout_group_id: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + nullable=True, + ) + target_version: Mapped[str] = mapped_column(Text, nullable=False) + architecture: Mapped[str] = mapped_column(Text, nullable=False) + channel: Mapped[str] = mapped_column(Text, nullable=False) + status: Mapped[str] = mapped_column( + Text, nullable=False, default="pending", server_default="pending" + ) + pre_upgrade_backup_sha: Mapped[str | None] = mapped_column(Text, nullable=True) + scheduled_at: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True) + started_at: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True) + completed_at: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True) + error_message: Mapped[str | None] = mapped_column(Text, nullable=True) + confirmed_major_upgrade: Mapped[bool] = mapped_column( + Boolean, nullable=False, default=False, server_default="false" + ) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + + def __repr__(self) -> str: + return f"" diff --git a/backend/app/models/key_set.py b/backend/app/models/key_set.py new file mode 100644 index 0000000..4124515 --- /dev/null +++ b/backend/app/models/key_set.py @@ -0,0 +1,134 @@ +"""Key set and key access log models for zero-knowledge architecture.""" + +import uuid +from datetime import datetime + +from sqlalchemy import DateTime, ForeignKey, Integer, LargeBinary, Text, func +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import Mapped, mapped_column, relationship + +from app.database import Base + + +class UserKeySet(Base): + """Encrypted key bundle for a user. + + Stores the RSA private key (wrapped by AUK), tenant vault key + (wrapped by AUK), RSA public key, and key derivation salts. + One key set per user (UNIQUE on user_id). + """ + + __tablename__ = "user_key_sets" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + user_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("users.id", ondelete="CASCADE"), + nullable=False, + unique=True, + ) + tenant_id: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=True, # NULL for super_admin + ) + encrypted_private_key: Mapped[bytes] = mapped_column( + LargeBinary, nullable=False + ) + private_key_nonce: Mapped[bytes] = mapped_column( + LargeBinary, nullable=False + ) + encrypted_vault_key: Mapped[bytes] = mapped_column( + LargeBinary, nullable=False + ) + vault_key_nonce: Mapped[bytes] = mapped_column( + LargeBinary, nullable=False + ) + public_key: Mapped[bytes] = mapped_column( + LargeBinary, nullable=False + ) + pbkdf2_iterations: Mapped[int] = mapped_column( + Integer, + server_default=func.literal_column("650000"), + nullable=False, + ) + pbkdf2_salt: Mapped[bytes] = mapped_column( + LargeBinary, nullable=False + ) + hkdf_salt: Mapped[bytes] = mapped_column( + LargeBinary, nullable=False + ) + key_version: Mapped[int] = mapped_column( + Integer, + server_default=func.literal_column("1"), + nullable=False, + ) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + + # Relationships + user: Mapped["User"] = relationship("User") # type: ignore[name-defined] + tenant: Mapped["Tenant | None"] = relationship("Tenant") # type: ignore[name-defined] + + def __repr__(self) -> str: + return f"" + + +class KeyAccessLog(Base): + """Immutable audit trail for key operations. + + Append-only: INSERT+SELECT only, no UPDATE/DELETE via RLS. + """ + + __tablename__ = "key_access_log" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + ) + user_id: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + ForeignKey("users.id", ondelete="SET NULL"), + nullable=True, + ) + action: Mapped[str] = mapped_column(Text, nullable=False) + resource_type: Mapped[str | None] = mapped_column(Text, nullable=True) + resource_id: Mapped[str | None] = mapped_column(Text, nullable=True) + key_version: Mapped[int | None] = mapped_column(Integer, nullable=True) + ip_address: Mapped[str | None] = mapped_column(Text, nullable=True) + # Phase 29 extensions for device credential access tracking + device_id: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + ForeignKey("devices.id"), + nullable=True, + ) + justification: Mapped[str | None] = mapped_column(Text, nullable=True) + correlation_id: Mapped[str | None] = mapped_column(Text, nullable=True) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + + def __repr__(self) -> str: + return f"" diff --git a/backend/app/models/maintenance_window.py b/backend/app/models/maintenance_window.py new file mode 100644 index 0000000..ec3a9f8 --- /dev/null +++ b/backend/app/models/maintenance_window.py @@ -0,0 +1,74 @@ +"""Maintenance window ORM model for scheduled maintenance periods. + +Maintenance windows allow operators to define time periods during which +alerts are suppressed for specific devices (or all devices in a tenant). +""" + +import uuid +from datetime import datetime + +from sqlalchemy import Boolean, DateTime, ForeignKey, Text, VARCHAR, func +from sqlalchemy.dialects.postgresql import JSONB, UUID +from sqlalchemy.orm import Mapped, mapped_column + +from app.database import Base + + +class MaintenanceWindow(Base): + """Scheduled maintenance window with optional alert suppression. + + device_ids is a JSONB array of device UUID strings. + An empty array means "all devices in tenant". + """ + __tablename__ = "maintenance_windows" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + ) + name: Mapped[str] = mapped_column(VARCHAR(200), nullable=False) + device_ids: Mapped[list] = mapped_column( + JSONB, + nullable=False, + server_default="'[]'::jsonb", + ) + start_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + nullable=False, + ) + end_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + nullable=False, + ) + suppress_alerts: Mapped[bool] = mapped_column( + Boolean, + nullable=False, + default=True, + server_default="true", + ) + notes: Mapped[str | None] = mapped_column(Text, nullable=True) + created_by: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + ForeignKey("users.id", ondelete="SET NULL"), + nullable=True, + ) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + + def __repr__(self) -> str: + return f"" diff --git a/backend/app/models/tenant.py b/backend/app/models/tenant.py new file mode 100644 index 0000000..4271be4 --- /dev/null +++ b/backend/app/models/tenant.py @@ -0,0 +1,49 @@ +"""Tenant model — represents an MSP client organization.""" + +import uuid +from datetime import datetime + +from sqlalchemy import DateTime, LargeBinary, Integer, String, Text, func +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import Mapped, mapped_column, relationship + +from app.database import Base + + +class Tenant(Base): + __tablename__ = "tenants" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + name: Mapped[str] = mapped_column(String(255), unique=True, nullable=False, index=True) + description: Mapped[str | None] = mapped_column(Text, nullable=True) + contact_email: Mapped[str | None] = mapped_column(String(255), nullable=True) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + onupdate=func.now(), + nullable=False, + ) + + # Zero-knowledge key management (Phase 28+29) + encrypted_vault_key: Mapped[bytes | None] = mapped_column(LargeBinary, nullable=True) + vault_key_version: Mapped[int | None] = mapped_column(Integer, nullable=True) + openbao_key_name: Mapped[str | None] = mapped_column(Text, nullable=True) + + # Relationships — passive_deletes=True lets the DB ON DELETE CASCADE handle cleanup + users: Mapped[list["User"]] = relationship("User", back_populates="tenant", passive_deletes=True) # type: ignore[name-defined] + devices: Mapped[list["Device"]] = relationship("Device", back_populates="tenant", passive_deletes=True) # type: ignore[name-defined] + device_groups: Mapped[list["DeviceGroup"]] = relationship("DeviceGroup", back_populates="tenant", passive_deletes=True) # type: ignore[name-defined] + device_tags: Mapped[list["DeviceTag"]] = relationship("DeviceTag", back_populates="tenant", passive_deletes=True) # type: ignore[name-defined] + + def __repr__(self) -> str: + return f"" diff --git a/backend/app/models/user.py b/backend/app/models/user.py new file mode 100644 index 0000000..8b43f4b --- /dev/null +++ b/backend/app/models/user.py @@ -0,0 +1,74 @@ +"""User model with role-based access control.""" + +import uuid +from datetime import datetime +from enum import Enum + +from sqlalchemy import Boolean, DateTime, ForeignKey, LargeBinary, SmallInteger, String, func, text +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import Mapped, mapped_column, relationship + +from app.database import Base + + +class UserRole(str, Enum): + """User roles with increasing privilege levels.""" + SUPER_ADMIN = "super_admin" + TENANT_ADMIN = "tenant_admin" + OPERATOR = "operator" + VIEWER = "viewer" + + +class User(Base): + __tablename__ = "users" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + email: Mapped[str] = mapped_column(String(255), unique=True, nullable=False, index=True) + hashed_password: Mapped[str | None] = mapped_column(String(255), nullable=True) + name: Mapped[str] = mapped_column(String(255), nullable=False) + role: Mapped[str] = mapped_column( + String(50), + nullable=False, + default=UserRole.VIEWER.value, + ) + # tenant_id is nullable for super_admin users (portal-wide role) + tenant_id: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=True, + index=True, + ) + # SRP zero-knowledge authentication columns (nullable during migration period) + srp_salt: Mapped[bytes | None] = mapped_column(LargeBinary, nullable=True) + srp_verifier: Mapped[bytes | None] = mapped_column(LargeBinary, nullable=True) + auth_version: Mapped[int] = mapped_column( + SmallInteger, server_default=text("1"), nullable=False + ) # 1=bcrypt legacy, 2=SRP + must_upgrade_auth: Mapped[bool] = mapped_column( + Boolean, server_default=text("false"), nullable=False + ) # True for bcrypt users who need SRP upgrade + + is_active: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False) + last_login: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + nullable=False, + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + server_default=func.now(), + onupdate=func.now(), + nullable=False, + ) + + # Relationships + tenant: Mapped["Tenant | None"] = relationship("Tenant", back_populates="users") # type: ignore[name-defined] + + def __repr__(self) -> str: + return f"" diff --git a/backend/app/models/vpn.py b/backend/app/models/vpn.py new file mode 100644 index 0000000..0f531f4 --- /dev/null +++ b/backend/app/models/vpn.py @@ -0,0 +1,85 @@ +"""VPN configuration and peer models for WireGuard management.""" + +import uuid +from datetime import datetime +from typing import Optional + +from sqlalchemy import Boolean, DateTime, ForeignKey, Integer, LargeBinary, String, func +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import Mapped, mapped_column + +from app.database import Base + + +class VpnConfig(Base): + """Per-tenant WireGuard server configuration.""" + + __tablename__ = "vpn_config" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + unique=True, + ) + server_private_key: Mapped[bytes] = mapped_column(LargeBinary, nullable=False) + server_public_key: Mapped[str] = mapped_column(String(64), nullable=False) + subnet: Mapped[str] = mapped_column(String(32), nullable=False, server_default="10.10.0.0/24") + server_port: Mapped[int] = mapped_column(Integer, nullable=False, server_default="51820") + server_address: Mapped[str] = mapped_column(String(32), nullable=False, server_default="10.10.0.1/24") + endpoint: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + is_enabled: Mapped[bool] = mapped_column(Boolean, nullable=False, server_default="false") + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), server_default=func.now(), nullable=False + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), server_default=func.now(), nullable=False, onupdate=func.now() + ) + + # Peers are queried separately via tenant_id — no ORM relationship needed + + +class VpnPeer(Base): + """WireGuard peer representing a device's VPN connection.""" + + __tablename__ = "vpn_peers" + + id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid4, + server_default=func.gen_random_uuid(), + ) + tenant_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tenants.id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + device_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("devices.id", ondelete="CASCADE"), + nullable=False, + unique=True, + ) + peer_private_key: Mapped[bytes] = mapped_column(LargeBinary, nullable=False) + peer_public_key: Mapped[str] = mapped_column(String(64), nullable=False) + preshared_key: Mapped[Optional[bytes]] = mapped_column(LargeBinary, nullable=True) + assigned_ip: Mapped[str] = mapped_column(String(32), nullable=False) + additional_allowed_ips: Mapped[Optional[str]] = mapped_column(String(512), nullable=True) + is_enabled: Mapped[bool] = mapped_column(Boolean, nullable=False, server_default="true") + last_handshake: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), server_default=func.now(), nullable=False + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), server_default=func.now(), nullable=False, onupdate=func.now() + ) + + # Config is queried separately via tenant_id — no ORM relationship needed diff --git a/backend/app/observability.py b/backend/app/observability.py new file mode 100644 index 0000000..17befe5 --- /dev/null +++ b/backend/app/observability.py @@ -0,0 +1,140 @@ +"""Prometheus metrics and health check infrastructure. + +Provides: +- setup_instrumentator(): Configures Prometheus auto-instrumentation for FastAPI +- check_health_ready(): Verifies PostgreSQL, Redis, and NATS connectivity for readiness probes +""" + +import asyncio +import time + +import structlog +from fastapi import FastAPI +from prometheus_fastapi_instrumentator import Instrumentator + +logger = structlog.get_logger(__name__) + + +def setup_instrumentator(app: FastAPI) -> Instrumentator: + """Configure and mount Prometheus metrics instrumentation. + + Auto-instruments all HTTP endpoints with: + - http_requests_total (counter) by method, handler, status_code + - http_request_duration_seconds (histogram) by method, handler + - http_requests_in_progress (gauge) + + The /metrics endpoint is mounted at root level (not under /api prefix). + Labels use handler templates (e.g., /api/tenants/{tenant_id}/...) not + resolved paths, ensuring bounded cardinality. + + Must be called AFTER all routers are included so all routes are captured. + """ + instrumentator = Instrumentator( + should_group_status_codes=False, + should_ignore_untemplated=True, + excluded_handlers=["/health", "/health/ready", "/metrics", "/api/health"], + should_respect_env_var=False, + ) + instrumentator.instrument(app) + instrumentator.expose(app, include_in_schema=False, should_gzip=True) + logger.info("prometheus instrumentation enabled", endpoint="/metrics") + return instrumentator + + +async def check_health_ready() -> dict: + """Check readiness by verifying all critical dependencies. + + Checks PostgreSQL, Redis, and NATS connectivity with 5-second timeouts. + Returns a structured result with per-dependency status and latency. + + Returns: + dict with "status" ("healthy"|"unhealthy"), "version", and "checks" + containing per-dependency results. + """ + from app.config import settings + + checks: dict[str, dict] = {} + all_healthy = True + + # PostgreSQL check + checks["postgres"] = await _check_postgres() + if checks["postgres"]["status"] != "up": + all_healthy = False + + # Redis check + checks["redis"] = await _check_redis(settings.REDIS_URL) + if checks["redis"]["status"] != "up": + all_healthy = False + + # NATS check + checks["nats"] = await _check_nats(settings.NATS_URL) + if checks["nats"]["status"] != "up": + all_healthy = False + + return { + "status": "healthy" if all_healthy else "unhealthy", + "version": settings.APP_VERSION, + "checks": checks, + } + + +async def _check_postgres() -> dict: + """Verify PostgreSQL connectivity via the admin engine.""" + start = time.monotonic() + try: + from sqlalchemy import text + + from app.database import engine + + async with engine.connect() as conn: + await asyncio.wait_for( + conn.execute(text("SELECT 1")), + timeout=5.0, + ) + latency_ms = round((time.monotonic() - start) * 1000) + return {"status": "up", "latency_ms": latency_ms, "error": None} + except Exception as exc: + latency_ms = round((time.monotonic() - start) * 1000) + logger.warning("health check: postgres failed", error=str(exc)) + return {"status": "down", "latency_ms": latency_ms, "error": str(exc)} + + +async def _check_redis(redis_url: str) -> dict: + """Verify Redis connectivity.""" + start = time.monotonic() + try: + import redis.asyncio as aioredis + + client = aioredis.from_url(redis_url, socket_connect_timeout=5) + try: + await asyncio.wait_for(client.ping(), timeout=5.0) + finally: + await client.aclose() + latency_ms = round((time.monotonic() - start) * 1000) + return {"status": "up", "latency_ms": latency_ms, "error": None} + except Exception as exc: + latency_ms = round((time.monotonic() - start) * 1000) + logger.warning("health check: redis failed", error=str(exc)) + return {"status": "down", "latency_ms": latency_ms, "error": str(exc)} + + +async def _check_nats(nats_url: str) -> dict: + """Verify NATS connectivity.""" + start = time.monotonic() + try: + import nats + + nc = await asyncio.wait_for( + nats.connect(nats_url), + timeout=5.0, + ) + try: + await nc.drain() + except Exception: + pass + latency_ms = round((time.monotonic() - start) * 1000) + return {"status": "up", "latency_ms": latency_ms, "error": None} + except Exception as exc: + latency_ms = round((time.monotonic() - start) * 1000) + logger.warning("health check: nats failed", error=str(exc)) + return {"status": "down", "latency_ms": latency_ms, "error": str(exc)} diff --git a/backend/app/routers/__init__.py b/backend/app/routers/__init__.py new file mode 100644 index 0000000..b58ae3e --- /dev/null +++ b/backend/app/routers/__init__.py @@ -0,0 +1 @@ +"""FastAPI routers for all API endpoints.""" diff --git a/backend/app/routers/alerts.py b/backend/app/routers/alerts.py new file mode 100644 index 0000000..02dad2d --- /dev/null +++ b/backend/app/routers/alerts.py @@ -0,0 +1,1088 @@ +"""Alert management API endpoints. + +Tenant-scoped routes under /api/tenants/{tenant_id}/ for: +- Alert rules CRUD (list, create, update, delete, toggle) +- Notification channels CRUD (list, create, update, delete, test) +- Alert events listing with pagination and filtering +- Active alert count for nav badge +- Acknowledge and silence actions +- Device-scoped alert listing + +RLS enforced via get_db() (app_user engine with tenant context). +RBAC: viewer = read-only (GET); operator and above = write (POST/PUT/PATCH/DELETE). +""" + +import base64 +import logging +import uuid +from datetime import datetime, timedelta, timezone +from typing import Any, Optional + +from fastapi import APIRouter, Depends, HTTPException, Query, Request, status +import re + +from pydantic import BaseModel, ConfigDict, model_validator +from sqlalchemy import text +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db, set_tenant_context +from app.middleware.rate_limit import limiter +from app.middleware.rbac import require_scope +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.services.audit_service import log_action + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=["alerts"]) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """Verify the current user is allowed to access the given tenant.""" + if current_user.is_super_admin: + await set_tenant_context(db, str(tenant_id)) + elif current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied to this tenant", + ) + + +def _require_write(current_user: CurrentUser) -> None: + """Raise 403 if user is a viewer (read-only).""" + if current_user.role == "viewer": + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Viewers have read-only access.", + ) + + +EMAIL_REGEX = re.compile(r"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$") + +ALLOWED_METRICS = { + "cpu_load", "memory_used_pct", "disk_used_pct", "temperature", + "signal_strength", "ccq", "client_count", +} +ALLOWED_OPERATORS = {"gt", "lt", "gte", "lte"} +ALLOWED_SEVERITIES = {"critical", "warning", "info"} + + +# --------------------------------------------------------------------------- +# Request/response models +# --------------------------------------------------------------------------- + + +class AlertRuleCreate(BaseModel): + model_config = ConfigDict(extra="forbid") + name: str + metric: str + operator: str + threshold: float + duration_polls: int = 1 + severity: str = "warning" + device_id: Optional[str] = None + group_id: Optional[str] = None + channel_ids: list[str] = [] + enabled: bool = True + + +class AlertRuleUpdate(BaseModel): + model_config = ConfigDict(extra="forbid") + name: str + metric: str + operator: str + threshold: float + duration_polls: int = 1 + severity: str = "warning" + device_id: Optional[str] = None + group_id: Optional[str] = None + channel_ids: list[str] = [] + enabled: bool = True + + +class ChannelCreate(BaseModel): + model_config = ConfigDict(extra="forbid") + name: str + channel_type: str # "email", "webhook", or "slack" + smtp_host: Optional[str] = None + smtp_port: Optional[int] = None + smtp_user: Optional[str] = None + smtp_password: Optional[str] = None # plaintext — will be encrypted before storage + smtp_use_tls: bool = False + from_address: Optional[str] = None + to_address: Optional[str] = None + webhook_url: Optional[str] = None + slack_webhook_url: Optional[str] = None + + @model_validator(mode="after") + def validate_email_fields(self): + if self.channel_type == "email": + missing = [] + if not self.smtp_host: + missing.append("smtp_host") + if not self.smtp_port: + missing.append("smtp_port") + if not self.to_address: + missing.append("to_address") + if missing: + raise ValueError(f"Email channels require: {', '.join(missing)}") + if self.to_address and not EMAIL_REGEX.match(self.to_address): + raise ValueError(f"Invalid email address: {self.to_address}") + if self.from_address and not EMAIL_REGEX.match(self.from_address): + raise ValueError(f"Invalid from address: {self.from_address}") + return self + + +class ChannelUpdate(BaseModel): + model_config = ConfigDict(extra="forbid") + name: str + channel_type: str + smtp_host: Optional[str] = None + smtp_port: Optional[int] = None + smtp_user: Optional[str] = None + smtp_password: Optional[str] = None # if None, keep existing + smtp_use_tls: bool = False + from_address: Optional[str] = None + to_address: Optional[str] = None + webhook_url: Optional[str] = None + slack_webhook_url: Optional[str] = None + + +class SilenceRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + duration_minutes: int + + +# ========================================================================= +# ALERT RULES CRUD +# ========================================================================= + + +@router.get( + "/tenants/{tenant_id}/alert-rules", + summary="List all alert rules for tenant", + dependencies=[require_scope("alerts:read")], +) +async def list_alert_rules( + tenant_id: uuid.UUID, + enabled: Optional[bool] = Query(None), + metric: Optional[str] = Query(None), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[dict[str, Any]]: + await _check_tenant_access(current_user, tenant_id, db) + + filters = ["1=1"] + params: dict[str, Any] = {} + + if enabled is not None: + filters.append("ar.enabled = :enabled") + params["enabled"] = enabled + if metric: + filters.append("ar.metric = :metric") + params["metric"] = metric + + where = " AND ".join(filters) + + result = await db.execute( + text(f""" + SELECT ar.id, ar.tenant_id, ar.device_id, ar.group_id, + ar.name, ar.metric, ar.operator, ar.threshold, + ar.duration_polls, ar.severity, ar.enabled, ar.is_default, + ar.created_at, + COALESCE( + (SELECT json_agg(arc.channel_id) + FROM alert_rule_channels arc + WHERE arc.rule_id = ar.id), + '[]'::json + ) AS channel_ids + FROM alert_rules ar + WHERE {where} + ORDER BY ar.created_at DESC + """), + params, + ) + + rows = result.fetchall() + return [ + { + "id": str(row[0]), + "tenant_id": str(row[1]), + "device_id": str(row[2]) if row[2] else None, + "group_id": str(row[3]) if row[3] else None, + "name": row[4], + "metric": row[5], + "operator": row[6], + "threshold": float(row[7]), + "duration_polls": row[8], + "severity": row[9], + "enabled": row[10], + "is_default": row[11], + "created_at": row[12].isoformat() if row[12] else None, + "channel_ids": [str(c) for c in (row[13] if isinstance(row[13], list) else [])], + } + for row in rows + ] + + +@router.post( + "/tenants/{tenant_id}/alert-rules", + summary="Create alert rule", + status_code=status.HTTP_201_CREATED, +) +@limiter.limit("20/minute") +async def create_alert_rule( + request: Request, + tenant_id: uuid.UUID, + body: AlertRuleCreate, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + _require_write(current_user) + + if body.metric not in ALLOWED_METRICS: + raise HTTPException(422, f"metric must be one of: {', '.join(sorted(ALLOWED_METRICS))}") + if body.operator not in ALLOWED_OPERATORS: + raise HTTPException(422, f"operator must be one of: {', '.join(sorted(ALLOWED_OPERATORS))}") + if body.severity not in ALLOWED_SEVERITIES: + raise HTTPException(422, f"severity must be one of: {', '.join(sorted(ALLOWED_SEVERITIES))}") + + rule_id = str(uuid.uuid4()) + + await db.execute( + text(""" + INSERT INTO alert_rules + (id, tenant_id, device_id, group_id, name, metric, operator, + threshold, duration_polls, severity, enabled) + VALUES + (CAST(:id AS uuid), CAST(:tenant_id AS uuid), + CAST(:device_id AS uuid), CAST(:group_id AS uuid), + :name, :metric, :operator, :threshold, :duration_polls, + :severity, :enabled) + """), + { + "id": rule_id, + "tenant_id": str(tenant_id), + "device_id": body.device_id, + "group_id": body.group_id, + "name": body.name, + "metric": body.metric, + "operator": body.operator, + "threshold": body.threshold, + "duration_polls": body.duration_polls, + "severity": body.severity, + "enabled": body.enabled, + }, + ) + + # Create channel associations + for ch_id in body.channel_ids: + await db.execute( + text(""" + INSERT INTO alert_rule_channels (rule_id, channel_id) + VALUES (CAST(:rule_id AS uuid), CAST(:channel_id AS uuid)) + """), + {"rule_id": rule_id, "channel_id": ch_id}, + ) + + await db.commit() + + try: + await log_action( + db, tenant_id, current_user.user_id, "alert_rule_create", + resource_type="alert_rule", resource_id=rule_id, + details={"name": body.name, "metric": body.metric, "severity": body.severity}, + ) + except Exception: + pass + + return { + "id": rule_id, + "tenant_id": str(tenant_id), + "name": body.name, + "metric": body.metric, + "operator": body.operator, + "threshold": body.threshold, + "duration_polls": body.duration_polls, + "severity": body.severity, + "enabled": body.enabled, + "channel_ids": body.channel_ids, + } + + +@router.put( + "/tenants/{tenant_id}/alert-rules/{rule_id}", + summary="Update alert rule", +) +@limiter.limit("20/minute") +async def update_alert_rule( + request: Request, + tenant_id: uuid.UUID, + rule_id: uuid.UUID, + body: AlertRuleUpdate, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + _require_write(current_user) + + if body.metric not in ALLOWED_METRICS: + raise HTTPException(422, f"metric must be one of: {', '.join(sorted(ALLOWED_METRICS))}") + if body.operator not in ALLOWED_OPERATORS: + raise HTTPException(422, f"operator must be one of: {', '.join(sorted(ALLOWED_OPERATORS))}") + if body.severity not in ALLOWED_SEVERITIES: + raise HTTPException(422, f"severity must be one of: {', '.join(sorted(ALLOWED_SEVERITIES))}") + + result = await db.execute( + text(""" + UPDATE alert_rules + SET name = :name, metric = :metric, operator = :operator, + threshold = :threshold, duration_polls = :duration_polls, + severity = :severity, device_id = CAST(:device_id AS uuid), + group_id = CAST(:group_id AS uuid), enabled = :enabled + WHERE id = CAST(:rule_id AS uuid) + RETURNING id + """), + { + "rule_id": str(rule_id), + "name": body.name, + "metric": body.metric, + "operator": body.operator, + "threshold": body.threshold, + "duration_polls": body.duration_polls, + "severity": body.severity, + "device_id": body.device_id, + "group_id": body.group_id, + "enabled": body.enabled, + }, + ) + if not result.fetchone(): + raise HTTPException(404, "Alert rule not found") + + # Replace channel associations + await db.execute( + text("DELETE FROM alert_rule_channels WHERE rule_id = CAST(:rule_id AS uuid)"), + {"rule_id": str(rule_id)}, + ) + for ch_id in body.channel_ids: + await db.execute( + text(""" + INSERT INTO alert_rule_channels (rule_id, channel_id) + VALUES (CAST(:rule_id AS uuid), CAST(:channel_id AS uuid)) + """), + {"rule_id": str(rule_id), "channel_id": ch_id}, + ) + + await db.commit() + + try: + await log_action( + db, tenant_id, current_user.user_id, "alert_rule_update", + resource_type="alert_rule", resource_id=str(rule_id), + details={"name": body.name, "metric": body.metric, "severity": body.severity}, + ) + except Exception: + pass + + return { + "id": str(rule_id), + "name": body.name, + "metric": body.metric, + "operator": body.operator, + "threshold": body.threshold, + "duration_polls": body.duration_polls, + "severity": body.severity, + "enabled": body.enabled, + "channel_ids": body.channel_ids, + } + + +@router.delete( + "/tenants/{tenant_id}/alert-rules/{rule_id}", + summary="Delete alert rule", + status_code=status.HTTP_204_NO_CONTENT, +) +@limiter.limit("5/minute") +async def delete_alert_rule( + request: Request, + tenant_id: uuid.UUID, + rule_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> None: + await _check_tenant_access(current_user, tenant_id, db) + _require_write(current_user) + + # Prevent deletion of default rules + check = await db.execute( + text("SELECT is_default FROM alert_rules WHERE id = CAST(:id AS uuid)"), + {"id": str(rule_id)}, + ) + row = check.fetchone() + if not row: + raise HTTPException(404, "Alert rule not found") + if row[0]: + raise HTTPException(422, "Cannot delete default alert rules. Disable them instead.") + + await db.execute( + text("DELETE FROM alert_rules WHERE id = CAST(:id AS uuid)"), + {"id": str(rule_id)}, + ) + await db.commit() + + try: + await log_action( + db, tenant_id, current_user.user_id, "alert_rule_delete", + resource_type="alert_rule", resource_id=str(rule_id), + ) + except Exception: + pass + + +@router.patch( + "/tenants/{tenant_id}/alert-rules/{rule_id}/toggle", + summary="Toggle alert rule enabled/disabled", +) +@limiter.limit("20/minute") +async def toggle_alert_rule( + request: Request, + tenant_id: uuid.UUID, + rule_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + _require_write(current_user) + + result = await db.execute( + text(""" + UPDATE alert_rules SET enabled = NOT enabled + WHERE id = CAST(:id AS uuid) + RETURNING id, enabled + """), + {"id": str(rule_id)}, + ) + row = result.fetchone() + if not row: + raise HTTPException(404, "Alert rule not found") + await db.commit() + + return {"id": str(row[0]), "enabled": row[1]} + + +# ========================================================================= +# NOTIFICATION CHANNELS CRUD +# ========================================================================= + + +class SMTPTestRequest(BaseModel): + smtp_host: str + smtp_port: int = 587 + smtp_user: Optional[str] = None + smtp_password: Optional[str] = None + smtp_use_tls: bool = False + from_address: str = "alerts@example.com" + to_address: str + + +@router.post( + "/tenants/{tenant_id}/notification-channels/test-smtp", + summary="Test SMTP settings before creating a channel", +) +async def test_channel_smtp( + tenant_id: uuid.UUID, + data: SMTPTestRequest, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + """Test SMTP settings before creating a channel.""" + await _check_tenant_access(current_user, tenant_id, db) + _require_write(current_user) + + from app.services.email_service import SMTPConfig, send_test_email + + config = SMTPConfig( + host=data.smtp_host, + port=data.smtp_port, + user=data.smtp_user, + password=data.smtp_password, + use_tls=data.smtp_use_tls, + from_address=data.from_address, + ) + result = await send_test_email(data.to_address, config) + if not result["success"]: + raise HTTPException(status_code=400, detail=result["message"]) + return result + + +@router.get( + "/tenants/{tenant_id}/notification-channels", + summary="List notification channels for tenant", + dependencies=[require_scope("alerts:read")], +) +async def list_notification_channels( + tenant_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[dict[str, Any]]: + await _check_tenant_access(current_user, tenant_id, db) + + result = await db.execute( + text(""" + SELECT id, tenant_id, name, channel_type, + smtp_host, smtp_port, smtp_user, smtp_use_tls, + from_address, to_address, webhook_url, + created_at, slack_webhook_url + FROM notification_channels + ORDER BY created_at DESC + """) + ) + + return [ + { + "id": str(row[0]), + "tenant_id": str(row[1]), + "name": row[2], + "channel_type": row[3], + "smtp_host": row[4], + "smtp_port": row[5], + "smtp_user": row[6], + "smtp_use_tls": row[7], + "from_address": row[8], + "to_address": row[9], + "webhook_url": row[10], + "created_at": row[11].isoformat() if row[11] else None, + "slack_webhook_url": row[12], + } + for row in result.fetchall() + ] + + +@router.post( + "/tenants/{tenant_id}/notification-channels", + summary="Create notification channel", + status_code=status.HTTP_201_CREATED, +) +@limiter.limit("20/minute") +async def create_notification_channel( + request: Request, + tenant_id: uuid.UUID, + body: ChannelCreate, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + _require_write(current_user) + + if body.channel_type not in ("email", "webhook", "slack"): + raise HTTPException(422, "channel_type must be 'email', 'webhook', or 'slack'") + + channel_id = str(uuid.uuid4()) + + from app.services.crypto import encrypt_credentials_transit + + # Encrypt SMTP password via Transit if provided + encrypted_password_transit = None + if body.smtp_password: + encrypted_password_transit = await encrypt_credentials_transit( + body.smtp_password, str(tenant_id), + ) + + await db.execute( + text(""" + INSERT INTO notification_channels + (id, tenant_id, name, channel_type, smtp_host, smtp_port, + smtp_user, smtp_password_transit, smtp_use_tls, from_address, + to_address, webhook_url, slack_webhook_url) + VALUES + (CAST(:id AS uuid), CAST(:tenant_id AS uuid), + :name, :channel_type, :smtp_host, :smtp_port, + :smtp_user, :smtp_password_transit, :smtp_use_tls, + :from_address, :to_address, :webhook_url, + :slack_webhook_url) + """), + { + "id": channel_id, + "tenant_id": str(tenant_id), + "name": body.name, + "channel_type": body.channel_type, + "smtp_host": body.smtp_host, + "smtp_port": body.smtp_port, + "smtp_user": body.smtp_user, + "smtp_password_transit": encrypted_password_transit, + "smtp_use_tls": body.smtp_use_tls, + "from_address": body.from_address, + "to_address": body.to_address, + "webhook_url": body.webhook_url, + "slack_webhook_url": body.slack_webhook_url, + }, + ) + await db.commit() + + return { + "id": channel_id, + "tenant_id": str(tenant_id), + "name": body.name, + "channel_type": body.channel_type, + "smtp_host": body.smtp_host, + "smtp_port": body.smtp_port, + "smtp_user": body.smtp_user, + "smtp_use_tls": body.smtp_use_tls, + "from_address": body.from_address, + "to_address": body.to_address, + "webhook_url": body.webhook_url, + "slack_webhook_url": body.slack_webhook_url, + } + + +@router.put( + "/tenants/{tenant_id}/notification-channels/{channel_id}", + summary="Update notification channel", +) +@limiter.limit("20/minute") +async def update_notification_channel( + request: Request, + tenant_id: uuid.UUID, + channel_id: uuid.UUID, + body: ChannelUpdate, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + _require_write(current_user) + + if body.channel_type not in ("email", "webhook", "slack"): + raise HTTPException(422, "channel_type must be 'email', 'webhook', or 'slack'") + + from app.services.crypto import encrypt_credentials_transit + + # Build SET clauses dynamically based on which secrets are provided + set_parts = [ + "name = :name", "channel_type = :channel_type", + "smtp_host = :smtp_host", "smtp_port = :smtp_port", + "smtp_user = :smtp_user", "smtp_use_tls = :smtp_use_tls", + "from_address = :from_address", "to_address = :to_address", + "webhook_url = :webhook_url", + "slack_webhook_url = :slack_webhook_url", + ] + params: dict[str, Any] = { + "id": str(channel_id), + "name": body.name, + "channel_type": body.channel_type, + "smtp_host": body.smtp_host, + "smtp_port": body.smtp_port, + "smtp_user": body.smtp_user, + "smtp_use_tls": body.smtp_use_tls, + "from_address": body.from_address, + "to_address": body.to_address, + "webhook_url": body.webhook_url, + "slack_webhook_url": body.slack_webhook_url, + } + + if body.smtp_password: + set_parts.append("smtp_password_transit = :smtp_password_transit") + params["smtp_password_transit"] = await encrypt_credentials_transit( + body.smtp_password, str(tenant_id), + ) + # Clear legacy column + set_parts.append("smtp_password = NULL") + + set_clause = ", ".join(set_parts) + result = await db.execute( + text(f""" + UPDATE notification_channels + SET {set_clause} + WHERE id = CAST(:id AS uuid) + RETURNING id + """), + params, + ) + + if not result.fetchone(): + raise HTTPException(404, "Notification channel not found") + await db.commit() + + return { + "id": str(channel_id), + "name": body.name, + "channel_type": body.channel_type, + "smtp_host": body.smtp_host, + "smtp_port": body.smtp_port, + "smtp_user": body.smtp_user, + "smtp_use_tls": body.smtp_use_tls, + "from_address": body.from_address, + "to_address": body.to_address, + "webhook_url": body.webhook_url, + "slack_webhook_url": body.slack_webhook_url, + } + + +@router.delete( + "/tenants/{tenant_id}/notification-channels/{channel_id}", + summary="Delete notification channel", + status_code=status.HTTP_204_NO_CONTENT, +) +@limiter.limit("5/minute") +async def delete_notification_channel( + request: Request, + tenant_id: uuid.UUID, + channel_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> None: + await _check_tenant_access(current_user, tenant_id, db) + _require_write(current_user) + + result = await db.execute( + text("DELETE FROM notification_channels WHERE id = CAST(:id AS uuid) RETURNING id"), + {"id": str(channel_id)}, + ) + if not result.fetchone(): + raise HTTPException(404, "Notification channel not found") + await db.commit() + + +@router.post( + "/tenants/{tenant_id}/notification-channels/{channel_id}/test", + summary="Send test notification via channel", +) +@limiter.limit("5/minute") +async def test_notification_channel( + request: Request, + tenant_id: uuid.UUID, + channel_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + _require_write(current_user) + + # Fetch channel as dict for notification_service + result = await db.execute( + text(""" + SELECT id, tenant_id, name, channel_type, + smtp_host, smtp_port, smtp_user, smtp_password, + smtp_use_tls, from_address, to_address, + webhook_url, smtp_password_transit, + slack_webhook_url + FROM notification_channels + WHERE id = CAST(:id AS uuid) + """), + {"id": str(channel_id)}, + ) + row = result.fetchone() + if not row: + raise HTTPException(404, "Notification channel not found") + + channel = { + "id": str(row[0]), + "tenant_id": str(row[1]), + "name": row[2], + "channel_type": row[3], + "smtp_host": row[4], + "smtp_port": row[5], + "smtp_user": row[6], + "smtp_password": row[7], + "smtp_use_tls": row[8], + "from_address": row[9], + "to_address": row[10], + "webhook_url": row[11], + "smtp_password_transit": row[12], + "slack_webhook_url": row[13], + } + + from app.services.notification_service import send_test_notification + try: + success = await send_test_notification(channel) + if success: + return {"status": "ok", "message": "Test notification sent successfully"} + else: + raise HTTPException(422, "Test notification delivery failed") + except HTTPException: + raise + except Exception as exc: + raise HTTPException(422, f"Test notification failed: {str(exc)}") + + +# ========================================================================= +# ALERT EVENTS (read + actions) +# ========================================================================= + + +@router.get( + "/tenants/{tenant_id}/alerts", + summary="List alert events with filtering and pagination", + dependencies=[require_scope("alerts:read")], +) +async def list_alerts( + tenant_id: uuid.UUID, + alert_status: Optional[str] = Query(None, alias="status"), + severity: Optional[str] = Query(None), + device_id: Optional[str] = Query(None), + rule_id: Optional[str] = Query(None), + start_date: Optional[str] = Query(None), + end_date: Optional[str] = Query(None), + page: int = Query(1, ge=1), + per_page: int = Query(50, ge=1, le=200), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + + filters = ["1=1"] + params: dict[str, Any] = {} + + if alert_status: + filters.append("ae.status = :status") + params["status"] = alert_status + if severity: + filters.append("ae.severity = :severity") + params["severity"] = severity + if device_id: + filters.append("ae.device_id = CAST(:device_id AS uuid)") + params["device_id"] = device_id + if rule_id: + filters.append("ae.rule_id = CAST(:rule_id AS uuid)") + params["rule_id"] = rule_id + if start_date: + filters.append("ae.fired_at >= CAST(:start_date AS timestamptz)") + params["start_date"] = start_date + if end_date: + filters.append("ae.fired_at <= CAST(:end_date AS timestamptz)") + params["end_date"] = end_date + + where = " AND ".join(filters) + offset = (page - 1) * per_page + + # Get total count + count_result = await db.execute( + text(f"SELECT COUNT(*) FROM alert_events ae WHERE {where}"), + params, + ) + total = count_result.scalar() or 0 + + # Get page of results with device hostname and rule name + result = await db.execute( + text(f""" + SELECT ae.id, ae.rule_id, ae.device_id, ae.tenant_id, + ae.status, ae.severity, ae.metric, ae.value, + ae.threshold, ae.message, ae.is_flapping, + ae.acknowledged_at, ae.silenced_until, + ae.fired_at, ae.resolved_at, + d.hostname AS device_hostname, + ar.name AS rule_name + FROM alert_events ae + LEFT JOIN devices d ON d.id = ae.device_id + LEFT JOIN alert_rules ar ON ar.id = ae.rule_id + WHERE {where} + ORDER BY ae.fired_at DESC + LIMIT :limit OFFSET :offset + """), + {**params, "limit": per_page, "offset": offset}, + ) + + items = [ + { + "id": str(row[0]), + "rule_id": str(row[1]) if row[1] else None, + "device_id": str(row[2]), + "tenant_id": str(row[3]), + "status": row[4], + "severity": row[5], + "metric": row[6], + "value": float(row[7]) if row[7] is not None else None, + "threshold": float(row[8]) if row[8] is not None else None, + "message": row[9], + "is_flapping": row[10], + "acknowledged_at": row[11].isoformat() if row[11] else None, + "silenced_until": row[12].isoformat() if row[12] else None, + "fired_at": row[13].isoformat() if row[13] else None, + "resolved_at": row[14].isoformat() if row[14] else None, + "device_hostname": row[15], + "rule_name": row[16], + } + for row in result.fetchall() + ] + + return { + "items": items, + "total": total, + "page": page, + "per_page": per_page, + } + + +@router.get( + "/tenants/{tenant_id}/alerts/active-count", + summary="Get count of active (firing) alerts for nav badge", + dependencies=[require_scope("alerts:read")], +) +async def get_active_alert_count( + tenant_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, int]: + await _check_tenant_access(current_user, tenant_id, db) + + result = await db.execute( + text(""" + SELECT COUNT(*) FROM alert_events + WHERE status = 'firing' + AND resolved_at IS NULL + AND (silenced_until IS NULL OR silenced_until < NOW()) + """) + ) + count = result.scalar() or 0 + return {"count": count} + + +@router.post( + "/tenants/{tenant_id}/alerts/{alert_id}/acknowledge", + summary="Acknowledge an active alert", +) +@limiter.limit("20/minute") +async def acknowledge_alert( + request: Request, + tenant_id: uuid.UUID, + alert_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, str]: + await _check_tenant_access(current_user, tenant_id, db) + _require_write(current_user) + + result = await db.execute( + text(""" + UPDATE alert_events + SET acknowledged_at = NOW(), acknowledged_by = CAST(:user_id AS uuid) + WHERE id = CAST(:id AS uuid) + RETURNING id + """), + {"id": str(alert_id), "user_id": str(current_user.user_id)}, + ) + if not result.fetchone(): + raise HTTPException(404, "Alert not found") + await db.commit() + + return {"status": "ok", "message": "Alert acknowledged"} + + +@router.post( + "/tenants/{tenant_id}/alerts/{alert_id}/silence", + summary="Silence an alert for a specified duration", +) +@limiter.limit("20/minute") +async def silence_alert( + request: Request, + tenant_id: uuid.UUID, + alert_id: uuid.UUID, + body: SilenceRequest, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, str]: + await _check_tenant_access(current_user, tenant_id, db) + _require_write(current_user) + + if body.duration_minutes < 1: + raise HTTPException(422, "duration_minutes must be at least 1") + + result = await db.execute( + text(""" + UPDATE alert_events + SET silenced_until = NOW() + (:minutes || ' minutes')::interval + WHERE id = CAST(:id AS uuid) + RETURNING id + """), + {"id": str(alert_id), "minutes": str(body.duration_minutes)}, + ) + if not result.fetchone(): + raise HTTPException(404, "Alert not found") + await db.commit() + + return {"status": "ok", "message": f"Alert silenced for {body.duration_minutes} minutes"} + + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}/alerts", + summary="List alerts for a specific device", + dependencies=[require_scope("alerts:read")], +) +async def list_device_alerts( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + alert_status: Optional[str] = Query(None, alias="status"), + page: int = Query(1, ge=1), + per_page: int = Query(20, ge=1, le=100), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + + filters = ["ae.device_id = CAST(:device_id AS uuid)"] + params: dict[str, Any] = {"device_id": str(device_id)} + + if alert_status: + filters.append("ae.status = :status") + params["status"] = alert_status + + where = " AND ".join(filters) + offset = (page - 1) * per_page + + count_result = await db.execute( + text(f"SELECT COUNT(*) FROM alert_events ae WHERE {where}"), + params, + ) + total = count_result.scalar() or 0 + + result = await db.execute( + text(f""" + SELECT ae.id, ae.rule_id, ae.device_id, ae.tenant_id, + ae.status, ae.severity, ae.metric, ae.value, + ae.threshold, ae.message, ae.is_flapping, + ae.acknowledged_at, ae.silenced_until, + ae.fired_at, ae.resolved_at, + ar.name AS rule_name + FROM alert_events ae + LEFT JOIN alert_rules ar ON ar.id = ae.rule_id + WHERE {where} + ORDER BY ae.fired_at DESC + LIMIT :limit OFFSET :offset + """), + {**params, "limit": per_page, "offset": offset}, + ) + + items = [ + { + "id": str(row[0]), + "rule_id": str(row[1]) if row[1] else None, + "device_id": str(row[2]), + "tenant_id": str(row[3]), + "status": row[4], + "severity": row[5], + "metric": row[6], + "value": float(row[7]) if row[7] is not None else None, + "threshold": float(row[8]) if row[8] is not None else None, + "message": row[9], + "is_flapping": row[10], + "acknowledged_at": row[11].isoformat() if row[11] else None, + "silenced_until": row[12].isoformat() if row[12] else None, + "fired_at": row[13].isoformat() if row[13] else None, + "resolved_at": row[14].isoformat() if row[14] else None, + "rule_name": row[15], + } + for row in result.fetchall() + ] + + return { + "items": items, + "total": total, + "page": page, + "per_page": per_page, + } diff --git a/backend/app/routers/api_keys.py b/backend/app/routers/api_keys.py new file mode 100644 index 0000000..e070a72 --- /dev/null +++ b/backend/app/routers/api_keys.py @@ -0,0 +1,172 @@ +"""API key management endpoints. + +Tenant-scoped routes under /api/tenants/{tenant_id}/api-keys: +- List all keys (active + revoked) +- Create new key (returns plaintext once) +- Revoke key (soft delete) + +RBAC: tenant_admin or above for all operations. +RLS enforced via get_db() (app_user engine with tenant context). +""" + +import uuid +from datetime import datetime +from typing import Optional + +from fastapi import APIRouter, Depends, HTTPException, status +from pydantic import BaseModel, ConfigDict +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db, set_tenant_context +from app.middleware.rbac import require_min_role +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.services.api_key_service import ( + ALLOWED_SCOPES, + create_api_key, + list_api_keys, + revoke_api_key, +) + +router = APIRouter(tags=["api-keys"]) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """Verify the current user is allowed to access the given tenant.""" + if current_user.is_super_admin: + await set_tenant_context(db, str(tenant_id)) + elif current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied to this tenant", + ) + + +# --------------------------------------------------------------------------- +# Request/response schemas +# --------------------------------------------------------------------------- + + +class ApiKeyCreate(BaseModel): + model_config = ConfigDict(extra="forbid") + name: str + scopes: list[str] + expires_at: Optional[datetime] = None + + +class ApiKeyResponse(BaseModel): + model_config = ConfigDict(from_attributes=True) + id: str + name: str + key_prefix: str + scopes: list[str] + expires_at: Optional[str] = None + last_used_at: Optional[str] = None + created_at: str + revoked_at: Optional[str] = None + + +class ApiKeyCreateResponse(ApiKeyResponse): + """Extended response that includes the plaintext key (shown once).""" + + key: str + + +# --------------------------------------------------------------------------- +# Endpoints +# --------------------------------------------------------------------------- + + +@router.get("/tenants/{tenant_id}/api-keys", response_model=list[ApiKeyResponse]) +async def list_keys( + tenant_id: uuid.UUID, + db: AsyncSession = Depends(get_db), + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("tenant_admin")), +) -> list[dict]: + """List all API keys for a tenant.""" + await _check_tenant_access(current_user, tenant_id, db) + keys = await list_api_keys(db, tenant_id) + # Convert UUID ids to strings for response + for k in keys: + k["id"] = str(k["id"]) + return keys + + +@router.post( + "/tenants/{tenant_id}/api-keys", + response_model=ApiKeyCreateResponse, + status_code=status.HTTP_201_CREATED, +) +async def create_key( + tenant_id: uuid.UUID, + body: ApiKeyCreate, + db: AsyncSession = Depends(get_db), + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("tenant_admin")), +) -> dict: + """Create a new API key. The plaintext key is returned only once.""" + await _check_tenant_access(current_user, tenant_id, db) + + # Validate scopes against allowed list + invalid_scopes = set(body.scopes) - ALLOWED_SCOPES + if invalid_scopes: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"Invalid scopes: {', '.join(sorted(invalid_scopes))}. " + f"Allowed: {', '.join(sorted(ALLOWED_SCOPES))}", + ) + + if not body.scopes: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="At least one scope is required.", + ) + + result = await create_api_key( + db=db, + tenant_id=tenant_id, + user_id=current_user.user_id, + name=body.name, + scopes=body.scopes, + expires_at=body.expires_at, + ) + + return { + "id": str(result["id"]), + "name": result["name"], + "key_prefix": result["key_prefix"], + "key": result["key"], + "scopes": result["scopes"], + "expires_at": result["expires_at"].isoformat() if result["expires_at"] else None, + "last_used_at": None, + "created_at": result["created_at"].isoformat() if result["created_at"] else None, + "revoked_at": None, + } + + +@router.delete("/tenants/{tenant_id}/api-keys/{key_id}", status_code=status.HTTP_200_OK) +async def revoke_key( + tenant_id: uuid.UUID, + key_id: uuid.UUID, + db: AsyncSession = Depends(get_db), + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("tenant_admin")), +) -> dict: + """Revoke an API key (soft delete -- sets revoked_at timestamp).""" + await _check_tenant_access(current_user, tenant_id, db) + + success = await revoke_api_key(db, tenant_id, key_id) + if not success: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="API key not found or already revoked.", + ) + + return {"status": "revoked", "key_id": str(key_id)} diff --git a/backend/app/routers/audit_logs.py b/backend/app/routers/audit_logs.py new file mode 100644 index 0000000..a769b3a --- /dev/null +++ b/backend/app/routers/audit_logs.py @@ -0,0 +1,294 @@ +"""Audit log API endpoints. + +Tenant-scoped routes under /api/tenants/{tenant_id}/ for: +- Paginated, filterable audit log listing +- CSV export of audit logs + +RLS enforced via get_db() (app_user engine with tenant context). +RBAC: operator and above can view audit logs. + +Phase 30: Audit log details are encrypted at rest via Transit (Tier 2). +When encrypted_details is set, the router decrypts via Transit on-demand +and returns the plaintext details in the response. Structural fields +(action, resource_type, timestamp, ip_address) are always plaintext. +""" + +import asyncio +import csv +import io +import json +import logging +import uuid +from datetime import datetime +from typing import Any, Optional + +from fastapi import APIRouter, Depends, HTTPException, Query, status +from fastapi.responses import StreamingResponse +from pydantic import BaseModel +from sqlalchemy import and_, func, select, text +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db, set_tenant_context +from app.middleware.tenant_context import CurrentUser, get_current_user + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=["audit-logs"]) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """Verify the current user is allowed to access the given tenant.""" + if current_user.is_super_admin: + await set_tenant_context(db, str(tenant_id)) + elif current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied to this tenant", + ) + + +def _require_operator(current_user: CurrentUser) -> None: + """Raise 403 if user does not have at least operator role.""" + allowed = {"super_admin", "admin", "operator"} + if current_user.role not in allowed: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="At least operator role required to view audit logs.", + ) + + +async def _decrypt_audit_details( + encrypted_details: str | None, + plaintext_details: dict[str, Any] | None, + tenant_id: str, +) -> dict[str, Any]: + """Decrypt encrypted audit log details via Transit, falling back to plaintext. + + Priority: + 1. If encrypted_details is set, decrypt via Transit and parse as JSON. + 2. If decryption fails, return plaintext details as fallback. + 3. If neither available, return empty dict. + """ + if encrypted_details: + try: + from app.services.crypto import decrypt_data_transit + + decrypted_json = await decrypt_data_transit(encrypted_details, tenant_id) + return json.loads(decrypted_json) + except Exception: + logger.warning( + "Failed to decrypt audit details for tenant %s, using plaintext fallback", + tenant_id, + exc_info=True, + ) + # Fall through to plaintext + return plaintext_details if plaintext_details else {} + + +async def _decrypt_details_batch( + rows: list[Any], + tenant_id: str, +) -> list[dict[str, Any]]: + """Decrypt encrypted_details for a batch of audit log rows concurrently. + + Uses asyncio.gather with limited concurrency to avoid overwhelming OpenBao. + Rows without encrypted_details return their plaintext details directly. + """ + semaphore = asyncio.Semaphore(10) # Limit concurrent Transit calls + + async def _decrypt_one(row: Any) -> dict[str, Any]: + async with semaphore: + return await _decrypt_audit_details( + row.get("encrypted_details"), + row.get("details"), + tenant_id, + ) + + return list(await asyncio.gather(*[_decrypt_one(row) for row in rows])) + + +# --------------------------------------------------------------------------- +# Response models +# --------------------------------------------------------------------------- + + +class AuditLogItem(BaseModel): + id: str + user_email: Optional[str] = None + action: str + resource_type: Optional[str] = None + resource_id: Optional[str] = None + device_name: Optional[str] = None + details: dict[str, Any] = {} + ip_address: Optional[str] = None + created_at: str + + +class AuditLogResponse(BaseModel): + items: list[AuditLogItem] + total: int + page: int + per_page: int + + +# --------------------------------------------------------------------------- +# Endpoints +# --------------------------------------------------------------------------- + + +@router.get( + "/tenants/{tenant_id}/audit-logs", + response_model=AuditLogResponse, + summary="List audit logs with pagination and filters", +) +async def list_audit_logs( + tenant_id: uuid.UUID, + page: int = Query(default=1, ge=1), + per_page: int = Query(default=50, ge=1, le=100), + action: Optional[str] = Query(default=None), + user_id: Optional[uuid.UUID] = Query(default=None), + device_id: Optional[uuid.UUID] = Query(default=None), + date_from: Optional[datetime] = Query(default=None), + date_to: Optional[datetime] = Query(default=None), + format: Optional[str] = Query(default=None, description="Set to 'csv' for CSV export"), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> Any: + _require_operator(current_user) + await _check_tenant_access(current_user, tenant_id, db) + + # Build filter conditions using parameterized text fragments + conditions = [text("a.tenant_id = :tenant_id")] + params: dict[str, Any] = {"tenant_id": str(tenant_id)} + + if action: + conditions.append(text("a.action = :action")) + params["action"] = action + + if user_id: + conditions.append(text("a.user_id = :user_id")) + params["user_id"] = str(user_id) + + if device_id: + conditions.append(text("a.device_id = :device_id")) + params["device_id"] = str(device_id) + + if date_from: + conditions.append(text("a.created_at >= :date_from")) + params["date_from"] = date_from.isoformat() + + if date_to: + conditions.append(text("a.created_at <= :date_to")) + params["date_to"] = date_to.isoformat() + + where_clause = and_(*conditions) + + # Shared SELECT columns for data queries + _data_columns = text( + "a.id, u.email AS user_email, a.action, a.resource_type, " + "a.resource_id, d.hostname AS device_name, a.details, " + "a.encrypted_details, a.ip_address, a.created_at" + ) + _data_from = text( + "audit_logs a " + "LEFT JOIN users u ON a.user_id = u.id " + "LEFT JOIN devices d ON a.device_id = d.id" + ) + + # Count total + count_result = await db.execute( + select(func.count()).select_from(text("audit_logs a")).where(where_clause), + params, + ) + total = count_result.scalar() or 0 + + # CSV export -- no pagination limit + if format == "csv": + result = await db.execute( + select(_data_columns) + .select_from(_data_from) + .where(where_clause) + .order_by(text("a.created_at DESC")), + params, + ) + all_rows = result.mappings().all() + + # Decrypt encrypted details concurrently + decrypted_details = await _decrypt_details_batch( + all_rows, str(tenant_id) + ) + + output = io.StringIO() + writer = csv.writer(output) + writer.writerow([ + "ID", "User Email", "Action", "Resource Type", + "Resource ID", "Device", "Details", "IP Address", "Timestamp", + ]) + for row, details in zip(all_rows, decrypted_details): + details_str = json.dumps(details) if details else "{}" + writer.writerow([ + str(row["id"]), + row["user_email"] or "", + row["action"], + row["resource_type"] or "", + row["resource_id"] or "", + row["device_name"] or "", + details_str, + row["ip_address"] or "", + str(row["created_at"]), + ]) + + output.seek(0) + return StreamingResponse( + iter([output.getvalue()]), + media_type="text/csv", + headers={"Content-Disposition": "attachment; filename=audit-logs.csv"}, + ) + + # Paginated query + offset = (page - 1) * per_page + params["limit"] = per_page + params["offset"] = offset + + result = await db.execute( + select(_data_columns) + .select_from(_data_from) + .where(where_clause) + .order_by(text("a.created_at DESC")) + .limit(per_page) + .offset(offset), + params, + ) + rows = result.mappings().all() + + # Decrypt encrypted details concurrently (skips rows without encrypted_details) + decrypted_details = await _decrypt_details_batch(rows, str(tenant_id)) + + items = [ + AuditLogItem( + id=str(row["id"]), + user_email=row["user_email"], + action=row["action"], + resource_type=row["resource_type"], + resource_id=row["resource_id"], + device_name=row["device_name"], + details=details, + ip_address=row["ip_address"], + created_at=row["created_at"].isoformat() if row["created_at"] else "", + ) + for row, details in zip(rows, decrypted_details) + ] + + return AuditLogResponse( + items=items, + total=total, + page=page, + per_page=per_page, + ) diff --git a/backend/app/routers/auth.py b/backend/app/routers/auth.py new file mode 100644 index 0000000..1aedccf --- /dev/null +++ b/backend/app/routers/auth.py @@ -0,0 +1,1052 @@ +""" +Authentication endpoints. + +POST /api/auth/login — email/password login, returns JWT tokens +POST /api/auth/refresh — refresh access token using refresh token +POST /api/auth/logout — clear httpOnly cookie +GET /api/auth/me — return current user info +POST /api/auth/forgot-password — send password reset email +POST /api/auth/reset-password — reset password with token +POST /api/auth/srp/init — SRP Step 1: return salt and server ephemeral B +POST /api/auth/srp/verify — SRP Step 2: verify client proof M1, return tokens +GET /api/auth/emergency-kit-template — generate Emergency Kit PDF (without Secret Key) +POST /api/auth/register-srp — store SRP verifier and encrypted key set +""" + +import base64 +import hashlib +import io +import json +import logging +import secrets +import uuid +from datetime import UTC, datetime, timedelta +from typing import Optional + +import redis.asyncio as aioredis +from fastapi import APIRouter, Cookie, Depends, HTTPException, Response, status +from fastapi.responses import JSONResponse, StreamingResponse +from sqlalchemy import select, update +from sqlalchemy.ext.asyncio import AsyncSession +from starlette.requests import Request as StarletteRequest + +from app.config import settings +from app.database import AdminAsyncSessionLocal, get_admin_db +from app.services.audit_service import log_action +from app.services.srp_service import srp_init, srp_verify +from app.services.key_service import get_user_key_set, log_key_access, store_user_key_set +from app.middleware.rate_limit import limiter +from app.middleware.rbac import require_authenticated +from app.middleware.tenant_context import CurrentUser +from app.models.user import User +from app.schemas.auth import ( + ChangePasswordRequest, + DeleteAccountRequest, + DeleteAccountResponse, + ForgotPasswordRequest, + LoginRequest, + MessageResponse, + RefreshRequest, + ResetPasswordRequest, + SRPInitRequest, + SRPInitResponse, + SRPRegisterRequest, + SRPVerifyRequest, + SRPVerifyResponse, + TokenResponse, + UserMeResponse, +) +from app.services.account_service import delete_user_account, export_user_data +from app.services.auth import ( + create_access_token, + create_refresh_token, + hash_password, + is_token_revoked, + revoke_user_tokens, + verify_password, + verify_token, +) + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/auth", tags=["auth"]) + +# Access token cookie settings +ACCESS_TOKEN_COOKIE = "access_token" +ACCESS_TOKEN_MAX_AGE = 15 * 60 # 15 minutes in seconds + +# Cookie Secure flag requires HTTPS. Safari strictly enforces this — +# it silently drops Secure cookies over plain HTTP, unlike Chrome +# which exempts localhost. Auto-detect from CORS origins: if all +# origins are HTTPS, enable Secure; otherwise disable it. +_COOKIE_SECURE = all( + o.startswith("https://") for o in (settings.CORS_ORIGINS or "").split(",") if o.strip() +) + +# ─── Redis for SRP Sessions ────────────────────────────────────────────────── + +_redis: aioredis.Redis | None = None + + +async def get_redis() -> aioredis.Redis: + """Lazily initialise and return the SRP Redis client.""" + global _redis + if _redis is None: + _redis = aioredis.from_url(settings.REDIS_URL, decode_responses=True) + return _redis + + +# ─── SRP Zero-Knowledge Authentication ─────────────────────────────────────── + + +@router.post("/srp/init", response_model=SRPInitResponse, summary="SRP Step 1: return salt and server ephemeral B") +@limiter.limit("5/minute") +async def srp_init_endpoint( + request: StarletteRequest, + body: SRPInitRequest, + db: AsyncSession = Depends(get_admin_db), +) -> SRPInitResponse: + """SRP Step 1: Return salt and server ephemeral B. + + Anti-enumeration: returns a deterministic fake response if the user + does not exist or has no SRP credentials. The fake response is + derived from a hash of the email so it is consistent for repeated + queries against the same unknown address. + """ + # Look up user (case-insensitive) + result = await db.execute(select(User).where(User.email == body.email.lower())) + user = result.scalar_one_or_none() + + # Anti-enumeration: return fake salt/B if user not found or not SRP-enrolled + if not user or not user.srp_verifier: + fake_hash = hashlib.sha256(f"srp-fake-{body.email}".encode()).hexdigest() + return SRPInitResponse( + salt=fake_hash[:64], + server_public=fake_hash * 8, # 512 hex chars (256 bytes) + session_id=secrets.token_urlsafe(16), + pbkdf2_salt=base64.b64encode(bytes.fromhex(fake_hash[:64])).decode(), + hkdf_salt=base64.b64encode(bytes.fromhex(fake_hash[:64])).decode(), + ) + + # Fetch key derivation salts from user_key_sets (needed by client BEFORE SRP verify) + key_set = await get_user_key_set(db, user.id) + + # Generate server ephemeral + try: + server_public, server_private = await srp_init( + user.email, user.srp_verifier.hex() + ) + except ValueError as e: + logger.error("SRP init failed for %s: %s", user.email, e) + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Authentication initialization failed. Please try again.", + ) + + # Store session in Redis with 60s TTL + session_id = secrets.token_urlsafe(16) + redis = await get_redis() + session_data = json.dumps({ + "email": user.email, + "server_private": server_private, + "srp_verifier_hex": user.srp_verifier.hex(), + "srp_salt_hex": user.srp_salt.hex(), + "user_id": str(user.id), + }) + await redis.set(f"srp:session:{session_id}", session_data, ex=60) + + return SRPInitResponse( + salt=user.srp_salt.hex(), + server_public=server_public, + session_id=session_id, + pbkdf2_salt=base64.b64encode(key_set.pbkdf2_salt).decode() if key_set else "", + hkdf_salt=base64.b64encode(key_set.hkdf_salt).decode() if key_set else "", + ) + + +@router.post("/srp/verify", response_model=SRPVerifyResponse, summary="SRP Step 2: verify client proof and return tokens") +@limiter.limit("5/minute") +async def srp_verify_endpoint( + request: StarletteRequest, + body: SRPVerifyRequest, + response: Response, + db: AsyncSession = Depends(get_admin_db), +) -> SRPVerifyResponse: + """SRP Step 2: Verify client proof M1, return server proof M2 + JWT tokens. + + The session is consumed (deleted from Redis) immediately on retrieval + to enforce single-use. If the proof is invalid, the session cannot + be retried — the client must restart from /srp/init. + """ + invalid_error = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid credentials", + ) + + # Retrieve session from Redis + redis = await get_redis() + session_raw = await redis.get(f"srp:session:{body.session_id}") + if not session_raw: + raise invalid_error + + # Delete session immediately (one-use) + await redis.delete(f"srp:session:{body.session_id}") + + session = json.loads(session_raw) + + # Verify email matches + if session["email"] != body.email.lower(): + raise invalid_error + + # Run SRP verification + try: + is_valid, server_proof = await srp_verify( + email=session["email"], + srp_verifier_hex=session["srp_verifier_hex"], + server_private=session["server_private"], + client_public=body.client_public, + client_proof=body.client_proof, + srp_salt_hex=session["srp_salt_hex"], + ) + except ValueError as e: + logger.error("SRP verify failed for %s: %s", session["email"], e) + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Authentication verification failed. Please try again.", + ) + + if not is_valid: + raise invalid_error + + # Fetch user for token creation + user_id = uuid.UUID(session["user_id"]) + result = await db.execute(select(User).where(User.id == user_id)) + user = result.scalar_one_or_none() + + if not user or not user.is_active: + raise invalid_error + + # Create JWT tokens (same as existing login) + access_token = create_access_token(user.id, user.tenant_id, user.role) + refresh_token = create_refresh_token(user.id) + + # Update last_login and clear upgrade flag on successful SRP login + await db.execute( + update(User).where(User.id == user.id).values( + last_login=datetime.now(UTC), + must_upgrade_auth=False, + ) + ) + await db.commit() + + # Set cookie (same as existing login) + response.set_cookie( + key=ACCESS_TOKEN_COOKIE, + value=access_token, + max_age=ACCESS_TOKEN_MAX_AGE, + httponly=True, + secure=_COOKIE_SECURE, + samesite="lax", + ) + + # Fetch encrypted key set + key_set = await get_user_key_set(db, user.id) + encrypted_key_set = None + if key_set: + encrypted_key_set = { + "encrypted_private_key": base64.b64encode(key_set.encrypted_private_key).decode(), + "private_key_nonce": base64.b64encode(key_set.private_key_nonce).decode(), + "encrypted_vault_key": base64.b64encode(key_set.encrypted_vault_key).decode(), + "vault_key_nonce": base64.b64encode(key_set.vault_key_nonce).decode(), + "public_key": base64.b64encode(key_set.public_key).decode(), + "pbkdf2_salt": base64.b64encode(key_set.pbkdf2_salt).decode(), + "hkdf_salt": base64.b64encode(key_set.hkdf_salt).decode(), + "pbkdf2_iterations": key_set.pbkdf2_iterations, + } + + # Audit log + try: + async with AdminAsyncSessionLocal() as audit_db: + await log_action( + audit_db, + tenant_id=user.tenant_id or uuid.UUID(int=0), + user_id=user.id, + action="login_srp", + resource_type="auth", + details={"email": user.email, "role": user.role}, + ip_address=request.client.host if request.client else None, + ) + await audit_db.commit() + except Exception: + pass + + return SRPVerifyResponse( + access_token=access_token, + refresh_token=refresh_token, + token_type="bearer", + server_proof=server_proof or "", + encrypted_key_set=encrypted_key_set, + ) + + +@router.post("/login", response_model=TokenResponse, summary="Authenticate with email and password") +@limiter.limit("5/minute") +async def login( + request: StarletteRequest, + body: LoginRequest, + response: Response, + db: AsyncSession = Depends(get_admin_db), +) -> TokenResponse: + """ + Login entry point — redirects to SRP for all enrolled users. + + For SRP-enrolled users: returns 409 srp_required (frontend auto-switches). + For legacy bcrypt users (must_upgrade_auth=True): verifies bcrypt password + and returns a temporary session with auth_upgrade_required=True so the + frontend can register SRP credentials before completing login. + + Anti-enumeration: dummy verify_password for unknown users preserves timing. + Rate limited to 5 requests per minute per IP. + """ + # Look up user by email (case-insensitive) + result = await db.execute( + select(User).where(User.email == body.email.lower()) + ) + user = result.scalar_one_or_none() + + # Generic error — do not reveal whether email exists (no user enumeration) + invalid_credentials_error = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid credentials", + headers={"WWW-Authenticate": "Bearer"}, + ) + + if not user: + # Perform dummy verification to prevent timing attacks + verify_password("dummy", "$2b$12$/MSofyKqE3MkwXyzhigw.OHIefMM.qb5xGt/t9OAwbxgDGnyZjmrG") + raise invalid_credentials_error + + if not user.is_active: + # Still run dummy verify for timing consistency + verify_password("dummy", "$2b$12$/MSofyKqE3MkwXyzhigw.OHIefMM.qb5xGt/t9OAwbxgDGnyZjmrG") + raise invalid_credentials_error + + # SRP-enrolled users: redirect to SRP flow + if user.srp_verifier is not None: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail="srp_required", + headers={"X-Auth-Method": "srp"}, + ) + + # Bcrypt user (auth_version 1) — verify password + if user.hashed_password: + if not verify_password(body.password, user.hashed_password): + raise invalid_credentials_error + + # Correct bcrypt password — issue session + access_token = create_access_token( + user_id=user.id, + tenant_id=user.tenant_id, + role=user.role, + ) + refresh = create_refresh_token(user.id) + + response.set_cookie( + key=ACCESS_TOKEN_COOKIE, + value=access_token, + max_age=ACCESS_TOKEN_MAX_AGE, + httponly=True, + secure=_COOKIE_SECURE, + samesite="lax", + ) + + # Update last_login + await db.execute( + update(User).where(User.id == user.id).values( + last_login=datetime.now(UTC), + ) + ) + await db.commit() + + # Audit log (fire-and-forget) + try: + async with AdminAsyncSessionLocal() as audit_db: + await log_action( + audit_db, + tenant_id=user.tenant_id or uuid.UUID(int=0), + user_id=user.id, + action="login_upgrade" if user.must_upgrade_auth else "login", + resource_type="auth", + details={"email": user.email, **({"upgrade": "bcrypt_to_srp"} if user.must_upgrade_auth else {})}, + ip_address=request.client.host if request.client else None, + ) + await audit_db.commit() + except Exception: + pass + + return TokenResponse( + access_token=access_token, + refresh_token=refresh if not user.must_upgrade_auth else "", + token_type="bearer", + auth_upgrade_required=user.must_upgrade_auth, + ) + + # No valid credentials at all + raise invalid_credentials_error + + +@router.post("/refresh", response_model=TokenResponse, summary="Refresh access token") +@limiter.limit("10/minute") +async def refresh_token( + request: StarletteRequest, + body: RefreshRequest, + response: Response, + db: AsyncSession = Depends(get_admin_db), + redis: aioredis.Redis = Depends(get_redis), +) -> TokenResponse: + """ + Exchange a valid refresh token for a new access token. + Rate limited to 10 requests per minute per IP. + """ + # Validate refresh token + payload = verify_token(body.refresh_token, expected_type="refresh") + + user_id_str = payload.get("sub") + if not user_id_str: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid refresh token", + ) + + try: + user_id = uuid.UUID(user_id_str) + except ValueError: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid refresh token", + ) + + # Check if token was revoked (issued before logout) + issued_at = payload.get("iat", 0) + if await is_token_revoked(redis, user_id_str, float(issued_at)): + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Token has been revoked", + ) + + # Fetch current user state from DB + result = await db.execute(select(User).where(User.id == user_id)) + user = result.scalar_one_or_none() + + if not user or not user.is_active: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="User not found or inactive", + ) + + # Issue new tokens + new_access_token = create_access_token( + user_id=user.id, + tenant_id=user.tenant_id, + role=user.role, + ) + new_refresh_token = create_refresh_token(user_id=user.id) + + # Update cookie + response.set_cookie( + key=ACCESS_TOKEN_COOKIE, + value=new_access_token, + max_age=ACCESS_TOKEN_MAX_AGE, + httponly=True, + secure=_COOKIE_SECURE, + samesite="lax", + ) + + return TokenResponse( + access_token=new_access_token, + refresh_token=new_refresh_token, + token_type="bearer", + ) + + +@router.post("/logout", status_code=status.HTTP_204_NO_CONTENT, summary="Log out and clear session cookie") +@limiter.limit("10/minute") +async def logout( + request: StarletteRequest, + response: Response, + current_user: CurrentUser = Depends(require_authenticated), + redis: aioredis.Redis = Depends(get_redis), +) -> None: + """Clear the httpOnly access token cookie and revoke all refresh tokens.""" + # Revoke all refresh tokens for this user + await revoke_user_tokens(redis, str(current_user.user_id)) + + # Audit log for logout + try: + tenant_id = current_user.tenant_id or uuid.UUID(int=0) + async with AdminAsyncSessionLocal() as audit_db: + await log_action( + audit_db, tenant_id, current_user.user_id, "logout", + resource_type="auth", + ip_address=request.client.host if request.client else None, + ) + await audit_db.commit() + except Exception: + pass # Fire-and-forget: never fail logout + + response.delete_cookie( + key=ACCESS_TOKEN_COOKIE, + httponly=True, + secure=_COOKIE_SECURE, + samesite="lax", + ) + + +@router.post("/change-password", response_model=MessageResponse, summary="Change password for authenticated user") +@limiter.limit("3/minute") +async def change_password( + request: StarletteRequest, + body: ChangePasswordRequest, + current_user: CurrentUser = Depends(require_authenticated), + db: AsyncSession = Depends(get_admin_db), + redis: aioredis.Redis = Depends(get_redis), +) -> MessageResponse: + """Change the current user's password. Revokes all existing sessions.""" + result = await db.execute(select(User).where(User.id == current_user.user_id)) + user = result.scalar_one_or_none() + if not user: + raise HTTPException(status_code=404, detail="User not found") + + # For SRP users (auth_version 2): client must provide new salt, verifier, and key bundle + if user.auth_version == 2: + if not body.new_srp_salt or not body.new_srp_verifier: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="SRP users must provide new salt and verifier", + ) + # Update SRP credentials + user.srp_salt = bytes.fromhex(body.new_srp_salt) + user.srp_verifier = bytes.fromhex(body.new_srp_verifier) + + # Also update bcrypt hash as a login fallback if SRP ever fails + # (e.g., crypto.subtle unavailable on HTTP, stale Secret Key, etc.) + if body.new_password: + user.hashed_password = hash_password(body.new_password) + + # Update re-wrapped key bundle if provided + if body.encrypted_private_key and body.pbkdf2_salt: + existing_ks = await get_user_key_set(db, user.id) + if existing_ks: + existing_ks.encrypted_private_key = base64.b64decode(body.encrypted_private_key) + existing_ks.private_key_nonce = base64.b64decode(body.private_key_nonce or "") + existing_ks.encrypted_vault_key = base64.b64decode(body.encrypted_vault_key or "") + existing_ks.vault_key_nonce = base64.b64decode(body.vault_key_nonce or "") + existing_ks.public_key = base64.b64decode(body.public_key or "") + existing_ks.pbkdf2_salt = base64.b64decode(body.pbkdf2_salt) + existing_ks.hkdf_salt = base64.b64decode(body.hkdf_salt or "") + else: + # Legacy bcrypt user — verify current password + if not user.hashed_password or not verify_password(body.current_password, user.hashed_password): + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Current password is incorrect", + ) + user.hashed_password = hash_password(body.new_password) + + # Revoke all existing sessions + await revoke_user_tokens(redis, str(user.id)) + + await db.commit() + + # Audit log + try: + async with AdminAsyncSessionLocal() as audit_db: + await log_action( + audit_db, + tenant_id=user.tenant_id or uuid.UUID(int=0), + user_id=user.id, + action="password_change", + resource_type="user", + details={"ip": request.client.host if request.client else None}, + ip_address=request.client.host if request.client else None, + ) + await audit_db.commit() + except Exception: + pass + + return MessageResponse(message="Password changed successfully. Please sign in again.") + + +@router.get("/me", response_model=UserMeResponse, summary="Get current user profile") +async def get_me( + current_user: CurrentUser = Depends(require_authenticated), + db: AsyncSession = Depends(get_admin_db), +) -> UserMeResponse: + """Return current user info from JWT payload.""" + # Fetch from DB to get latest data + result = await db.execute(select(User).where(User.id == current_user.user_id)) + user = result.scalar_one_or_none() + + if not user: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="User not found", + ) + + return UserMeResponse( + id=user.id, + email=user.email, + name=user.name, + role=user.role, + tenant_id=user.tenant_id, + auth_version=user.auth_version or 1, + ) + + +# ─── Account Self-Service (Deletion & Export) ───────────────────────────────── + + +@router.delete( + "/delete-my-account", + response_model=DeleteAccountResponse, + summary="Delete your own account and erase all PII", +) +@limiter.limit("1/minute") +async def delete_my_account( + request: StarletteRequest, + body: DeleteAccountRequest, + response: Response, + current_user: CurrentUser = Depends(require_authenticated), + db: AsyncSession = Depends(get_admin_db), +) -> DeleteAccountResponse: + """Permanently delete the authenticated user's account. + + Performs full PII erasure: anonymizes audit logs, scrubs encrypted + details, and hard-deletes the user row (CASCADE handles related + tables). Requires typing 'DELETE' as confirmation. + """ + from sqlalchemy import text as sa_text + + # Validate confirmation + if body.confirmation != "DELETE": + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail="You must type 'DELETE' to confirm account deletion.", + ) + + # Super admin protection: cannot delete last super admin + if current_user.role == "super_admin": + result = await db.execute( + sa_text( + "SELECT COUNT(*) AS cnt FROM users " + "WHERE role = 'super_admin' AND is_active = true " + "AND id != :current_user_id" + ), + {"current_user_id": current_user.user_id}, + ) + other_admins = result.scalar_one() + if other_admins == 0: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Cannot delete the last super admin account. Transfer the role first.", + ) + + # Fetch user email BEFORE deletion (needed for audit hash) + result = await db.execute( + sa_text("SELECT email FROM users WHERE id = :user_id"), + {"user_id": current_user.user_id}, + ) + email_row = result.mappings().first() + if not email_row: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="User not found.", + ) + user_email = email_row["email"] + + # Perform account deletion + await delete_user_account( + db=db, + user_id=current_user.user_id, + tenant_id=current_user.tenant_id, + user_email=user_email, + ) + + # Clear access token cookie (same pattern as logout) + response.delete_cookie( + key=ACCESS_TOKEN_COOKIE, + httponly=True, + secure=_COOKIE_SECURE, + samesite="lax", + ) + + return DeleteAccountResponse( + message="Account deleted successfully. All personal data has been erased.", + deleted=True, + ) + + +@router.get( + "/export-my-data", + summary="Export all your personal data (GDPR Art. 20)", +) +@limiter.limit("3/minute") +async def export_my_data( + request: StarletteRequest, + current_user: CurrentUser = Depends(require_authenticated), + db: AsyncSession = Depends(get_admin_db), +) -> JSONResponse: + """Export all personal data for the authenticated user. + + Returns a JSON file containing user profile, API keys, audit logs, + and key access log entries. Complies with GDPR Article 20 + (Right to Data Portability). + """ + data = await export_user_data( + db=db, + user_id=current_user.user_id, + tenant_id=current_user.tenant_id, + ) + + # Audit log the export action + try: + async with AdminAsyncSessionLocal() as audit_db: + await log_action( + audit_db, + tenant_id=current_user.tenant_id or uuid.UUID(int=0), + user_id=current_user.user_id, + action="data_export", + resource_type="user", + details={"type": "gdpr_art20"}, + ip_address=request.client.host if request.client else None, + ) + await audit_db.commit() + except Exception: + pass # Fire-and-forget: never fail the export + + return JSONResponse( + content=data, + headers={ + "Content-Disposition": 'attachment; filename="my-data-export.json"', + }, + ) + + +# ─── Emergency Kit & SRP Registration ───────────────────────────────────────── + + +@router.get("/emergency-kit-template", summary="Generate Emergency Kit PDF template") +@limiter.limit("3/minute") +async def get_emergency_kit_template( + request: StarletteRequest, + current_user: CurrentUser = Depends(require_authenticated), + db: AsyncSession = Depends(get_admin_db), +) -> StreamingResponse: + """Generate Emergency Kit PDF template (without Secret Key). + + The Secret Key is injected client-side. This endpoint returns + a PDF with a placeholder that the browser fills in before + the user downloads it. + """ + from app.services.emergency_kit_service import generate_emergency_kit_template + + result = await db.execute(select(User).where(User.id == current_user.user_id)) + user = result.scalar_one_or_none() + if not user: + raise HTTPException(status_code=404, detail="User not found") + + pdf_bytes = await generate_emergency_kit_template(email=user.email) + + return StreamingResponse( + io.BytesIO(pdf_bytes), + media_type="application/pdf", + headers={ + "Content-Disposition": 'attachment; filename="MikroTik-Portal-Emergency-Kit.pdf"', + }, + ) + + +@router.post("/register-srp", response_model=MessageResponse, summary="Register SRP credentials for a user") +@limiter.limit("3/minute") +async def register_srp( + request: StarletteRequest, + body: SRPRegisterRequest, + current_user: CurrentUser = Depends(require_authenticated), + db: AsyncSession = Depends(get_admin_db), +) -> MessageResponse: + """Store SRP verifier and encrypted key set for the current user. + + Called after client-side key generation during initial setup + or when upgrading from bcrypt to SRP. + """ + result = await db.execute(select(User).where(User.id == current_user.user_id)) + user = result.scalar_one_or_none() + if not user: + raise HTTPException(status_code=404, detail="User not found") + + if user.srp_verifier is not None: + raise HTTPException(status_code=409, detail="SRP already registered") + + # Update user with SRP credentials and clear upgrade flag + await db.execute( + update(User).where(User.id == user.id).values( + srp_salt=bytes.fromhex(body.srp_salt), + srp_verifier=bytes.fromhex(body.srp_verifier), + auth_version=2, + must_upgrade_auth=False, + ) + ) + + # Store encrypted key set + await store_user_key_set( + db=db, + user_id=user.id, + tenant_id=user.tenant_id, + encrypted_private_key=base64.b64decode(body.encrypted_private_key), + private_key_nonce=base64.b64decode(body.private_key_nonce), + encrypted_vault_key=base64.b64decode(body.encrypted_vault_key), + vault_key_nonce=base64.b64decode(body.vault_key_nonce), + public_key=base64.b64decode(body.public_key), + pbkdf2_salt=base64.b64decode(body.pbkdf2_salt), + hkdf_salt=base64.b64decode(body.hkdf_salt), + ) + + await db.commit() + + # Audit log + try: + async with AdminAsyncSessionLocal() as audit_db: + await log_key_access( + audit_db, user.tenant_id or uuid.UUID(int=0), user.id, + "create_key_set", resource_type="user_key_set", + ip_address=request.client.host if request.client else None, + ) + await audit_db.commit() + except Exception: + pass + + return MessageResponse(message="SRP credentials registered successfully") + + +# ─── SSE Exchange Tokens ───────────────────────────────────────────────────── + + +@router.post("/sse-token", summary="Issue a short-lived SSE exchange token") +async def create_sse_token( + current_user: CurrentUser = Depends(require_authenticated), + redis: aioredis.Redis = Depends(get_redis), +) -> dict: + """Issue a 30-second, single-use token for SSE connections. + + Replaces sending the full JWT in the SSE URL query parameter. + The returned token is stored in Redis with user context and a 30s TTL. + The SSE endpoint retrieves and deletes it on first use (single-use). + """ + token = secrets.token_urlsafe(32) + key = f"sse_token:{token}" + # Store user context for the SSE endpoint to retrieve + await redis.set(key, json.dumps({ + "user_id": str(current_user.user_id), + "tenant_id": str(current_user.tenant_id) if current_user.tenant_id else None, + "role": current_user.role, + }), ex=30) # 30 second TTL + return {"token": token} + + +# ─── Password Reset ────────────────────────────────────────────────────────── + + +def _hash_token(token: str) -> str: + """SHA-256 hash a reset token so plaintext is never stored.""" + return hashlib.sha256(token.encode()).hexdigest() + + +async def _send_reset_email(email: str, token: str) -> None: + """Send password reset email via unified email service.""" + from app.routers.settings import get_smtp_config + from app.services.email_service import send_email + + reset_url = f"{settings.APP_BASE_URL}/reset-password?token={token}" + expire_mins = settings.PASSWORD_RESET_TOKEN_EXPIRE_MINUTES + + plain = ( + f"You requested a password reset for The Other Dude.\n\n" + f"Click the link below to reset your password (valid for {expire_mins} minutes):\n\n" + f"{reset_url}\n\n" + f"If you did not request this, you can safely ignore this email." + ) + + html = f""" +
+
+

Password Reset

+
+
+

You requested a password reset for The Other Dude.

+

Click the button below to reset your password. This link is valid for {expire_mins} minutes.

+ +

+ If you did not request this, you can safely ignore this email. +

+

+ TOD — Fleet Management for MikroTik RouterOS +

+
+
+ """ + + smtp_config = await get_smtp_config() + await send_email(email, "TOD — Password Reset", html, plain, smtp_config) + + +@router.post( + "/forgot-password", + response_model=MessageResponse, + summary="Request password reset email", +) +@limiter.limit("3/minute") +async def forgot_password( + request: StarletteRequest, + body: ForgotPasswordRequest, + db: AsyncSession = Depends(get_admin_db), +) -> MessageResponse: + """Send a password reset link if the email exists. + + Always returns success to prevent user enumeration. + Rate limited to 3 requests per minute per IP. + """ + generic_msg = "If an account with that email exists, a reset link has been sent." + + result = await db.execute( + select(User).where(User.email == body.email.lower()) + ) + user = result.scalar_one_or_none() + + if not user or not user.is_active: + return MessageResponse(message=generic_msg) + + # Generate a secure token + raw_token = secrets.token_urlsafe(32) + token_hash = _hash_token(raw_token) + expires_at = datetime.now(UTC) + timedelta( + minutes=settings.PASSWORD_RESET_TOKEN_EXPIRE_MINUTES + ) + + # Insert token record (using raw SQL to avoid importing the model globally) + from sqlalchemy import text + + await db.execute( + text( + "INSERT INTO password_reset_tokens (user_id, token_hash, expires_at) " + "VALUES (:user_id, :token_hash, :expires_at)" + ), + {"user_id": user.id, "token_hash": token_hash, "expires_at": expires_at}, + ) + await db.commit() + + # Send email (best-effort) + try: + await _send_reset_email(user.email, raw_token) + except Exception as e: + logger.warning("Failed to send password reset email to %s: %s", user.email, e) + + return MessageResponse(message=generic_msg) + + +@router.post( + "/reset-password", + response_model=MessageResponse, + summary="Reset password with token", +) +@limiter.limit("5/minute") +async def reset_password( + request: StarletteRequest, + body: ResetPasswordRequest, + db: AsyncSession = Depends(get_admin_db), +) -> MessageResponse: + """Validate the reset token and update the user's password. + + Rate limited to 5 requests per minute per IP. + """ + from sqlalchemy import text + + token_hash = _hash_token(body.token) + + # Find the token record + result = await db.execute( + text( + "SELECT id, user_id, expires_at, used_at " + "FROM password_reset_tokens " + "WHERE token_hash = :token_hash" + ), + {"token_hash": token_hash}, + ) + row = result.mappings().first() + + if not row: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Invalid or expired reset token.", + ) + + if row["used_at"] is not None: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="This reset link has already been used.", + ) + + if row["expires_at"] < datetime.now(UTC): + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Invalid or expired reset token.", + ) + + # Validate password strength (minimum 8 characters) + if len(body.new_password) < 8: + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail="Password must be at least 8 characters.", + ) + + # Update the user's password and clear SRP credentials for re-registration. + # The bcrypt hash is kept as a temporary credential for the upgrade flow: + # user logs in with bcrypt -> gets temp session -> registers SRP -> done. + new_hash = hash_password(body.new_password) + await db.execute( + text( + "UPDATE users SET hashed_password = :pw, auth_version = 1, " + "must_upgrade_auth = true, srp_salt = NULL, srp_verifier = NULL, " + "updated_at = now() WHERE id = :uid" + ), + {"pw": new_hash, "uid": row["user_id"]}, + ) + + # Mark token as used + await db.execute( + text("UPDATE password_reset_tokens SET used_at = now() WHERE id = :tid"), + {"tid": row["id"]}, + ) + + await db.commit() + + # Audit log + try: + async with AdminAsyncSessionLocal() as audit_db: + await log_action( + audit_db, + tenant_id=uuid.UUID(int=0), + user_id=row["user_id"], + action="password_reset", + resource_type="auth", + ip_address=request.client.host if request.client else None, + ) + await audit_db.commit() + except Exception: + pass + + return MessageResponse(message="Password has been reset successfully.") diff --git a/backend/app/routers/certificates.py b/backend/app/routers/certificates.py new file mode 100644 index 0000000..effe93f --- /dev/null +++ b/backend/app/routers/certificates.py @@ -0,0 +1,763 @@ +"""Certificate Authority management API endpoints. + +Provides the full certificate lifecycle for tenant CAs: +- CA initialization and info retrieval +- Per-device certificate signing +- Certificate deployment via NATS to Go poller (SFTP + RouterOS import) +- Bulk deployment across multiple devices +- Certificate rotation and revocation + +RLS enforced via get_db() (app_user engine with tenant context). +RBAC: viewer = read-only (GET); tenant_admin and above = mutating actions. +""" + +import json +import logging +import uuid +from datetime import datetime, timezone + +import nats +import nats.aio.client +import nats.errors +import structlog +from fastapi import APIRouter, Depends, HTTPException, Query, Request, status +from fastapi.responses import PlainTextResponse +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.config import settings +from app.database import get_db, set_tenant_context +from app.middleware.rate_limit import limiter +from app.middleware.rbac import require_min_role +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.models.certificate import CertificateAuthority, DeviceCertificate +from app.models.device import Device +from app.schemas.certificate import ( + BulkCertDeployRequest, + CACreateRequest, + CAResponse, + CertDeployResponse, + CertSignRequest, + DeviceCertResponse, +) +from app.services.audit_service import log_action +from app.services.ca_service import ( + generate_ca, + get_ca_for_tenant, + get_cert_for_deploy, + get_device_certs, + sign_device_cert, + update_cert_status, +) + +logger = structlog.get_logger(__name__) + +router = APIRouter(tags=["certificates"]) + +# Module-level NATS connection for cert deployment (lazy initialized) +_nc: nats.aio.client.Client | None = None + + +async def _get_nats() -> nats.aio.client.Client: + """Get or create a NATS connection for certificate deployment requests.""" + global _nc + if _nc is None or _nc.is_closed: + _nc = await nats.connect(settings.NATS_URL) + logger.info("Certificate NATS connection established") + return _nc + + +async def _deploy_cert_via_nats( + device_id: str, + cert_pem: str, + key_pem: str, + cert_name: str, + ssh_port: int = 22, +) -> dict: + """Send a certificate deployment request to the Go poller via NATS. + + Args: + device_id: Target device UUID string. + cert_pem: PEM-encoded device certificate. + key_pem: PEM-encoded device private key (decrypted). + cert_name: Name for the cert on the device (e.g., "portal-device-cert"). + ssh_port: SSH port for SFTP upload (default 22). + + Returns: + Dict with success, cert_name_on_device, and error fields. + """ + nc = await _get_nats() + payload = json.dumps({ + "device_id": device_id, + "cert_pem": cert_pem, + "key_pem": key_pem, + "cert_name": cert_name, + "ssh_port": ssh_port, + }).encode() + + try: + reply = await nc.request( + f"cert.deploy.{device_id}", + payload, + timeout=60.0, + ) + return json.loads(reply.data) + except nats.errors.TimeoutError: + return { + "success": False, + "error": "Certificate deployment timed out -- device may be offline or unreachable", + } + except Exception as exc: + logger.error("NATS cert deploy request failed", device_id=device_id, error=str(exc)) + return {"success": False, "error": str(exc)} + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _get_device_for_tenant( + db: AsyncSession, device_id: uuid.UUID, current_user: CurrentUser +) -> Device: + """Fetch a device and verify tenant ownership.""" + result = await db.execute( + select(Device).where(Device.id == device_id) + ) + device = result.scalar_one_or_none() + if device is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Device {device_id} not found", + ) + return device + + +async def _get_tenant_id( + current_user: CurrentUser, + db: AsyncSession, + tenant_id_override: uuid.UUID | None = None, +) -> uuid.UUID: + """Extract tenant_id from the current user, handling super_admin. + + Super admins must provide tenant_id_override (from query param). + Regular users use their own tenant_id. + """ + if current_user.is_super_admin: + if tenant_id_override is None: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Super admin must provide tenant_id query parameter.", + ) + # Set RLS context for the selected tenant + await set_tenant_context(db, str(tenant_id_override)) + return tenant_id_override + if current_user.tenant_id is None: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="No tenant context available.", + ) + return current_user.tenant_id + + +async def _get_cert_with_tenant_check( + db: AsyncSession, cert_id: uuid.UUID, tenant_id: uuid.UUID +) -> DeviceCertificate: + """Fetch a device certificate and verify tenant ownership.""" + result = await db.execute( + select(DeviceCertificate).where(DeviceCertificate.id == cert_id) + ) + cert = result.scalar_one_or_none() + if cert is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Certificate {cert_id} not found", + ) + # RLS should enforce this, but double-check + if cert.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Certificate {cert_id} not found", + ) + return cert + + +# --------------------------------------------------------------------------- +# Endpoints +# --------------------------------------------------------------------------- + + +@router.post( + "/ca", + response_model=CAResponse, + status_code=status.HTTP_201_CREATED, + summary="Initialize a Certificate Authority for the tenant", +) +@limiter.limit("5/minute") +async def create_ca( + request: Request, + body: CACreateRequest, + tenant_id: uuid.UUID | None = Query(None, description="Tenant ID (required for super_admin)"), + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("tenant_admin")), + db: AsyncSession = Depends(get_db), +) -> CAResponse: + """Generate a self-signed root CA for the tenant. + + Each tenant may have at most one CA. Returns 409 if a CA already exists. + """ + tenant_id = await _get_tenant_id(current_user, db, tenant_id) + + # Check if CA already exists + existing = await get_ca_for_tenant(db, tenant_id) + if existing is not None: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail="Tenant already has a Certificate Authority. Delete it before creating a new one.", + ) + + ca = await generate_ca( + db, + tenant_id, + body.common_name, + body.validity_years, + settings.get_encryption_key_bytes(), + ) + + try: + await log_action( + db, tenant_id, current_user.user_id, "ca_create", + resource_type="certificate_authority", resource_id=str(ca.id), + details={"common_name": body.common_name, "validity_years": body.validity_years}, + ) + except Exception: + pass + + logger.info("CA created", tenant_id=str(tenant_id), ca_id=str(ca.id)) + return CAResponse.model_validate(ca) + + +@router.get( + "/ca", + response_model=CAResponse, + summary="Get tenant CA information", +) +async def get_ca( + tenant_id: uuid.UUID | None = Query(None, description="Tenant ID (required for super_admin)"), + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("viewer")), + db: AsyncSession = Depends(get_db), +) -> CAResponse: + """Return the tenant's CA public information (no private key).""" + tenant_id = await _get_tenant_id(current_user, db, tenant_id) + ca = await get_ca_for_tenant(db, tenant_id) + if ca is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="No Certificate Authority configured for this tenant.", + ) + return CAResponse.model_validate(ca) + + +@router.get( + "/ca/pem", + response_class=PlainTextResponse, + summary="Download the CA public certificate (PEM)", +) +async def get_ca_pem( + tenant_id: uuid.UUID | None = Query(None, description="Tenant ID (required for super_admin)"), + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("viewer")), + db: AsyncSession = Depends(get_db), +) -> PlainTextResponse: + """Return the CA's public certificate in PEM format. + + Users can import this into their trust store to validate device connections. + """ + tenant_id = await _get_tenant_id(current_user, db, tenant_id) + ca = await get_ca_for_tenant(db, tenant_id) + if ca is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="No Certificate Authority configured for this tenant.", + ) + return PlainTextResponse( + content=ca.cert_pem, + media_type="application/x-pem-file", + headers={"Content-Disposition": "attachment; filename=portal-ca.pem"}, + ) + + +@router.post( + "/sign", + response_model=DeviceCertResponse, + status_code=status.HTTP_201_CREATED, + summary="Sign a certificate for a device", +) +@limiter.limit("20/minute") +async def sign_cert( + request: Request, + body: CertSignRequest, + tenant_id: uuid.UUID | None = Query(None, description="Tenant ID (required for super_admin)"), + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("tenant_admin")), + db: AsyncSession = Depends(get_db), +) -> DeviceCertResponse: + """Sign a per-device TLS certificate using the tenant's CA. + + The device must belong to the tenant. The cert uses CN=hostname, SAN=IP+DNS. + """ + tenant_id = await _get_tenant_id(current_user, db, tenant_id) + + # Verify device belongs to tenant (RLS enforces, but also get device data) + device = await _get_device_for_tenant(db, body.device_id, current_user) + + # Get tenant CA + ca = await get_ca_for_tenant(db, tenant_id) + if ca is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="No Certificate Authority configured. Initialize a CA first.", + ) + + cert = await sign_device_cert( + db, + ca, + body.device_id, + device.hostname, + device.ip_address, + body.validity_days, + settings.get_encryption_key_bytes(), + ) + + try: + await log_action( + db, tenant_id, current_user.user_id, "cert_sign", + resource_type="device_certificate", resource_id=str(cert.id), + device_id=body.device_id, + details={"hostname": device.hostname, "validity_days": body.validity_days}, + ) + except Exception: + pass + + logger.info("Device cert signed", device_id=str(body.device_id), cert_id=str(cert.id)) + return DeviceCertResponse.model_validate(cert) + + +@router.post( + "/{cert_id}/deploy", + response_model=CertDeployResponse, + summary="Deploy a signed certificate to a device", +) +@limiter.limit("20/minute") +async def deploy_cert( + request: Request, + cert_id: uuid.UUID, + tenant_id: uuid.UUID | None = Query(None, description="Tenant ID (required for super_admin)"), + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("tenant_admin")), + db: AsyncSession = Depends(get_db), +) -> CertDeployResponse: + """Deploy a signed certificate to a device via NATS/SFTP. + + The Go poller receives the cert, uploads it via SFTP, imports it, + and assigns it to the api-ssl service on the RouterOS device. + """ + tenant_id = await _get_tenant_id(current_user, db, tenant_id) + cert = await _get_cert_with_tenant_check(db, cert_id, tenant_id) + + # Update status to deploying + try: + await update_cert_status(db, cert_id, "deploying") + except ValueError as e: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail=str(e), + ) + + # Get decrypted cert data for deployment + try: + cert_pem, key_pem, _ca_cert_pem = await get_cert_for_deploy( + db, cert_id, settings.get_encryption_key_bytes() + ) + except ValueError as e: + # Rollback status + await update_cert_status(db, cert_id, "issued") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Failed to prepare cert for deployment: {e}", + ) + + # Flush DB changes before NATS call so deploying status is persisted + await db.flush() + + # Send deployment command via NATS + result = await _deploy_cert_via_nats( + device_id=str(cert.device_id), + cert_pem=cert_pem, + key_pem=key_pem, + cert_name="portal-device-cert", + ) + + if result.get("success"): + # Update cert status to deployed + await update_cert_status(db, cert_id, "deployed") + + # Update device tls_mode to portal_ca + device_result = await db.execute( + select(Device).where(Device.id == cert.device_id) + ) + device = device_result.scalar_one_or_none() + if device is not None: + device.tls_mode = "portal_ca" + + try: + await log_action( + db, tenant_id, current_user.user_id, "cert_deploy", + resource_type="device_certificate", resource_id=str(cert_id), + device_id=cert.device_id, + details={"cert_name_on_device": result.get("cert_name_on_device")}, + ) + except Exception: + pass + + logger.info( + "Certificate deployed successfully", + cert_id=str(cert_id), + device_id=str(cert.device_id), + cert_name_on_device=result.get("cert_name_on_device"), + ) + + return CertDeployResponse( + success=True, + device_id=cert.device_id, + cert_name_on_device=result.get("cert_name_on_device"), + ) + else: + # Rollback status to issued + await update_cert_status(db, cert_id, "issued") + + logger.warning( + "Certificate deployment failed", + cert_id=str(cert_id), + device_id=str(cert.device_id), + error=result.get("error"), + ) + + return CertDeployResponse( + success=False, + device_id=cert.device_id, + error=result.get("error"), + ) + + +@router.post( + "/deploy/bulk", + response_model=list[CertDeployResponse], + summary="Bulk deploy certificates to multiple devices", +) +@limiter.limit("5/minute") +async def bulk_deploy( + request: Request, + body: BulkCertDeployRequest, + tenant_id: uuid.UUID | None = Query(None, description="Tenant ID (required for super_admin)"), + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("tenant_admin")), + db: AsyncSession = Depends(get_db), +) -> list[CertDeployResponse]: + """Deploy certificates to multiple devices sequentially. + + For each device: signs a cert if none exists (status=issued), then deploys. + Sequential deployment per project patterns (no concurrent NATS calls). + """ + tenant_id = await _get_tenant_id(current_user, db, tenant_id) + + # Get tenant CA + ca = await get_ca_for_tenant(db, tenant_id) + if ca is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="No Certificate Authority configured. Initialize a CA first.", + ) + + results: list[CertDeployResponse] = [] + + for device_id in body.device_ids: + try: + # Get device info + device = await _get_device_for_tenant(db, device_id, current_user) + + # Check if device already has an issued cert + existing_certs = await get_device_certs(db, tenant_id, device_id) + issued_cert = None + for c in existing_certs: + if c.status == "issued": + issued_cert = c + break + + # Sign a new cert if none exists in issued state + if issued_cert is None: + issued_cert = await sign_device_cert( + db, + ca, + device_id, + device.hostname, + device.ip_address, + 730, # Default 2 years + settings.get_encryption_key_bytes(), + ) + await db.flush() + + # Deploy the cert + await update_cert_status(db, issued_cert.id, "deploying") + + cert_pem, key_pem, _ca_cert_pem = await get_cert_for_deploy( + db, issued_cert.id, settings.get_encryption_key_bytes() + ) + + await db.flush() + + result = await _deploy_cert_via_nats( + device_id=str(device_id), + cert_pem=cert_pem, + key_pem=key_pem, + cert_name="portal-device-cert", + ) + + if result.get("success"): + await update_cert_status(db, issued_cert.id, "deployed") + device.tls_mode = "portal_ca" + + results.append(CertDeployResponse( + success=True, + device_id=device_id, + cert_name_on_device=result.get("cert_name_on_device"), + )) + else: + await update_cert_status(db, issued_cert.id, "issued") + results.append(CertDeployResponse( + success=False, + device_id=device_id, + error=result.get("error"), + )) + + except HTTPException as e: + results.append(CertDeployResponse( + success=False, + device_id=device_id, + error=e.detail, + )) + except Exception as e: + logger.error("Bulk deploy error", device_id=str(device_id), error=str(e)) + results.append(CertDeployResponse( + success=False, + device_id=device_id, + error=str(e), + )) + + try: + await log_action( + db, tenant_id, current_user.user_id, "cert_bulk_deploy", + resource_type="device_certificate", + details={ + "device_count": len(body.device_ids), + "successful": sum(1 for r in results if r.success), + "failed": sum(1 for r in results if not r.success), + }, + ) + except Exception: + pass + + return results + + +@router.get( + "/devices", + response_model=list[DeviceCertResponse], + summary="List device certificates", +) +async def list_device_certs( + device_id: uuid.UUID | None = Query(None, description="Filter by device ID"), + tenant_id: uuid.UUID | None = Query(None, description="Tenant ID (required for super_admin)"), + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("viewer")), + db: AsyncSession = Depends(get_db), +) -> list[DeviceCertResponse]: + """List device certificates for the tenant. + + Optionally filter by device_id. Excludes superseded certs. + """ + tenant_id = await _get_tenant_id(current_user, db, tenant_id) + certs = await get_device_certs(db, tenant_id, device_id) + return [DeviceCertResponse.model_validate(c) for c in certs] + + +@router.post( + "/{cert_id}/revoke", + response_model=DeviceCertResponse, + summary="Revoke a device certificate", +) +@limiter.limit("5/minute") +async def revoke_cert( + request: Request, + cert_id: uuid.UUID, + tenant_id: uuid.UUID | None = Query(None, description="Tenant ID (required for super_admin)"), + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("tenant_admin")), + db: AsyncSession = Depends(get_db), +) -> DeviceCertResponse: + """Revoke a device certificate and reset the device TLS mode to insecure.""" + tenant_id = await _get_tenant_id(current_user, db, tenant_id) + cert = await _get_cert_with_tenant_check(db, cert_id, tenant_id) + + try: + updated_cert = await update_cert_status(db, cert_id, "revoked") + except ValueError as e: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail=str(e), + ) + + # Reset device tls_mode to insecure + device_result = await db.execute( + select(Device).where(Device.id == cert.device_id) + ) + device = device_result.scalar_one_or_none() + if device is not None: + device.tls_mode = "insecure" + + try: + await log_action( + db, tenant_id, current_user.user_id, "cert_revoke", + resource_type="device_certificate", resource_id=str(cert_id), + device_id=cert.device_id, + ) + except Exception: + pass + + logger.info("Certificate revoked", cert_id=str(cert_id), device_id=str(cert.device_id)) + return DeviceCertResponse.model_validate(updated_cert) + + +@router.post( + "/{cert_id}/rotate", + response_model=CertDeployResponse, + summary="Rotate a device certificate", +) +@limiter.limit("5/minute") +async def rotate_cert( + request: Request, + cert_id: uuid.UUID, + tenant_id: uuid.UUID | None = Query(None, description="Tenant ID (required for super_admin)"), + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("tenant_admin")), + db: AsyncSession = Depends(get_db), +) -> CertDeployResponse: + """Rotate a device certificate: supersede the old cert, sign a new one, and deploy it. + + This is equivalent to: mark old cert as superseded, sign new cert, deploy new cert. + """ + tenant_id = await _get_tenant_id(current_user, db, tenant_id) + old_cert = await _get_cert_with_tenant_check(db, cert_id, tenant_id) + + # Get the device for hostname/IP + device_result = await db.execute( + select(Device).where(Device.id == old_cert.device_id) + ) + device = device_result.scalar_one_or_none() + if device is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Device {old_cert.device_id} not found", + ) + + # Get tenant CA + ca = await get_ca_for_tenant(db, tenant_id) + if ca is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="No Certificate Authority configured.", + ) + + # Mark old cert as superseded + try: + await update_cert_status(db, cert_id, "superseded") + except ValueError as e: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail=str(e), + ) + + # Sign new cert + new_cert = await sign_device_cert( + db, + ca, + old_cert.device_id, + device.hostname, + device.ip_address, + 730, # Default 2 years + settings.get_encryption_key_bytes(), + ) + await db.flush() + + # Deploy new cert + await update_cert_status(db, new_cert.id, "deploying") + + cert_pem, key_pem, _ca_cert_pem = await get_cert_for_deploy( + db, new_cert.id, settings.get_encryption_key_bytes() + ) + + await db.flush() + + result = await _deploy_cert_via_nats( + device_id=str(old_cert.device_id), + cert_pem=cert_pem, + key_pem=key_pem, + cert_name="portal-device-cert", + ) + + if result.get("success"): + await update_cert_status(db, new_cert.id, "deployed") + device.tls_mode = "portal_ca" + + try: + await log_action( + db, tenant_id, current_user.user_id, "cert_rotate", + resource_type="device_certificate", resource_id=str(new_cert.id), + device_id=old_cert.device_id, + details={ + "old_cert_id": str(cert_id), + "cert_name_on_device": result.get("cert_name_on_device"), + }, + ) + except Exception: + pass + + logger.info( + "Certificate rotated successfully", + old_cert_id=str(cert_id), + new_cert_id=str(new_cert.id), + device_id=str(old_cert.device_id), + ) + + return CertDeployResponse( + success=True, + device_id=old_cert.device_id, + cert_name_on_device=result.get("cert_name_on_device"), + ) + else: + # Rollback: mark new cert as issued (deploy failed) + await update_cert_status(db, new_cert.id, "issued") + + logger.warning( + "Certificate rotation deploy failed", + new_cert_id=str(new_cert.id), + device_id=str(old_cert.device_id), + error=result.get("error"), + ) + + return CertDeployResponse( + success=False, + device_id=old_cert.device_id, + error=result.get("error"), + ) diff --git a/backend/app/routers/clients.py b/backend/app/routers/clients.py new file mode 100644 index 0000000..c66f096 --- /dev/null +++ b/backend/app/routers/clients.py @@ -0,0 +1,297 @@ +""" +Client device discovery API endpoint. + +Fetches ARP, DHCP lease, and wireless registration data from a RouterOS device +via the NATS command proxy, merges by MAC address, and returns a unified client list. + +All routes are tenant-scoped under: + /api/tenants/{tenant_id}/devices/{device_id}/clients + +RLS is enforced via get_db() (app_user engine with tenant context). +RBAC: viewer and above (read-only operation). +""" + +import asyncio +import uuid +from datetime import datetime, timezone +from typing import Any + +import structlog +from fastapi import APIRouter, Depends, HTTPException, status +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db +from app.middleware.rbac import require_min_role +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.models.device import Device +from app.services import routeros_proxy + +logger = structlog.get_logger(__name__) + +router = APIRouter(tags=["clients"]) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """Verify the current user is allowed to access the given tenant.""" + if current_user.is_super_admin: + from app.database import set_tenant_context + await set_tenant_context(db, str(tenant_id)) + return + if current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied: you do not belong to this tenant.", + ) + + +async def _check_device_online( + db: AsyncSession, device_id: uuid.UUID +) -> Device: + """Verify the device exists and is online. Returns the Device object.""" + result = await db.execute( + select(Device).where(Device.id == device_id) # type: ignore[arg-type] + ) + device = result.scalar_one_or_none() + if device is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Device {device_id} not found", + ) + if device.status != "online": + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail="Device is offline -- client discovery requires a live connection.", + ) + return device + + +# --------------------------------------------------------------------------- +# MAC-address merge logic +# --------------------------------------------------------------------------- + + +def _normalize_mac(mac: str) -> str: + """Normalize a MAC address to uppercase colon-separated format.""" + return mac.strip().upper().replace("-", ":") + + +def _merge_client_data( + arp_data: list[dict[str, Any]], + dhcp_data: list[dict[str, Any]], + wireless_data: list[dict[str, Any]], +) -> list[dict[str, Any]]: + """Merge ARP, DHCP lease, and wireless registration data by MAC address. + + ARP entries are the base. DHCP enriches with hostname. Wireless enriches + with signal/tx/rx/uptime and marks the client as wireless. + """ + # Index DHCP leases by MAC + dhcp_by_mac: dict[str, dict[str, Any]] = {} + for lease in dhcp_data: + mac_raw = lease.get("mac-address") or lease.get("active-mac-address", "") + if mac_raw: + dhcp_by_mac[_normalize_mac(mac_raw)] = lease + + # Index wireless registrations by MAC + wireless_by_mac: dict[str, dict[str, Any]] = {} + for reg in wireless_data: + mac_raw = reg.get("mac-address", "") + if mac_raw: + wireless_by_mac[_normalize_mac(mac_raw)] = reg + + # Track which MACs we've already processed (from ARP) + seen_macs: set[str] = set() + clients: list[dict[str, Any]] = [] + + # Start with ARP entries as base + for entry in arp_data: + mac_raw = entry.get("mac-address", "") + if not mac_raw: + continue + mac = _normalize_mac(mac_raw) + if mac in seen_macs: + continue + seen_macs.add(mac) + + # Determine status: ARP complete flag or dynamic flag + is_complete = entry.get("complete", "true").lower() == "true" + arp_status = "reachable" if is_complete else "stale" + + client: dict[str, Any] = { + "mac": mac, + "ip": entry.get("address", ""), + "interface": entry.get("interface", ""), + "hostname": None, + "status": arp_status, + "signal_strength": None, + "tx_rate": None, + "rx_rate": None, + "uptime": None, + "is_wireless": False, + } + + # Enrich with DHCP data + dhcp = dhcp_by_mac.get(mac) + if dhcp: + client["hostname"] = dhcp.get("host-name") or None + dhcp_status = dhcp.get("status", "") + if dhcp_status: + client["dhcp_status"] = dhcp_status + + # Enrich with wireless data + wireless = wireless_by_mac.get(mac) + if wireless: + client["is_wireless"] = True + client["signal_strength"] = wireless.get("signal-strength") or None + client["tx_rate"] = wireless.get("tx-rate") or None + client["rx_rate"] = wireless.get("rx-rate") or None + client["uptime"] = wireless.get("uptime") or None + + clients.append(client) + + # Also include DHCP-only entries (no ARP match -- e.g. expired leases) + for mac, lease in dhcp_by_mac.items(): + if mac in seen_macs: + continue + seen_macs.add(mac) + + client = { + "mac": mac, + "ip": lease.get("active-address") or lease.get("address", ""), + "interface": lease.get("active-server") or "", + "hostname": lease.get("host-name") or None, + "status": "stale", # No ARP entry = not actively reachable + "signal_strength": None, + "tx_rate": None, + "rx_rate": None, + "uptime": None, + "is_wireless": mac in wireless_by_mac, + } + + wireless = wireless_by_mac.get(mac) + if wireless: + client["signal_strength"] = wireless.get("signal-strength") or None + client["tx_rate"] = wireless.get("tx-rate") or None + client["rx_rate"] = wireless.get("rx-rate") or None + client["uptime"] = wireless.get("uptime") or None + + clients.append(client) + + return clients + + +# --------------------------------------------------------------------------- +# Endpoint +# --------------------------------------------------------------------------- + + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}/clients", + summary="List connected client devices (ARP + DHCP + wireless)", +) +async def list_clients( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("viewer")), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + """Discover all client devices connected to a MikroTik device. + + Fetches ARP table, DHCP server leases, and wireless registration table + in parallel, then merges by MAC address into a unified client list. + + Wireless fetch failure is non-fatal (device may not have wireless interfaces). + DHCP fetch failure is non-fatal (device may not run a DHCP server). + ARP fetch failure is fatal (core data source). + """ + await _check_tenant_access(current_user, tenant_id, db) + await _check_device_online(db, device_id) + + device_id_str = str(device_id) + + # Fetch all three sources in parallel + arp_result, dhcp_result, wireless_result = await asyncio.gather( + routeros_proxy.execute_command(device_id_str, "/ip/arp/print"), + routeros_proxy.execute_command(device_id_str, "/ip/dhcp-server/lease/print"), + routeros_proxy.execute_command( + device_id_str, "/interface/wireless/registration-table/print" + ), + return_exceptions=True, + ) + + # ARP is required -- if it failed, return 502 + if isinstance(arp_result, Exception): + logger.error("ARP fetch exception", device_id=device_id_str, error=str(arp_result)) + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, + detail=f"Failed to fetch ARP table: {arp_result}", + ) + if not arp_result.get("success"): + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, + detail=arp_result.get("error", "Failed to fetch ARP table"), + ) + + arp_data: list[dict[str, Any]] = arp_result.get("data", []) + + # DHCP is optional -- log warning and continue with empty data + dhcp_data: list[dict[str, Any]] = [] + if isinstance(dhcp_result, Exception): + logger.warning( + "DHCP fetch exception (continuing without DHCP data)", + device_id=device_id_str, + error=str(dhcp_result), + ) + elif not dhcp_result.get("success"): + logger.warning( + "DHCP fetch failed (continuing without DHCP data)", + device_id=device_id_str, + error=dhcp_result.get("error"), + ) + else: + dhcp_data = dhcp_result.get("data", []) + + # Wireless is optional -- many devices have no wireless interfaces + wireless_data: list[dict[str, Any]] = [] + if isinstance(wireless_result, Exception): + logger.warning( + "Wireless fetch exception (device may not have wireless interfaces)", + device_id=device_id_str, + error=str(wireless_result), + ) + elif not wireless_result.get("success"): + logger.warning( + "Wireless fetch failed (device may not have wireless interfaces)", + device_id=device_id_str, + error=wireless_result.get("error"), + ) + else: + wireless_data = wireless_result.get("data", []) + + # Merge by MAC address + clients = _merge_client_data(arp_data, dhcp_data, wireless_data) + + logger.info( + "client_discovery_complete", + device_id=device_id_str, + tenant_id=str(tenant_id), + arp_count=len(arp_data), + dhcp_count=len(dhcp_data), + wireless_count=len(wireless_data), + merged_count=len(clients), + ) + + return { + "clients": clients, + "device_id": device_id_str, + "timestamp": datetime.now(timezone.utc).isoformat(), + } diff --git a/backend/app/routers/config_backups.py b/backend/app/routers/config_backups.py new file mode 100644 index 0000000..c13e963 --- /dev/null +++ b/backend/app/routers/config_backups.py @@ -0,0 +1,745 @@ +""" +Config backup API endpoints. + +All routes are tenant-scoped under: + /api/tenants/{tenant_id}/devices/{device_id}/config/ + +Provides: + - GET /backups — list backup timeline + - POST /backups — trigger manual backup + - POST /checkpoint — create a checkpoint (restore point) + - GET /backups/{sha}/export — retrieve export.rsc text + - GET /backups/{sha}/binary — download backup.bin + - POST /preview-restore — preview impact analysis before restore + - POST /restore — restore a config version (two-phase panic-revert) + - POST /emergency-rollback — rollback to most recent pre-push backup + - GET /schedules — view effective backup schedule + - PUT /schedules — create/update device-specific schedule override + +RLS is enforced via get_db() (app_user engine with tenant context). +RBAC: viewer = read-only (GET); operator and above = write (POST/PUT). +""" + +import asyncio +import logging +import uuid +from datetime import timezone, datetime +from typing import Any + +from fastapi import APIRouter, Depends, HTTPException, Request, status +from fastapi.responses import Response +from pydantic import BaseModel, ConfigDict +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db +from app.middleware.rate_limit import limiter +from app.middleware.rbac import require_min_role, require_scope +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.models.config_backup import ConfigBackupRun, ConfigBackupSchedule +from app.config import settings +from app.models.device import Device +from app.services import backup_service, git_store +from app.services import restore_service +from app.services.crypto import decrypt_credentials_hybrid +from app.services.rsc_parser import parse_rsc, validate_rsc, compute_impact + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=["config-backups"]) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """ + Verify the current user is allowed to access the given tenant. + + - super_admin can access any tenant — re-sets DB tenant context to target tenant. + - All other roles must match their own tenant_id. + """ + if current_user.is_super_admin: + from app.database import set_tenant_context + await set_tenant_context(db, str(tenant_id)) + return + if current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied: you do not belong to this tenant.", + ) + + +# --------------------------------------------------------------------------- +# Request/Response schemas +# --------------------------------------------------------------------------- + + +class RestoreRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + commit_sha: str + + +class ScheduleUpdate(BaseModel): + model_config = ConfigDict(extra="forbid") + cron_expression: str + enabled: bool + + +# --------------------------------------------------------------------------- +# Endpoints +# --------------------------------------------------------------------------- + + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}/config/backups", + summary="List backup timeline for a device", + dependencies=[require_scope("config:read")], +) +async def list_backups( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("viewer")), + db: AsyncSession = Depends(get_db), +) -> list[dict[str, Any]]: + """Return backup timeline for a device, newest first. + + Each entry includes: id, commit_sha, trigger_type, lines_added, + lines_removed, and created_at. + """ + await _check_tenant_access(current_user, tenant_id, db) + + result = await db.execute( + select(ConfigBackupRun) + .where( + ConfigBackupRun.device_id == device_id, # type: ignore[arg-type] + ConfigBackupRun.tenant_id == tenant_id, # type: ignore[arg-type] + ) + .order_by(ConfigBackupRun.created_at.desc()) + ) + runs = result.scalars().all() + + return [ + { + "id": str(run.id), + "commit_sha": run.commit_sha, + "trigger_type": run.trigger_type, + "lines_added": run.lines_added, + "lines_removed": run.lines_removed, + "encryption_tier": run.encryption_tier, + "created_at": run.created_at.isoformat(), + } + for run in runs + ] + + +@router.post( + "/tenants/{tenant_id}/devices/{device_id}/config/backups", + summary="Trigger a manual config backup", + status_code=status.HTTP_201_CREATED, + dependencies=[require_scope("config:write")], +) +@limiter.limit("20/minute") +async def trigger_backup( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("operator")), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + """Trigger an immediate manual backup for a device. + + Captures export.rsc and backup.bin via SSH, commits to the tenant's + git store, and records a ConfigBackupRun with trigger_type='manual'. + Returns the backup metadata dict. + """ + await _check_tenant_access(current_user, tenant_id, db) + + try: + result = await backup_service.run_backup( + device_id=str(device_id), + tenant_id=str(tenant_id), + trigger_type="manual", + db_session=db, + ) + except ValueError as exc: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=str(exc), + ) from exc + except Exception as exc: + logger.error( + "Manual backup failed for device %s tenant %s: %s", + device_id, + tenant_id, + exc, + ) + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, + detail=f"Backup failed: {exc}", + ) from exc + + return result + + +@router.post( + "/tenants/{tenant_id}/devices/{device_id}/config/checkpoint", + summary="Create a checkpoint (restore point) of the current config", + dependencies=[require_scope("config:write")], +) +@limiter.limit("5/minute") +async def create_checkpoint( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("operator")), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + """Create a checkpoint (restore point) of the current device config. + + Identical to a manual backup but tagged with trigger_type='checkpoint'. + Checkpoints serve as named restore points that operators create before + making risky changes, so they can easily roll back. + """ + await _check_tenant_access(current_user, tenant_id, db) + + try: + result = await backup_service.run_backup( + device_id=str(device_id), + tenant_id=str(tenant_id), + trigger_type="checkpoint", + db_session=db, + ) + except ValueError as exc: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=str(exc), + ) from exc + except Exception as exc: + logger.error( + "Checkpoint backup failed for device %s tenant %s: %s", + device_id, + tenant_id, + exc, + ) + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, + detail=f"Checkpoint failed: {exc}", + ) from exc + + return result + + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}/config/backups/{commit_sha}/export", + summary="Get export.rsc text for a specific backup", + response_class=Response, + dependencies=[require_scope("config:read")], +) +async def get_export( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + commit_sha: str, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("viewer")), + db: AsyncSession = Depends(get_db), +) -> Response: + """Return the raw /export compact text for a specific backup version. + + For encrypted backups (encryption_tier != NULL), the Transit ciphertext + stored in git is decrypted on-demand before returning plaintext. + Legacy plaintext backups (encryption_tier = NULL) are returned as-is. + + Content-Type: text/plain + """ + await _check_tenant_access(current_user, tenant_id, db) + + loop = asyncio.get_event_loop() + try: + content_bytes = await loop.run_in_executor( + None, + git_store.read_file, + str(tenant_id), + commit_sha, + str(device_id), + "export.rsc", + ) + except KeyError as exc: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Backup version not found: {exc}", + ) from exc + + # Check if this backup is encrypted — decrypt via Transit if so + result = await db.execute( + select(ConfigBackupRun).where( + ConfigBackupRun.commit_sha == commit_sha, + ConfigBackupRun.device_id == device_id, + ) + ) + backup_run = result.scalar_one_or_none() + if backup_run and backup_run.encryption_tier: + try: + from app.services.crypto import decrypt_data_transit + + plaintext = await decrypt_data_transit( + content_bytes.decode("utf-8"), str(tenant_id) + ) + content_bytes = plaintext.encode("utf-8") + except Exception as dec_err: + logger.error( + "Failed to decrypt export for device %s sha %s: %s", + device_id, commit_sha, dec_err, + ) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Failed to decrypt backup content", + ) from dec_err + + return Response(content=content_bytes, media_type="text/plain") + + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}/config/backups/{commit_sha}/binary", + summary="Download backup.bin for a specific backup", + response_class=Response, + dependencies=[require_scope("config:read")], +) +async def get_binary( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + commit_sha: str, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("viewer")), + db: AsyncSession = Depends(get_db), +) -> Response: + """Download the RouterOS binary backup file for a specific backup version. + + For encrypted backups, the Transit ciphertext is decrypted and the + base64-encoded binary is decoded back to raw bytes before returning. + Legacy plaintext backups are returned as-is. + + Content-Type: application/octet-stream (attachment download). + """ + await _check_tenant_access(current_user, tenant_id, db) + + loop = asyncio.get_event_loop() + try: + content_bytes = await loop.run_in_executor( + None, + git_store.read_file, + str(tenant_id), + commit_sha, + str(device_id), + "backup.bin", + ) + except KeyError as exc: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Backup version not found: {exc}", + ) from exc + + # Check if this backup is encrypted — decrypt via Transit if so + result = await db.execute( + select(ConfigBackupRun).where( + ConfigBackupRun.commit_sha == commit_sha, + ConfigBackupRun.device_id == device_id, + ) + ) + backup_run = result.scalar_one_or_none() + if backup_run and backup_run.encryption_tier: + try: + import base64 as b64 + + from app.services.crypto import decrypt_data_transit + + # Transit ciphertext -> base64-encoded binary -> raw bytes + b64_plaintext = await decrypt_data_transit( + content_bytes.decode("utf-8"), str(tenant_id) + ) + content_bytes = b64.b64decode(b64_plaintext) + except Exception as dec_err: + logger.error( + "Failed to decrypt binary backup for device %s sha %s: %s", + device_id, commit_sha, dec_err, + ) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Failed to decrypt backup content", + ) from dec_err + + return Response( + content=content_bytes, + media_type="application/octet-stream", + headers={ + "Content-Disposition": f'attachment; filename="backup-{commit_sha[:8]}.bin"' + }, + ) + + +@router.post( + "/tenants/{tenant_id}/devices/{device_id}/config/preview-restore", + summary="Preview the impact of restoring a config backup", + dependencies=[require_scope("config:read")], +) +@limiter.limit("20/minute") +async def preview_restore( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + body: RestoreRequest, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("operator")), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + """Preview the impact of restoring a config backup before executing. + + Reads the target config from the git backup, fetches the current config + from the live device (falling back to the latest backup if unreachable), + and returns a diff with categories, risk levels, warnings, and validation. + """ + await _check_tenant_access(current_user, tenant_id, db) + + loop = asyncio.get_event_loop() + + # 1. Read target export from git + try: + target_bytes = await loop.run_in_executor( + None, + git_store.read_file, + str(tenant_id), + body.commit_sha, + str(device_id), + "export.rsc", + ) + except KeyError as exc: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Backup export not found: {exc}", + ) from exc + + target_text = target_bytes.decode("utf-8", errors="replace") + + # 2. Get current export from device (live) or fallback to latest backup + current_text = "" + try: + result = await db.execute( + select(Device).where(Device.id == device_id) # type: ignore[arg-type] + ) + device = result.scalar_one_or_none() + if device and (device.encrypted_credentials_transit or device.encrypted_credentials): + key = settings.get_encryption_key_bytes() + creds_json = await decrypt_credentials_hybrid( + device.encrypted_credentials_transit, + device.encrypted_credentials, + str(tenant_id), + key, + ) + import json + creds = json.loads(creds_json) + current_text = await backup_service.capture_export( + device.ip_address, + username=creds.get("username", "admin"), + password=creds.get("password", ""), + ) + except Exception: + # Fallback to latest backup in git + logger.debug( + "Live export failed for device %s, falling back to latest backup", + device_id, + ) + latest = await db.execute( + select(ConfigBackupRun) + .where( + ConfigBackupRun.device_id == device_id, # type: ignore[arg-type] + ) + .order_by(ConfigBackupRun.created_at.desc()) + .limit(1) + ) + latest_run = latest.scalar_one_or_none() + if latest_run: + try: + current_bytes = await loop.run_in_executor( + None, + git_store.read_file, + str(tenant_id), + latest_run.commit_sha, + str(device_id), + "export.rsc", + ) + current_text = current_bytes.decode("utf-8", errors="replace") + except Exception: + current_text = "" + + # 3. Parse and analyze + current_parsed = parse_rsc(current_text) + target_parsed = parse_rsc(target_text) + validation = validate_rsc(target_text) + impact = compute_impact(current_parsed, target_parsed) + + return { + "diff": impact["diff"], + "categories": impact["categories"], + "warnings": impact["warnings"], + "validation": validation, + } + + +@router.post( + "/tenants/{tenant_id}/devices/{device_id}/config/restore", + summary="Restore a config version (two-phase push with panic-revert)", + dependencies=[require_scope("config:write")], +) +@limiter.limit("5/minute") +async def restore_config_endpoint( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + body: RestoreRequest, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("operator")), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + """Restore a device config to a specific backup version. + + Implements two-phase push with panic-revert: + 1. Pre-backup is taken on device (mandatory before any push) + 2. RouterOS scheduler is installed as safety net (auto-reverts if unreachable) + 3. Config is pushed via /import + 4. Wait 60s for config to settle + 5. Reachability check — remove scheduler if device is reachable + 6. Return committed/reverted/failed status + + Returns: {"status": str, "message": str, "pre_backup_sha": str} + """ + await _check_tenant_access(current_user, tenant_id, db) + + try: + result = await restore_service.restore_config( + device_id=str(device_id), + tenant_id=str(tenant_id), + commit_sha=body.commit_sha, + db_session=db, + ) + except ValueError as exc: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=str(exc), + ) from exc + except Exception as exc: + logger.error( + "Restore failed for device %s tenant %s commit %s: %s", + device_id, + tenant_id, + body.commit_sha, + exc, + ) + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, + detail=f"Restore failed: {exc}", + ) from exc + + return result + + +@router.post( + "/tenants/{tenant_id}/devices/{device_id}/config/emergency-rollback", + summary="Emergency rollback to most recent pre-push backup", + dependencies=[require_scope("config:write")], +) +@limiter.limit("5/minute") +async def emergency_rollback( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("operator")), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + """Emergency rollback: restore the most recent pre-push backup. + + Used when a device goes offline after a config push. + Finds the latest 'pre-restore', 'checkpoint', or 'pre-template-push' + backup and restores it via the two-phase panic-revert process. + """ + await _check_tenant_access(current_user, tenant_id, db) + + result = await db.execute( + select(ConfigBackupRun) + .where( + ConfigBackupRun.device_id == device_id, # type: ignore[arg-type] + ConfigBackupRun.tenant_id == tenant_id, # type: ignore[arg-type] + ConfigBackupRun.trigger_type.in_( + ["pre-restore", "checkpoint", "pre-template-push"] + ), + ) + .order_by(ConfigBackupRun.created_at.desc()) + .limit(1) + ) + backup = result.scalar_one_or_none() + if not backup: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="No pre-push backup found for rollback", + ) + + try: + restore_result = await restore_service.restore_config( + device_id=str(device_id), + tenant_id=str(tenant_id), + commit_sha=backup.commit_sha, + db_session=db, + ) + except ValueError as exc: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=str(exc), + ) from exc + except Exception as exc: + logger.error( + "Emergency rollback failed for device %s tenant %s: %s", + device_id, + tenant_id, + exc, + ) + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, + detail=f"Emergency rollback failed: {exc}", + ) from exc + + return { + **restore_result, + "rolled_back_to": backup.commit_sha, + "rolled_back_to_date": backup.created_at.isoformat(), + } + + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}/config/schedules", + summary="Get effective backup schedule for a device", + dependencies=[require_scope("config:read")], +) +async def get_schedule( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("viewer")), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + """Return the effective backup schedule for a device. + + Returns the device-specific override if it exists; falls back to the + tenant-level default. If no schedule is configured, returns a synthetic + default (2am UTC daily, enabled=True). + """ + await _check_tenant_access(current_user, tenant_id, db) + + # Check for device-specific override first + result = await db.execute( + select(ConfigBackupSchedule).where( + ConfigBackupSchedule.tenant_id == tenant_id, # type: ignore[arg-type] + ConfigBackupSchedule.device_id == device_id, # type: ignore[arg-type] + ) + ) + schedule = result.scalar_one_or_none() + + if schedule is None: + # Fall back to tenant-level default + result = await db.execute( + select(ConfigBackupSchedule).where( + ConfigBackupSchedule.tenant_id == tenant_id, # type: ignore[arg-type] + ConfigBackupSchedule.device_id.is_(None), # type: ignore[union-attr] + ) + ) + schedule = result.scalar_one_or_none() + + if schedule is None: + # No schedule configured — return synthetic default + return { + "id": None, + "tenant_id": str(tenant_id), + "device_id": str(device_id), + "cron_expression": "0 2 * * *", + "enabled": True, + "is_default": True, + } + + is_device_specific = schedule.device_id is not None + return { + "id": str(schedule.id), + "tenant_id": str(schedule.tenant_id), + "device_id": str(schedule.device_id) if schedule.device_id else None, + "cron_expression": schedule.cron_expression, + "enabled": schedule.enabled, + "is_default": not is_device_specific, + } + + +@router.put( + "/tenants/{tenant_id}/devices/{device_id}/config/schedules", + summary="Create or update the device-specific backup schedule", + dependencies=[require_scope("config:write")], +) +@limiter.limit("20/minute") +async def update_schedule( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + body: ScheduleUpdate, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("operator")), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + """Create or update the device-specific backup schedule override. + + If no device-specific schedule exists, creates one. If one exists, updates + its cron_expression and enabled fields. + + Returns the updated schedule. + """ + await _check_tenant_access(current_user, tenant_id, db) + + # Look for existing device-specific schedule + result = await db.execute( + select(ConfigBackupSchedule).where( + ConfigBackupSchedule.tenant_id == tenant_id, # type: ignore[arg-type] + ConfigBackupSchedule.device_id == device_id, # type: ignore[arg-type] + ) + ) + schedule = result.scalar_one_or_none() + + if schedule is None: + # Create new device-specific schedule + schedule = ConfigBackupSchedule( + tenant_id=tenant_id, + device_id=device_id, + cron_expression=body.cron_expression, + enabled=body.enabled, + ) + db.add(schedule) + else: + # Update existing schedule + schedule.cron_expression = body.cron_expression + schedule.enabled = body.enabled + + await db.flush() + + # Hot-reload the scheduler so changes take effect immediately + from app.services.backup_scheduler import on_schedule_change + await on_schedule_change(tenant_id, device_id) + + return { + "id": str(schedule.id), + "tenant_id": str(schedule.tenant_id), + "device_id": str(schedule.device_id), + "cron_expression": schedule.cron_expression, + "enabled": schedule.enabled, + "is_default": False, + } diff --git a/backend/app/routers/config_editor.py b/backend/app/routers/config_editor.py new file mode 100644 index 0000000..2e2833a --- /dev/null +++ b/backend/app/routers/config_editor.py @@ -0,0 +1,371 @@ +""" +Dynamic RouterOS config editor API endpoints. + +All routes are tenant-scoped under: + /api/tenants/{tenant_id}/devices/{device_id}/config-editor/ + +Proxies commands to the Go poller's CmdResponder via the RouterOS proxy service. + +Provides: + - GET /browse -- browse a RouterOS menu path + - POST /add -- add a new entry + - POST /set -- edit an existing entry + - POST /remove -- delete an entry + - POST /execute -- execute an arbitrary CLI command + +RLS is enforced via get_db() (app_user engine with tenant context). +RBAC: viewer = read-only (GET browse); operator and above = write (POST). +""" + +import uuid + +import structlog + +from fastapi import APIRouter, Depends, HTTPException, Query, Request, status +from pydantic import BaseModel, ConfigDict +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db +from app.middleware.rate_limit import limiter +from app.middleware.rbac import require_min_role, require_scope +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.models.device import Device +from app.security.command_blocklist import check_command_safety, check_path_safety +from app.services import routeros_proxy +from app.services.audit_service import log_action + +logger = structlog.get_logger(__name__) +audit_logger = structlog.get_logger("audit") + +router = APIRouter(tags=["config-editor"]) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """Verify the current user is allowed to access the given tenant.""" + from app.database import set_tenant_context + + if current_user.is_super_admin: + await set_tenant_context(db, str(tenant_id)) + return + if current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied: you do not belong to this tenant.", + ) + # Set RLS context for regular users too + await set_tenant_context(db, str(tenant_id)) + + +async def _check_device_online( + db: AsyncSession, device_id: uuid.UUID +) -> Device: + """Verify the device exists and is online. Returns the Device object.""" + result = await db.execute( + select(Device).where(Device.id == device_id) # type: ignore[arg-type] + ) + device = result.scalar_one_or_none() + if device is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Device {device_id} not found", + ) + if device.status != "online": + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail="Device is offline \u2014 config editor requires a live connection.", + ) + return device + + +# --------------------------------------------------------------------------- +# Request schemas +# --------------------------------------------------------------------------- + + +class AddEntryRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + path: str + properties: dict[str, str] + + +class SetEntryRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + path: str + entry_id: str | None = None # Optional for singleton paths (e.g. /ip/dns) + properties: dict[str, str] + + +class RemoveEntryRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + path: str + entry_id: str + + +class ExecuteRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + command: str + + +# --------------------------------------------------------------------------- +# Endpoints +# --------------------------------------------------------------------------- + + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}/config-editor/browse", + summary="Browse a RouterOS menu path", + dependencies=[require_scope("config:read")], +) +async def browse_menu( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + path: str = Query("/interface", description="RouterOS menu path to browse"), + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("viewer")), + db: AsyncSession = Depends(get_db), +) -> dict: + """Browse a RouterOS menu path and return all entries at that path.""" + await _check_tenant_access(current_user, tenant_id, db) + await _check_device_online(db, device_id) + check_path_safety(path) + + result = await routeros_proxy.browse_menu(str(device_id), path) + + if not result.get("success"): + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, + detail=result.get("error", "Failed to browse menu path"), + ) + + audit_logger.info( + "routeros_config_browsed", + device_id=str(device_id), + tenant_id=str(tenant_id), + user_id=str(current_user.user_id), + path=path, + ) + + return { + "success": True, + "entries": result.get("data", []), + "error": None, + "path": path, + } + + +@router.post( + "/tenants/{tenant_id}/devices/{device_id}/config-editor/add", + summary="Add a new entry to a RouterOS menu path", + dependencies=[require_scope("config:write")], +) +@limiter.limit("20/minute") +async def add_entry( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + body: AddEntryRequest, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("operator")), + db: AsyncSession = Depends(get_db), +) -> dict: + """Add a new entry to a RouterOS menu path with the given properties.""" + await _check_tenant_access(current_user, tenant_id, db) + await _check_device_online(db, device_id) + check_path_safety(body.path, write=True) + + result = await routeros_proxy.add_entry(str(device_id), body.path, body.properties) + + if not result.get("success"): + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, + detail=result.get("error", "Failed to add entry"), + ) + + audit_logger.info( + "routeros_config_added", + device_id=str(device_id), + tenant_id=str(tenant_id), + user_id=str(current_user.user_id), + user_role=current_user.role, + path=body.path, + success=result.get("success", False), + ) + + try: + await log_action( + db, tenant_id, current_user.user_id, "config_add", + resource_type="config", resource_id=str(device_id), + device_id=device_id, + details={"path": body.path, "properties": body.properties}, + ) + except Exception: + pass + + return result + + +@router.post( + "/tenants/{tenant_id}/devices/{device_id}/config-editor/set", + summary="Edit an existing entry in a RouterOS menu path", + dependencies=[require_scope("config:write")], +) +@limiter.limit("20/minute") +async def set_entry( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + body: SetEntryRequest, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("operator")), + db: AsyncSession = Depends(get_db), +) -> dict: + """Update an existing entry's properties on the device.""" + await _check_tenant_access(current_user, tenant_id, db) + await _check_device_online(db, device_id) + check_path_safety(body.path, write=True) + + result = await routeros_proxy.update_entry( + str(device_id), body.path, body.entry_id, body.properties + ) + + if not result.get("success"): + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, + detail=result.get("error", "Failed to update entry"), + ) + + audit_logger.info( + "routeros_config_modified", + device_id=str(device_id), + tenant_id=str(tenant_id), + user_id=str(current_user.user_id), + user_role=current_user.role, + path=body.path, + entry_id=body.entry_id, + success=result.get("success", False), + ) + + try: + await log_action( + db, tenant_id, current_user.user_id, "config_set", + resource_type="config", resource_id=str(device_id), + device_id=device_id, + details={"path": body.path, "entry_id": body.entry_id, "properties": body.properties}, + ) + except Exception: + pass + + return result + + +@router.post( + "/tenants/{tenant_id}/devices/{device_id}/config-editor/remove", + summary="Delete an entry from a RouterOS menu path", + dependencies=[require_scope("config:write")], +) +@limiter.limit("5/minute") +async def remove_entry( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + body: RemoveEntryRequest, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("operator")), + db: AsyncSession = Depends(get_db), +) -> dict: + """Remove an entry from a RouterOS menu path.""" + await _check_tenant_access(current_user, tenant_id, db) + await _check_device_online(db, device_id) + check_path_safety(body.path, write=True) + + result = await routeros_proxy.remove_entry( + str(device_id), body.path, body.entry_id + ) + + if not result.get("success"): + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, + detail=result.get("error", "Failed to remove entry"), + ) + + audit_logger.info( + "routeros_config_removed", + device_id=str(device_id), + tenant_id=str(tenant_id), + user_id=str(current_user.user_id), + user_role=current_user.role, + path=body.path, + entry_id=body.entry_id, + success=result.get("success", False), + ) + + try: + await log_action( + db, tenant_id, current_user.user_id, "config_remove", + resource_type="config", resource_id=str(device_id), + device_id=device_id, + details={"path": body.path, "entry_id": body.entry_id}, + ) + except Exception: + pass + + return result + + +@router.post( + "/tenants/{tenant_id}/devices/{device_id}/config-editor/execute", + summary="Execute an arbitrary RouterOS CLI command", + dependencies=[require_scope("config:write")], +) +@limiter.limit("20/minute") +async def execute_command( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + body: ExecuteRequest, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("operator")), + db: AsyncSession = Depends(get_db), +) -> dict: + """Execute an arbitrary RouterOS CLI command on the device.""" + await _check_tenant_access(current_user, tenant_id, db) + await _check_device_online(db, device_id) + check_command_safety(body.command) + + result = await routeros_proxy.execute_cli(str(device_id), body.command) + + if not result.get("success"): + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, + detail=result.get("error", "Failed to execute command"), + ) + + audit_logger.info( + "routeros_command_executed", + device_id=str(device_id), + tenant_id=str(tenant_id), + user_id=str(current_user.user_id), + user_role=current_user.role, + command=body.command, + success=result.get("success", False), + ) + + try: + await log_action( + db, tenant_id, current_user.user_id, "config_execute", + resource_type="config", resource_id=str(device_id), + device_id=device_id, + details={"command": body.command}, + ) + except Exception: + pass + + return result diff --git a/backend/app/routers/device_groups.py b/backend/app/routers/device_groups.py new file mode 100644 index 0000000..25e8665 --- /dev/null +++ b/backend/app/routers/device_groups.py @@ -0,0 +1,94 @@ +""" +Device group management API endpoints. + +Routes: /api/tenants/{tenant_id}/device-groups + +RBAC: +- viewer: GET (read-only) +- operator: POST, PUT (write) +- tenant_admin/admin: DELETE +""" + +import uuid + +from fastapi import APIRouter, Depends, status +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db +from app.middleware.rbac import require_operator_or_above, require_tenant_admin_or_above +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.routers.devices import _check_tenant_access +from app.schemas.device import DeviceGroupCreate, DeviceGroupResponse, DeviceGroupUpdate +from app.services import device as device_service + +router = APIRouter(tags=["device-groups"]) + + +@router.get( + "/tenants/{tenant_id}/device-groups", + response_model=list[DeviceGroupResponse], + summary="List device groups", +) +async def list_groups( + tenant_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[DeviceGroupResponse]: + """List all device groups for a tenant. Viewer role and above.""" + await _check_tenant_access(current_user, tenant_id, db) + return await device_service.get_groups(db=db, tenant_id=tenant_id) + + +@router.post( + "/tenants/{tenant_id}/device-groups", + response_model=DeviceGroupResponse, + status_code=status.HTTP_201_CREATED, + summary="Create a device group", + dependencies=[Depends(require_operator_or_above)], +) +async def create_group( + tenant_id: uuid.UUID, + data: DeviceGroupCreate, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> DeviceGroupResponse: + """Create a new device group. Requires operator role or above.""" + await _check_tenant_access(current_user, tenant_id, db) + return await device_service.create_group(db=db, tenant_id=tenant_id, data=data) + + +@router.put( + "/tenants/{tenant_id}/device-groups/{group_id}", + response_model=DeviceGroupResponse, + summary="Update a device group", + dependencies=[Depends(require_operator_or_above)], +) +async def update_group( + tenant_id: uuid.UUID, + group_id: uuid.UUID, + data: DeviceGroupUpdate, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> DeviceGroupResponse: + """Update a device group. Requires operator role or above.""" + await _check_tenant_access(current_user, tenant_id, db) + return await device_service.update_group( + db=db, tenant_id=tenant_id, group_id=group_id, data=data + ) + + +@router.delete( + "/tenants/{tenant_id}/device-groups/{group_id}", + status_code=status.HTTP_204_NO_CONTENT, + summary="Delete a device group", + dependencies=[Depends(require_tenant_admin_or_above)], +) +async def delete_group( + tenant_id: uuid.UUID, + group_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> None: + """Delete a device group. Requires tenant_admin or above.""" + await _check_tenant_access(current_user, tenant_id, db) + await device_service.delete_group(db=db, tenant_id=tenant_id, group_id=group_id) diff --git a/backend/app/routers/device_logs.py b/backend/app/routers/device_logs.py new file mode 100644 index 0000000..bdfe07c --- /dev/null +++ b/backend/app/routers/device_logs.py @@ -0,0 +1,150 @@ +""" +Device syslog fetch endpoint via NATS RouterOS proxy. + +Provides: + - GET /tenants/{tenant_id}/devices/{device_id}/logs -- fetch device log entries + +RLS enforced via get_db() (app_user engine with tenant context). +RBAC: viewer and above can read logs. +""" + +import uuid + +import structlog +from fastapi import APIRouter, Depends, HTTPException, Query, status +from pydantic import BaseModel +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db +from app.middleware.rbac import require_min_role +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.services import routeros_proxy + +logger = structlog.get_logger(__name__) + +router = APIRouter(tags=["device-logs"]) + + +# --------------------------------------------------------------------------- +# Helpers (same pattern as config_editor.py) +# --------------------------------------------------------------------------- + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """Verify the current user is allowed to access the given tenant.""" + if current_user.is_super_admin: + from app.database import set_tenant_context + await set_tenant_context(db, str(tenant_id)) + return + if current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied: you do not belong to this tenant.", + ) + + +async def _check_device_exists( + db: AsyncSession, device_id: uuid.UUID +) -> None: + """Verify the device exists (does not require online status for logs).""" + from sqlalchemy import select + from app.models.device import Device + + result = await db.execute( + select(Device).where(Device.id == device_id) + ) + device = result.scalar_one_or_none() + if device is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Device {device_id} not found", + ) + + +# --------------------------------------------------------------------------- +# Response model +# --------------------------------------------------------------------------- + +class LogEntry(BaseModel): + time: str + topics: str + message: str + + +class LogsResponse(BaseModel): + logs: list[LogEntry] + device_id: str + count: int + + +# --------------------------------------------------------------------------- +# Endpoint +# --------------------------------------------------------------------------- + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}/logs", + response_model=LogsResponse, + summary="Fetch device syslog entries via RouterOS API", + dependencies=[Depends(require_min_role("viewer"))], +) +async def get_device_logs( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + limit: int = Query(default=100, ge=1, le=500), + topic: str | None = Query(default=None, description="Filter by log topic"), + search: str | None = Query(default=None, description="Search in message/topics"), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> LogsResponse: + """Fetch device log entries via the RouterOS /log/print command.""" + await _check_tenant_access(current_user, tenant_id, db) + await _check_device_exists(db, device_id) + + # Build RouterOS command args + args = [f"=count={limit}"] + if topic: + args.append(f"?topics={topic}") + + result = await routeros_proxy.execute_command( + str(device_id), "/log/print", args=args, timeout=15.0 + ) + + if not result.get("success"): + error_msg = result.get("error", "Unknown error fetching logs") + logger.warning( + "failed to fetch device logs", + device_id=str(device_id), + error=error_msg, + ) + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, + detail=f"Failed to fetch device logs: {error_msg}", + ) + + # Parse log entries from RouterOS response + raw_entries = result.get("data", []) + logs: list[LogEntry] = [] + for entry in raw_entries: + log_entry = LogEntry( + time=entry.get("time", ""), + topics=entry.get("topics", ""), + message=entry.get("message", ""), + ) + + # Apply search filter (case-insensitive) if provided + if search: + search_lower = search.lower() + if ( + search_lower not in log_entry.message.lower() + and search_lower not in log_entry.topics.lower() + ): + continue + + logs.append(log_entry) + + return LogsResponse( + logs=logs, + device_id=str(device_id), + count=len(logs), + ) diff --git a/backend/app/routers/device_tags.py b/backend/app/routers/device_tags.py new file mode 100644 index 0000000..523cca1 --- /dev/null +++ b/backend/app/routers/device_tags.py @@ -0,0 +1,94 @@ +""" +Device tag management API endpoints. + +Routes: /api/tenants/{tenant_id}/device-tags + +RBAC: +- viewer: GET (read-only) +- operator: POST, PUT (write) +- tenant_admin/admin: DELETE +""" + +import uuid + +from fastapi import APIRouter, Depends, status +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db +from app.middleware.rbac import require_operator_or_above, require_tenant_admin_or_above +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.routers.devices import _check_tenant_access +from app.schemas.device import DeviceTagCreate, DeviceTagResponse, DeviceTagUpdate +from app.services import device as device_service + +router = APIRouter(tags=["device-tags"]) + + +@router.get( + "/tenants/{tenant_id}/device-tags", + response_model=list[DeviceTagResponse], + summary="List device tags", +) +async def list_tags( + tenant_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[DeviceTagResponse]: + """List all device tags for a tenant. Viewer role and above.""" + await _check_tenant_access(current_user, tenant_id, db) + return await device_service.get_tags(db=db, tenant_id=tenant_id) + + +@router.post( + "/tenants/{tenant_id}/device-tags", + response_model=DeviceTagResponse, + status_code=status.HTTP_201_CREATED, + summary="Create a device tag", + dependencies=[Depends(require_operator_or_above)], +) +async def create_tag( + tenant_id: uuid.UUID, + data: DeviceTagCreate, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> DeviceTagResponse: + """Create a new device tag. Requires operator role or above.""" + await _check_tenant_access(current_user, tenant_id, db) + return await device_service.create_tag(db=db, tenant_id=tenant_id, data=data) + + +@router.put( + "/tenants/{tenant_id}/device-tags/{tag_id}", + response_model=DeviceTagResponse, + summary="Update a device tag", + dependencies=[Depends(require_operator_or_above)], +) +async def update_tag( + tenant_id: uuid.UUID, + tag_id: uuid.UUID, + data: DeviceTagUpdate, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> DeviceTagResponse: + """Update a device tag. Requires operator role or above.""" + await _check_tenant_access(current_user, tenant_id, db) + return await device_service.update_tag( + db=db, tenant_id=tenant_id, tag_id=tag_id, data=data + ) + + +@router.delete( + "/tenants/{tenant_id}/device-tags/{tag_id}", + status_code=status.HTTP_204_NO_CONTENT, + summary="Delete a device tag", + dependencies=[Depends(require_tenant_admin_or_above)], +) +async def delete_tag( + tenant_id: uuid.UUID, + tag_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> None: + """Delete a device tag. Requires tenant_admin or above.""" + await _check_tenant_access(current_user, tenant_id, db) + await device_service.delete_tag(db=db, tenant_id=tenant_id, tag_id=tag_id) diff --git a/backend/app/routers/devices.py b/backend/app/routers/devices.py new file mode 100644 index 0000000..c3ac89b --- /dev/null +++ b/backend/app/routers/devices.py @@ -0,0 +1,452 @@ +""" +Device management API endpoints. + +All routes are tenant-scoped under /api/tenants/{tenant_id}/devices. +RLS is enforced via PostgreSQL — the app_user engine automatically filters +cross-tenant data based on the SET LOCAL app.current_tenant context set by +get_current_user dependency. + +RBAC: +- viewer: GET (read-only) +- operator: POST, PUT (write) +- admin/tenant_admin: DELETE +""" + +import uuid +from typing import Optional + +from fastapi import APIRouter, Depends, HTTPException, Query, Request, status +from sqlalchemy.ext.asyncio import AsyncSession + +from app.config import settings +from app.database import get_db +from app.middleware.rate_limit import limiter +from app.services.audit_service import log_action +from app.middleware.rbac import ( + require_min_role, + require_operator_or_above, + require_scope, + require_tenant_admin_or_above, +) +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.schemas.device import ( + BulkAddRequest, + BulkAddResult, + DeviceCreate, + DeviceListResponse, + DeviceResponse, + DeviceUpdate, + SubnetScanRequest, + SubnetScanResponse, +) +from app.services import device as device_service +from app.services.scanner import scan_subnet + +router = APIRouter(tags=["devices"]) + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """ + Verify the current user is allowed to access the given tenant. + + - super_admin can access any tenant — re-sets DB tenant context to target tenant. + - All other roles must match their own tenant_id. + """ + if current_user.is_super_admin: + # Re-set tenant context to the target tenant so RLS allows the operation + from app.database import set_tenant_context + await set_tenant_context(db, str(tenant_id)) + return + if current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied: you do not belong to this tenant.", + ) + + +# --------------------------------------------------------------------------- +# Device CRUD +# --------------------------------------------------------------------------- + + +@router.get( + "/tenants/{tenant_id}/devices", + response_model=DeviceListResponse, + summary="List devices with pagination and filtering", + dependencies=[require_scope("devices:read")], +) +async def list_devices( + tenant_id: uuid.UUID, + page: int = Query(1, ge=1, description="Page number (1-based)"), + page_size: int = Query(25, ge=1, le=100, description="Items per page (1-100)"), + status_filter: Optional[str] = Query(None, alias="status"), + search: Optional[str] = Query(None, description="Text search on hostname or IP"), + tag_id: Optional[uuid.UUID] = Query(None), + group_id: Optional[uuid.UUID] = Query(None), + sort_by: str = Query("created_at", description="Field to sort by"), + sort_order: str = Query("desc", description="asc or desc"), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> DeviceListResponse: + """List devices for a tenant with optional pagination, filtering, and sorting.""" + await _check_tenant_access(current_user, tenant_id, db) + + items, total = await device_service.get_devices( + db=db, + tenant_id=tenant_id, + page=page, + page_size=page_size, + status=status_filter, + search=search, + tag_id=tag_id, + group_id=group_id, + sort_by=sort_by, + sort_order=sort_order, + ) + return DeviceListResponse(items=items, total=total, page=page, page_size=page_size) + + +@router.post( + "/tenants/{tenant_id}/devices", + response_model=DeviceResponse, + status_code=status.HTTP_201_CREATED, + summary="Add a device (validates TCP connectivity first)", + dependencies=[Depends(require_operator_or_above), require_scope("devices:write")], +) +@limiter.limit("20/minute") +async def create_device( + request: Request, + tenant_id: uuid.UUID, + data: DeviceCreate, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> DeviceResponse: + """ + Create a new device. Requires operator role or above. + + The device IP/port is TCP-probed before the record is saved. + Credentials are encrypted with AES-256-GCM before storage and never returned. + """ + await _check_tenant_access(current_user, tenant_id, db) + result = await device_service.create_device( + db=db, + tenant_id=tenant_id, + data=data, + encryption_key=settings.get_encryption_key_bytes(), + ) + try: + await log_action( + db, tenant_id, current_user.user_id, "device_create", + resource_type="device", resource_id=str(result.id), + details={"hostname": data.hostname, "ip_address": data.ip_address}, + ip_address=request.client.host if request.client else None, + ) + except Exception: + pass + return result + + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}", + response_model=DeviceResponse, + summary="Get a single device", + dependencies=[require_scope("devices:read")], +) +async def get_device( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> DeviceResponse: + """Get device details. Viewer role and above.""" + await _check_tenant_access(current_user, tenant_id, db) + return await device_service.get_device(db=db, tenant_id=tenant_id, device_id=device_id) + + +@router.put( + "/tenants/{tenant_id}/devices/{device_id}", + response_model=DeviceResponse, + summary="Update a device", + dependencies=[Depends(require_operator_or_above), require_scope("devices:write")], +) +@limiter.limit("20/minute") +async def update_device( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + data: DeviceUpdate, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> DeviceResponse: + """Update device fields. Requires operator role or above.""" + await _check_tenant_access(current_user, tenant_id, db) + result = await device_service.update_device( + db=db, + tenant_id=tenant_id, + device_id=device_id, + data=data, + encryption_key=settings.get_encryption_key_bytes(), + ) + try: + await log_action( + db, tenant_id, current_user.user_id, "device_update", + resource_type="device", resource_id=str(device_id), + device_id=device_id, + details={"changes": data.model_dump(exclude_unset=True)}, + ip_address=request.client.host if request.client else None, + ) + except Exception: + pass + return result + + +@router.delete( + "/tenants/{tenant_id}/devices/{device_id}", + status_code=status.HTTP_204_NO_CONTENT, + summary="Delete a device", + dependencies=[Depends(require_tenant_admin_or_above), require_scope("devices:write")], +) +@limiter.limit("5/minute") +async def delete_device( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> None: + """Hard-delete a device. Requires tenant_admin or above.""" + await _check_tenant_access(current_user, tenant_id, db) + try: + await log_action( + db, tenant_id, current_user.user_id, "device_delete", + resource_type="device", resource_id=str(device_id), + device_id=device_id, + ip_address=request.client.host if request.client else None, + ) + except Exception: + pass + await device_service.delete_device(db=db, tenant_id=tenant_id, device_id=device_id) + + +# --------------------------------------------------------------------------- +# Subnet scan and bulk add +# --------------------------------------------------------------------------- + + +@router.post( + "/tenants/{tenant_id}/devices/scan", + response_model=SubnetScanResponse, + summary="Scan a subnet for MikroTik devices", + dependencies=[Depends(require_operator_or_above), require_scope("devices:write")], +) +@limiter.limit("5/minute") +async def scan_devices( + request: Request, + tenant_id: uuid.UUID, + data: SubnetScanRequest, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> SubnetScanResponse: + """ + Scan a CIDR subnet for hosts with open RouterOS API ports (8728/8729). + + Returns a list of discovered IPs for the user to review and selectively + import — does NOT automatically add devices. + + Requires operator role or above. + """ + if not current_user.is_super_admin and current_user.tenant_id != tenant_id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Access denied") + + discovered = await scan_subnet(data.cidr) + import ipaddress + network = ipaddress.ip_network(data.cidr, strict=False) + total_scanned = network.num_addresses - 2 if network.num_addresses > 2 else network.num_addresses + + # Audit log the scan (fire-and-forget — never breaks the response) + try: + await log_action( + db, tenant_id, current_user.user_id, "subnet_scan", + resource_type="network", resource_id=data.cidr, + details={ + "cidr": data.cidr, + "devices_found": len(discovered), + "ip": request.client.host if request.client else None, + }, + ip_address=request.client.host if request.client else None, + ) + except Exception: + pass + + return SubnetScanResponse( + cidr=data.cidr, + discovered=discovered, + total_scanned=total_scanned, + total_discovered=len(discovered), + ) + + +@router.post( + "/tenants/{tenant_id}/devices/bulk-add", + response_model=BulkAddResult, + status_code=status.HTTP_201_CREATED, + summary="Bulk-add devices from scan results", + dependencies=[Depends(require_operator_or_above), require_scope("devices:write")], +) +@limiter.limit("5/minute") +async def bulk_add_devices( + request: Request, + tenant_id: uuid.UUID, + data: BulkAddRequest, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> BulkAddResult: + """ + Add multiple devices at once from scan results. + + Per-device credentials take precedence over shared credentials. + Devices that fail connectivity checks or validation are reported in `failed`. + Requires operator role or above. + """ + await _check_tenant_access(current_user, tenant_id, db) + + added = [] + failed = [] + encryption_key = settings.get_encryption_key_bytes() + + for dev_data in data.devices: + # Resolve credentials: per-device first, then shared + username = dev_data.username or data.shared_username + password = dev_data.password or data.shared_password + + if not username or not password: + failed.append({ + "ip_address": dev_data.ip_address, + "error": "No credentials provided (set per-device or shared credentials)", + }) + continue + + create_data = DeviceCreate( + hostname=dev_data.hostname or dev_data.ip_address, + ip_address=dev_data.ip_address, + api_port=dev_data.api_port, + api_ssl_port=dev_data.api_ssl_port, + username=username, + password=password, + ) + + try: + device = await device_service.create_device( + db=db, + tenant_id=tenant_id, + data=create_data, + encryption_key=encryption_key, + ) + added.append(device) + try: + await log_action( + db, tenant_id, current_user.user_id, "device_adopt", + resource_type="device", resource_id=str(device.id), + details={"hostname": create_data.hostname, "ip_address": create_data.ip_address}, + ip_address=request.client.host if request.client else None, + ) + except Exception: + pass + except HTTPException as exc: + failed.append({"ip_address": dev_data.ip_address, "error": exc.detail}) + except Exception as exc: + failed.append({"ip_address": dev_data.ip_address, "error": str(exc)}) + + return BulkAddResult(added=added, failed=failed) + + +# --------------------------------------------------------------------------- +# Group assignment +# --------------------------------------------------------------------------- + + +@router.post( + "/tenants/{tenant_id}/devices/{device_id}/groups/{group_id}", + status_code=status.HTTP_204_NO_CONTENT, + summary="Add device to a group", + dependencies=[Depends(require_operator_or_above), require_scope("devices:write")], +) +@limiter.limit("20/minute") +async def add_device_to_group( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + group_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> None: + """Assign a device to a group. Requires operator or above.""" + await _check_tenant_access(current_user, tenant_id, db) + await device_service.assign_device_to_group(db, tenant_id, device_id, group_id) + + +@router.delete( + "/tenants/{tenant_id}/devices/{device_id}/groups/{group_id}", + status_code=status.HTTP_204_NO_CONTENT, + summary="Remove device from a group", + dependencies=[Depends(require_operator_or_above), require_scope("devices:write")], +) +@limiter.limit("5/minute") +async def remove_device_from_group( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + group_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> None: + """Remove a device from a group. Requires operator or above.""" + await _check_tenant_access(current_user, tenant_id, db) + await device_service.remove_device_from_group(db, tenant_id, device_id, group_id) + + +# --------------------------------------------------------------------------- +# Tag assignment +# --------------------------------------------------------------------------- + + +@router.post( + "/tenants/{tenant_id}/devices/{device_id}/tags/{tag_id}", + status_code=status.HTTP_204_NO_CONTENT, + summary="Add tag to a device", + dependencies=[Depends(require_operator_or_above), require_scope("devices:write")], +) +@limiter.limit("20/minute") +async def add_tag_to_device( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + tag_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> None: + """Assign a tag to a device. Requires operator or above.""" + await _check_tenant_access(current_user, tenant_id, db) + await device_service.assign_tag_to_device(db, tenant_id, device_id, tag_id) + + +@router.delete( + "/tenants/{tenant_id}/devices/{device_id}/tags/{tag_id}", + status_code=status.HTTP_204_NO_CONTENT, + summary="Remove tag from a device", + dependencies=[Depends(require_operator_or_above), require_scope("devices:write")], +) +@limiter.limit("5/minute") +async def remove_tag_from_device( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + tag_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> None: + """Remove a tag from a device. Requires operator or above.""" + await _check_tenant_access(current_user, tenant_id, db) + await device_service.remove_tag_from_device(db, tenant_id, device_id, tag_id) diff --git a/backend/app/routers/events.py b/backend/app/routers/events.py new file mode 100644 index 0000000..3ac9f19 --- /dev/null +++ b/backend/app/routers/events.py @@ -0,0 +1,164 @@ +"""Unified events timeline API endpoint. + +Provides a single GET endpoint that unions alert events, device status changes, +and config backup runs into a unified timeline for the dashboard. + +RLS enforced via get_db() (app_user engine with tenant context). +""" + +import logging +import uuid +from typing import Any, Optional + +from fastapi import APIRouter, Depends, HTTPException, Query, status +from sqlalchemy import text +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db, set_tenant_context +from app.middleware.tenant_context import CurrentUser, get_current_user + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=["events"]) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """Verify the current user is allowed to access the given tenant.""" + if current_user.is_super_admin: + await set_tenant_context(db, str(tenant_id)) + elif current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied to this tenant", + ) + + +# --------------------------------------------------------------------------- +# Unified events endpoint +# --------------------------------------------------------------------------- + + +@router.get( + "/tenants/{tenant_id}/events", + summary="List unified events (alerts, status changes, config backups)", +) +async def list_events( + tenant_id: uuid.UUID, + limit: int = Query(50, ge=1, le=200, description="Max events to return"), + event_type: Optional[str] = Query( + None, + description="Filter by event type: alert, status_change, config_backup", + ), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[dict[str, Any]]: + """Return a unified list of recent events across alerts, device status, and config backups. + + Events are ordered by timestamp descending, limited to `limit` (default 50). + RLS automatically filters to the tenant's data via the app_user session. + """ + await _check_tenant_access(current_user, tenant_id, db) + + if event_type and event_type not in ("alert", "status_change", "config_backup"): + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail="event_type must be one of: alert, status_change, config_backup", + ) + + events: list[dict[str, Any]] = [] + + # 1. Alert events + if not event_type or event_type == "alert": + alert_result = await db.execute( + text(""" + SELECT ae.id, ae.status, ae.severity, ae.metric, ae.message, + ae.fired_at, ae.device_id, d.hostname + FROM alert_events ae + LEFT JOIN devices d ON d.id = ae.device_id + ORDER BY ae.fired_at DESC + LIMIT :limit + """), + {"limit": limit}, + ) + for row in alert_result.fetchall(): + alert_status = row[1] or "firing" + metric = row[3] or "unknown" + events.append({ + "id": str(row[0]), + "event_type": "alert", + "severity": row[2], + "title": f"{alert_status}: {metric}", + "description": row[4] or f"Alert {alert_status} for {metric}", + "device_hostname": row[7], + "device_id": str(row[6]) if row[6] else None, + "timestamp": row[5].isoformat() if row[5] else None, + }) + + # 2. Device status changes (inferred from current status + last_seen) + if not event_type or event_type == "status_change": + status_result = await db.execute( + text(""" + SELECT d.id, d.hostname, d.status, d.last_seen + FROM devices d + WHERE d.last_seen IS NOT NULL + ORDER BY d.last_seen DESC + LIMIT :limit + """), + {"limit": limit}, + ) + for row in status_result.fetchall(): + device_status = row[2] or "unknown" + hostname = row[1] or "Unknown device" + severity = "info" if device_status == "online" else "warning" + events.append({ + "id": f"status-{row[0]}", + "event_type": "status_change", + "severity": severity, + "title": f"Device {device_status}", + "description": f"{hostname} is now {device_status}", + "device_hostname": hostname, + "device_id": str(row[0]), + "timestamp": row[3].isoformat() if row[3] else None, + }) + + # 3. Config backup runs + if not event_type or event_type == "config_backup": + backup_result = await db.execute( + text(""" + SELECT cbr.id, cbr.trigger_type, cbr.created_at, + cbr.device_id, d.hostname + FROM config_backup_runs cbr + LEFT JOIN devices d ON d.id = cbr.device_id + ORDER BY cbr.created_at DESC + LIMIT :limit + """), + {"limit": limit}, + ) + for row in backup_result.fetchall(): + trigger_type = row[1] or "manual" + hostname = row[4] or "Unknown device" + events.append({ + "id": str(row[0]), + "event_type": "config_backup", + "severity": "info", + "title": "Config backup", + "description": f"{trigger_type} backup completed for {hostname}", + "device_hostname": hostname, + "device_id": str(row[3]) if row[3] else None, + "timestamp": row[2].isoformat() if row[2] else None, + }) + + # Sort all events by timestamp descending, then apply final limit + events.sort( + key=lambda e: e["timestamp"] or "", + reverse=True, + ) + + return events[:limit] diff --git a/backend/app/routers/firmware.py b/backend/app/routers/firmware.py new file mode 100644 index 0000000..278be84 --- /dev/null +++ b/backend/app/routers/firmware.py @@ -0,0 +1,712 @@ +"""Firmware API endpoints for version overview, cache management, preferred channel, +and firmware upgrade orchestration. + +Tenant-scoped routes under /api/tenants/{tenant_id}/firmware/*. +Global routes under /api/firmware/* for version listing and admin actions. +""" + +import asyncio +import uuid +from datetime import datetime +from typing import Any, Optional + +from fastapi import APIRouter, Depends, HTTPException, Query, Request, status +from pydantic import BaseModel, ConfigDict +from sqlalchemy import text +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db, set_tenant_context +from app.middleware.rate_limit import limiter +from app.middleware.rbac import require_scope +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.services.audit_service import log_action + +router = APIRouter(tags=["firmware"]) + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """Verify the current user is allowed to access the given tenant.""" + if current_user.is_super_admin: + await set_tenant_context(db, str(tenant_id)) + elif current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied to this tenant", + ) + + +class PreferredChannelRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + preferred_channel: str # "stable", "long-term", "testing" + + +class FirmwareDownloadRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + architecture: str + channel: str + version: str + + +# ========================================================================= +# TENANT-SCOPED ENDPOINTS +# ========================================================================= + + +@router.get( + "/tenants/{tenant_id}/firmware/overview", + summary="Get firmware status for all devices in tenant", + dependencies=[require_scope("firmware:write")], +) +async def get_firmware_overview( + tenant_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + + from app.services.firmware_service import get_firmware_overview as _get_overview + return await _get_overview(str(tenant_id)) + + +@router.patch( + "/tenants/{tenant_id}/devices/{device_id}/preferred-channel", + summary="Set preferred firmware channel for a device", + dependencies=[require_scope("firmware:write")], +) +@limiter.limit("20/minute") +async def set_device_preferred_channel( + request: Request, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + body: PreferredChannelRequest, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, str]: + await _check_tenant_access(current_user, tenant_id, db) + + if body.preferred_channel not in ("stable", "long-term", "testing"): + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail="preferred_channel must be one of: stable, long-term, testing", + ) + + result = await db.execute( + text(""" + UPDATE devices SET preferred_channel = :channel, updated_at = NOW() + WHERE id = :device_id + RETURNING id + """), + {"channel": body.preferred_channel, "device_id": str(device_id)}, + ) + if not result.fetchone(): + raise HTTPException(status_code=404, detail="Device not found") + await db.commit() + return {"status": "ok", "preferred_channel": body.preferred_channel} + + +@router.patch( + "/tenants/{tenant_id}/device-groups/{group_id}/preferred-channel", + summary="Set preferred firmware channel for a device group", + dependencies=[require_scope("firmware:write")], +) +@limiter.limit("20/minute") +async def set_group_preferred_channel( + request: Request, + tenant_id: uuid.UUID, + group_id: uuid.UUID, + body: PreferredChannelRequest, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, str]: + await _check_tenant_access(current_user, tenant_id, db) + + if body.preferred_channel not in ("stable", "long-term", "testing"): + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail="preferred_channel must be one of: stable, long-term, testing", + ) + + result = await db.execute( + text(""" + UPDATE device_groups SET preferred_channel = :channel + WHERE id = :group_id + RETURNING id + """), + {"channel": body.preferred_channel, "group_id": str(group_id)}, + ) + if not result.fetchone(): + raise HTTPException(status_code=404, detail="Device group not found") + await db.commit() + return {"status": "ok", "preferred_channel": body.preferred_channel} + + +# ========================================================================= +# GLOBAL ENDPOINTS (firmware versions are not tenant-scoped) +# ========================================================================= + + +@router.get( + "/firmware/versions", + summary="List all known firmware versions from cache", +) +async def list_firmware_versions( + architecture: Optional[str] = Query(None), + channel: Optional[str] = Query(None), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[dict[str, Any]]: + filters = [] + params: dict[str, Any] = {} + + if architecture: + filters.append("architecture = :arch") + params["arch"] = architecture + if channel: + filters.append("channel = :channel") + params["channel"] = channel + + where = f"WHERE {' AND '.join(filters)}" if filters else "" + + result = await db.execute( + text(f""" + SELECT id, architecture, channel, version, npk_url, + npk_local_path, npk_size_bytes, checked_at + FROM firmware_versions + {where} + ORDER BY architecture, channel, checked_at DESC + """), + params, + ) + + return [ + { + "id": str(row[0]), + "architecture": row[1], + "channel": row[2], + "version": row[3], + "npk_url": row[4], + "npk_local_path": row[5], + "npk_size_bytes": row[6], + "checked_at": row[7].isoformat() if row[7] else None, + } + for row in result.fetchall() + ] + + +@router.post( + "/firmware/check", + summary="Trigger immediate firmware version check (super admin only)", +) +async def trigger_firmware_check( + current_user: CurrentUser = Depends(get_current_user), +) -> dict[str, Any]: + if not current_user.is_super_admin: + raise HTTPException(status_code=403, detail="Super admin only") + + from app.services.firmware_service import check_latest_versions + results = await check_latest_versions() + return {"status": "ok", "versions_discovered": len(results), "versions": results} + + +@router.get( + "/firmware/cache", + summary="List locally cached NPK files (super admin only)", +) +async def list_firmware_cache( + current_user: CurrentUser = Depends(get_current_user), +) -> list[dict[str, Any]]: + if not current_user.is_super_admin: + raise HTTPException(status_code=403, detail="Super admin only") + + from app.services.firmware_service import get_cached_firmware + return await get_cached_firmware() + + +@router.post( + "/firmware/download", + summary="Download a specific NPK to local cache (super admin only)", +) +async def download_firmware( + body: FirmwareDownloadRequest, + current_user: CurrentUser = Depends(get_current_user), +) -> dict[str, str]: + if not current_user.is_super_admin: + raise HTTPException(status_code=403, detail="Super admin only") + + from app.services.firmware_service import download_firmware as _download + path = await _download(body.architecture, body.channel, body.version) + return {"status": "ok", "path": path} + + +# ========================================================================= +# UPGRADE ENDPOINTS +# ========================================================================= + + +class UpgradeRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + device_id: str + target_version: str + architecture: str + channel: str = "stable" + confirmed_major_upgrade: bool = False + scheduled_at: Optional[str] = None # ISO datetime or None for immediate + + +class MassUpgradeRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + device_ids: list[str] + target_version: str + channel: str = "stable" + confirmed_major_upgrade: bool = False + scheduled_at: Optional[str] = None + + +@router.post( + "/tenants/{tenant_id}/firmware/upgrade", + summary="Start or schedule a single device firmware upgrade", + status_code=status.HTTP_202_ACCEPTED, + dependencies=[require_scope("firmware:write")], +) +@limiter.limit("20/minute") +async def start_firmware_upgrade( + request: Request, + tenant_id: uuid.UUID, + body: UpgradeRequest, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + + if current_user.role == "viewer": + raise HTTPException(403, "Viewers cannot initiate upgrades") + + # Look up device architecture if not provided + architecture = body.architecture + if not architecture: + dev_result = await db.execute( + text("SELECT architecture FROM devices WHERE id = CAST(:id AS uuid)"), + {"id": body.device_id}, + ) + dev_row = dev_result.fetchone() + if not dev_row or not dev_row[0]: + raise HTTPException(422, "Device architecture unknown — cannot upgrade") + architecture = dev_row[0] + + # Create upgrade job + job_id = str(uuid.uuid4()) + await db.execute( + text(""" + INSERT INTO firmware_upgrade_jobs + (id, tenant_id, device_id, target_version, architecture, channel, + status, confirmed_major_upgrade, scheduled_at) + VALUES + (CAST(:id AS uuid), CAST(:tenant_id AS uuid), CAST(:device_id AS uuid), + :target_version, :architecture, :channel, + :status, :confirmed, :scheduled_at) + """), + { + "id": job_id, + "tenant_id": str(tenant_id), + "device_id": body.device_id, + "target_version": body.target_version, + "architecture": architecture, + "channel": body.channel, + "status": "scheduled" if body.scheduled_at else "pending", + "confirmed": body.confirmed_major_upgrade, + "scheduled_at": body.scheduled_at, + }, + ) + await db.commit() + + # Schedule or start immediately + if body.scheduled_at: + from app.services.upgrade_service import schedule_upgrade + schedule_upgrade(job_id, datetime.fromisoformat(body.scheduled_at)) + else: + from app.services.upgrade_service import start_upgrade + asyncio.create_task(start_upgrade(job_id)) + + try: + await log_action( + db, tenant_id, current_user.user_id, "firmware_upgrade", + resource_type="firmware", resource_id=job_id, + device_id=uuid.UUID(body.device_id), + details={"target_version": body.target_version, "channel": body.channel}, + ) + except Exception: + pass + + return {"status": "accepted", "job_id": job_id} + + +@router.post( + "/tenants/{tenant_id}/firmware/mass-upgrade", + summary="Start or schedule a mass firmware upgrade for multiple devices", + status_code=status.HTTP_202_ACCEPTED, + dependencies=[require_scope("firmware:write")], +) +@limiter.limit("5/minute") +async def start_mass_firmware_upgrade( + request: Request, + tenant_id: uuid.UUID, + body: MassUpgradeRequest, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + + if current_user.role == "viewer": + raise HTTPException(403, "Viewers cannot initiate upgrades") + + rollout_group_id = str(uuid.uuid4()) + jobs = [] + + for device_id in body.device_ids: + # Look up architecture per device + dev_result = await db.execute( + text("SELECT architecture FROM devices WHERE id = CAST(:id AS uuid)"), + {"id": device_id}, + ) + dev_row = dev_result.fetchone() + architecture = dev_row[0] if dev_row and dev_row[0] else "unknown" + + job_id = str(uuid.uuid4()) + await db.execute( + text(""" + INSERT INTO firmware_upgrade_jobs + (id, tenant_id, device_id, rollout_group_id, + target_version, architecture, channel, + status, confirmed_major_upgrade, scheduled_at) + VALUES + (CAST(:id AS uuid), CAST(:tenant_id AS uuid), + CAST(:device_id AS uuid), CAST(:group_id AS uuid), + :target_version, :architecture, :channel, + :status, :confirmed, :scheduled_at) + """), + { + "id": job_id, + "tenant_id": str(tenant_id), + "device_id": device_id, + "group_id": rollout_group_id, + "target_version": body.target_version, + "architecture": architecture, + "channel": body.channel, + "status": "scheduled" if body.scheduled_at else "pending", + "confirmed": body.confirmed_major_upgrade, + "scheduled_at": body.scheduled_at, + }, + ) + jobs.append({"job_id": job_id, "device_id": device_id, "architecture": architecture}) + + await db.commit() + + # Schedule or start immediately + if body.scheduled_at: + from app.services.upgrade_service import schedule_mass_upgrade + schedule_mass_upgrade(rollout_group_id, datetime.fromisoformat(body.scheduled_at)) + else: + from app.services.upgrade_service import start_mass_upgrade + asyncio.create_task(start_mass_upgrade(rollout_group_id)) + + return { + "status": "accepted", + "rollout_group_id": rollout_group_id, + "jobs": jobs, + } + + +@router.get( + "/tenants/{tenant_id}/firmware/upgrades", + summary="List firmware upgrade jobs for tenant", + dependencies=[require_scope("firmware:write")], +) +async def list_upgrade_jobs( + tenant_id: uuid.UUID, + upgrade_status: Optional[str] = Query(None, alias="status"), + device_id: Optional[str] = Query(None), + rollout_group_id: Optional[str] = Query(None), + page: int = Query(1, ge=1), + per_page: int = Query(50, ge=1, le=200), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + + filters = ["1=1"] + params: dict[str, Any] = {} + + if upgrade_status: + filters.append("j.status = :status") + params["status"] = upgrade_status + if device_id: + filters.append("j.device_id = CAST(:device_id AS uuid)") + params["device_id"] = device_id + if rollout_group_id: + filters.append("j.rollout_group_id = CAST(:group_id AS uuid)") + params["group_id"] = rollout_group_id + + where = " AND ".join(filters) + offset = (page - 1) * per_page + + count_result = await db.execute( + text(f"SELECT COUNT(*) FROM firmware_upgrade_jobs j WHERE {where}"), + params, + ) + total = count_result.scalar() or 0 + + result = await db.execute( + text(f""" + SELECT j.id, j.device_id, j.rollout_group_id, + j.target_version, j.architecture, j.channel, + j.status, j.pre_upgrade_backup_sha, j.scheduled_at, + j.started_at, j.completed_at, j.error_message, + j.confirmed_major_upgrade, j.created_at, + d.hostname AS device_hostname + FROM firmware_upgrade_jobs j + LEFT JOIN devices d ON d.id = j.device_id + WHERE {where} + ORDER BY j.created_at DESC + LIMIT :limit OFFSET :offset + """), + {**params, "limit": per_page, "offset": offset}, + ) + + items = [ + { + "id": str(row[0]), + "device_id": str(row[1]), + "rollout_group_id": str(row[2]) if row[2] else None, + "target_version": row[3], + "architecture": row[4], + "channel": row[5], + "status": row[6], + "pre_upgrade_backup_sha": row[7], + "scheduled_at": row[8].isoformat() if row[8] else None, + "started_at": row[9].isoformat() if row[9] else None, + "completed_at": row[10].isoformat() if row[10] else None, + "error_message": row[11], + "confirmed_major_upgrade": row[12], + "created_at": row[13].isoformat() if row[13] else None, + "device_hostname": row[14], + } + for row in result.fetchall() + ] + + return {"items": items, "total": total, "page": page, "per_page": per_page} + + +@router.get( + "/tenants/{tenant_id}/firmware/upgrades/{job_id}", + summary="Get single upgrade job detail", + dependencies=[require_scope("firmware:write")], +) +async def get_upgrade_job( + tenant_id: uuid.UUID, + job_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + + result = await db.execute( + text(""" + SELECT j.id, j.device_id, j.rollout_group_id, + j.target_version, j.architecture, j.channel, + j.status, j.pre_upgrade_backup_sha, j.scheduled_at, + j.started_at, j.completed_at, j.error_message, + j.confirmed_major_upgrade, j.created_at, + d.hostname AS device_hostname + FROM firmware_upgrade_jobs j + LEFT JOIN devices d ON d.id = j.device_id + WHERE j.id = CAST(:job_id AS uuid) + """), + {"job_id": str(job_id)}, + ) + row = result.fetchone() + if not row: + raise HTTPException(404, "Upgrade job not found") + + return { + "id": str(row[0]), + "device_id": str(row[1]), + "rollout_group_id": str(row[2]) if row[2] else None, + "target_version": row[3], + "architecture": row[4], + "channel": row[5], + "status": row[6], + "pre_upgrade_backup_sha": row[7], + "scheduled_at": row[8].isoformat() if row[8] else None, + "started_at": row[9].isoformat() if row[9] else None, + "completed_at": row[10].isoformat() if row[10] else None, + "error_message": row[11], + "confirmed_major_upgrade": row[12], + "created_at": row[13].isoformat() if row[13] else None, + "device_hostname": row[14], + } + + +@router.get( + "/tenants/{tenant_id}/firmware/rollouts/{rollout_group_id}", + summary="Get mass rollout status with all jobs", + dependencies=[require_scope("firmware:write")], +) +async def get_rollout_status( + tenant_id: uuid.UUID, + rollout_group_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + + result = await db.execute( + text(""" + SELECT j.id, j.device_id, j.status, j.target_version, + j.architecture, j.error_message, j.started_at, + j.completed_at, d.hostname + FROM firmware_upgrade_jobs j + LEFT JOIN devices d ON d.id = j.device_id + WHERE j.rollout_group_id = CAST(:group_id AS uuid) + ORDER BY j.created_at ASC + """), + {"group_id": str(rollout_group_id)}, + ) + rows = result.fetchall() + + if not rows: + raise HTTPException(404, "Rollout group not found") + + # Compute summary + total = len(rows) + completed = sum(1 for r in rows if r[2] == "completed") + failed = sum(1 for r in rows if r[2] == "failed") + paused = sum(1 for r in rows if r[2] == "paused") + pending = sum(1 for r in rows if r[2] in ("pending", "scheduled")) + + # Find currently running device + active_statuses = {"downloading", "uploading", "rebooting", "verifying"} + current_device = None + for r in rows: + if r[2] in active_statuses: + current_device = r[8] or str(r[1]) + break + + jobs = [ + { + "id": str(r[0]), + "device_id": str(r[1]), + "status": r[2], + "target_version": r[3], + "architecture": r[4], + "error_message": r[5], + "started_at": r[6].isoformat() if r[6] else None, + "completed_at": r[7].isoformat() if r[7] else None, + "device_hostname": r[8], + } + for r in rows + ] + + return { + "rollout_group_id": str(rollout_group_id), + "total": total, + "completed": completed, + "failed": failed, + "paused": paused, + "pending": pending, + "current_device": current_device, + "jobs": jobs, + } + + +@router.post( + "/tenants/{tenant_id}/firmware/upgrades/{job_id}/cancel", + summary="Cancel a scheduled or pending upgrade", + dependencies=[require_scope("firmware:write")], +) +@limiter.limit("20/minute") +async def cancel_upgrade_endpoint( + request: Request, + tenant_id: uuid.UUID, + job_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, str]: + await _check_tenant_access(current_user, tenant_id, db) + + if current_user.role == "viewer": + raise HTTPException(403, "Viewers cannot cancel upgrades") + + from app.services.upgrade_service import cancel_upgrade + await cancel_upgrade(str(job_id)) + return {"status": "ok", "message": "Upgrade cancelled"} + + +@router.post( + "/tenants/{tenant_id}/firmware/upgrades/{job_id}/retry", + summary="Retry a failed upgrade", + dependencies=[require_scope("firmware:write")], +) +@limiter.limit("20/minute") +async def retry_upgrade_endpoint( + request: Request, + tenant_id: uuid.UUID, + job_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, str]: + await _check_tenant_access(current_user, tenant_id, db) + + if current_user.role == "viewer": + raise HTTPException(403, "Viewers cannot retry upgrades") + + from app.services.upgrade_service import retry_failed_upgrade + await retry_failed_upgrade(str(job_id)) + return {"status": "ok", "message": "Upgrade retry started"} + + +@router.post( + "/tenants/{tenant_id}/firmware/rollouts/{rollout_group_id}/resume", + summary="Resume a paused mass rollout", + dependencies=[require_scope("firmware:write")], +) +@limiter.limit("20/minute") +async def resume_rollout_endpoint( + request: Request, + tenant_id: uuid.UUID, + rollout_group_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, str]: + await _check_tenant_access(current_user, tenant_id, db) + + if current_user.role == "viewer": + raise HTTPException(403, "Viewers cannot resume rollouts") + + from app.services.upgrade_service import resume_mass_upgrade + await resume_mass_upgrade(str(rollout_group_id)) + return {"status": "ok", "message": "Rollout resumed"} + + +@router.post( + "/tenants/{tenant_id}/firmware/rollouts/{rollout_group_id}/abort", + summary="Abort remaining devices in a paused rollout", + dependencies=[require_scope("firmware:write")], +) +@limiter.limit("5/minute") +async def abort_rollout_endpoint( + request: Request, + tenant_id: uuid.UUID, + rollout_group_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + + if current_user.role == "viewer": + raise HTTPException(403, "Viewers cannot abort rollouts") + + from app.services.upgrade_service import abort_mass_upgrade + aborted = await abort_mass_upgrade(str(rollout_group_id)) + return {"status": "ok", "aborted_count": aborted} diff --git a/backend/app/routers/maintenance_windows.py b/backend/app/routers/maintenance_windows.py new file mode 100644 index 0000000..61e5abf --- /dev/null +++ b/backend/app/routers/maintenance_windows.py @@ -0,0 +1,309 @@ +"""Maintenance windows API endpoints. + +Tenant-scoped routes under /api/tenants/{tenant_id}/ for: +- Maintenance window CRUD (list, create, update, delete) +- Filterable by status: upcoming, active, past + +RLS enforced via get_db() (app_user engine with tenant context). +RBAC: operator and above for all operations. +""" + +import json +import logging +import uuid +from datetime import datetime +from typing import Any, Optional + +from fastapi import APIRouter, Depends, HTTPException, Query, Request, status +from pydantic import BaseModel, ConfigDict +from sqlalchemy import text +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db, set_tenant_context +from app.middleware.rate_limit import limiter +from app.middleware.tenant_context import CurrentUser, get_current_user + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=["maintenance-windows"]) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """Verify the current user is allowed to access the given tenant.""" + if current_user.is_super_admin: + await set_tenant_context(db, str(tenant_id)) + elif current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied to this tenant", + ) + + +def _require_operator(current_user: CurrentUser) -> None: + """Raise 403 if user does not have at least operator role.""" + if current_user.role == "viewer": + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Requires at least operator role.", + ) + + +# --------------------------------------------------------------------------- +# Request/response schemas +# --------------------------------------------------------------------------- + + +class MaintenanceWindowCreate(BaseModel): + model_config = ConfigDict(extra="forbid") + name: str + device_ids: list[str] = [] + start_at: datetime + end_at: datetime + suppress_alerts: bool = True + notes: Optional[str] = None + + +class MaintenanceWindowUpdate(BaseModel): + model_config = ConfigDict(extra="forbid") + name: Optional[str] = None + device_ids: Optional[list[str]] = None + start_at: Optional[datetime] = None + end_at: Optional[datetime] = None + suppress_alerts: Optional[bool] = None + notes: Optional[str] = None + + +class MaintenanceWindowResponse(BaseModel): + model_config = ConfigDict(extra="forbid") + id: str + tenant_id: str + name: str + device_ids: list[str] + start_at: str + end_at: str + suppress_alerts: bool + notes: Optional[str] = None + created_by: Optional[str] = None + created_at: str + + +# --------------------------------------------------------------------------- +# CRUD endpoints +# --------------------------------------------------------------------------- + + +@router.get( + "/tenants/{tenant_id}/maintenance-windows", + summary="List maintenance windows for tenant", +) +async def list_maintenance_windows( + tenant_id: uuid.UUID, + window_status: Optional[str] = Query(None, alias="status"), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[dict[str, Any]]: + await _check_tenant_access(current_user, tenant_id, db) + _require_operator(current_user) + + filters = ["1=1"] + params: dict[str, Any] = {} + + if window_status == "active": + filters.append("mw.start_at <= NOW() AND mw.end_at >= NOW()") + elif window_status == "upcoming": + filters.append("mw.start_at > NOW()") + elif window_status == "past": + filters.append("mw.end_at < NOW()") + + where = " AND ".join(filters) + + result = await db.execute( + text(f""" + SELECT mw.id, mw.tenant_id, mw.name, mw.device_ids, + mw.start_at, mw.end_at, mw.suppress_alerts, + mw.notes, mw.created_by, mw.created_at + FROM maintenance_windows mw + WHERE {where} + ORDER BY mw.start_at DESC + """), + params, + ) + + return [ + { + "id": str(row[0]), + "tenant_id": str(row[1]), + "name": row[2], + "device_ids": row[3] if isinstance(row[3], list) else [], + "start_at": row[4].isoformat() if row[4] else None, + "end_at": row[5].isoformat() if row[5] else None, + "suppress_alerts": row[6], + "notes": row[7], + "created_by": str(row[8]) if row[8] else None, + "created_at": row[9].isoformat() if row[9] else None, + } + for row in result.fetchall() + ] + + +@router.post( + "/tenants/{tenant_id}/maintenance-windows", + summary="Create maintenance window", + status_code=status.HTTP_201_CREATED, +) +@limiter.limit("20/minute") +async def create_maintenance_window( + request: Request, + tenant_id: uuid.UUID, + body: MaintenanceWindowCreate, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + _require_operator(current_user) + + if body.end_at <= body.start_at: + raise HTTPException(422, "end_at must be after start_at") + + window_id = str(uuid.uuid4()) + + await db.execute( + text(""" + INSERT INTO maintenance_windows + (id, tenant_id, name, device_ids, start_at, end_at, + suppress_alerts, notes, created_by) + VALUES + (CAST(:id AS uuid), CAST(:tenant_id AS uuid), + :name, CAST(:device_ids AS jsonb), :start_at, :end_at, + :suppress_alerts, :notes, CAST(:created_by AS uuid)) + """), + { + "id": window_id, + "tenant_id": str(tenant_id), + "name": body.name, + "device_ids": json.dumps(body.device_ids), + "start_at": body.start_at, + "end_at": body.end_at, + "suppress_alerts": body.suppress_alerts, + "notes": body.notes, + "created_by": str(current_user.user_id), + }, + ) + await db.commit() + + return { + "id": window_id, + "tenant_id": str(tenant_id), + "name": body.name, + "device_ids": body.device_ids, + "start_at": body.start_at.isoformat(), + "end_at": body.end_at.isoformat(), + "suppress_alerts": body.suppress_alerts, + "notes": body.notes, + "created_by": str(current_user.user_id), + "created_at": datetime.utcnow().isoformat(), + } + + +@router.put( + "/tenants/{tenant_id}/maintenance-windows/{window_id}", + summary="Update maintenance window", +) +@limiter.limit("20/minute") +async def update_maintenance_window( + request: Request, + tenant_id: uuid.UUID, + window_id: uuid.UUID, + body: MaintenanceWindowUpdate, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> dict[str, Any]: + await _check_tenant_access(current_user, tenant_id, db) + _require_operator(current_user) + + # Build dynamic SET clause for partial updates + set_parts: list[str] = ["updated_at = NOW()"] + params: dict[str, Any] = {"window_id": str(window_id)} + + if body.name is not None: + set_parts.append("name = :name") + params["name"] = body.name + if body.device_ids is not None: + set_parts.append("device_ids = CAST(:device_ids AS jsonb)") + params["device_ids"] = json.dumps(body.device_ids) + if body.start_at is not None: + set_parts.append("start_at = :start_at") + params["start_at"] = body.start_at + if body.end_at is not None: + set_parts.append("end_at = :end_at") + params["end_at"] = body.end_at + if body.suppress_alerts is not None: + set_parts.append("suppress_alerts = :suppress_alerts") + params["suppress_alerts"] = body.suppress_alerts + if body.notes is not None: + set_parts.append("notes = :notes") + params["notes"] = body.notes + + set_clause = ", ".join(set_parts) + + result = await db.execute( + text(f""" + UPDATE maintenance_windows + SET {set_clause} + WHERE id = CAST(:window_id AS uuid) + RETURNING id, tenant_id, name, device_ids, start_at, end_at, + suppress_alerts, notes, created_by, created_at + """), + params, + ) + row = result.fetchone() + if not row: + raise HTTPException(404, "Maintenance window not found") + await db.commit() + + return { + "id": str(row[0]), + "tenant_id": str(row[1]), + "name": row[2], + "device_ids": row[3] if isinstance(row[3], list) else [], + "start_at": row[4].isoformat() if row[4] else None, + "end_at": row[5].isoformat() if row[5] else None, + "suppress_alerts": row[6], + "notes": row[7], + "created_by": str(row[8]) if row[8] else None, + "created_at": row[9].isoformat() if row[9] else None, + } + + +@router.delete( + "/tenants/{tenant_id}/maintenance-windows/{window_id}", + summary="Delete maintenance window", + status_code=status.HTTP_204_NO_CONTENT, +) +@limiter.limit("5/minute") +async def delete_maintenance_window( + request: Request, + tenant_id: uuid.UUID, + window_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> None: + await _check_tenant_access(current_user, tenant_id, db) + _require_operator(current_user) + + result = await db.execute( + text( + "DELETE FROM maintenance_windows WHERE id = CAST(:id AS uuid) RETURNING id" + ), + {"id": str(window_id)}, + ) + if not result.fetchone(): + raise HTTPException(404, "Maintenance window not found") + await db.commit() diff --git a/backend/app/routers/metrics.py b/backend/app/routers/metrics.py new file mode 100644 index 0000000..92ae3ea --- /dev/null +++ b/backend/app/routers/metrics.py @@ -0,0 +1,414 @@ +""" +Metrics API endpoints for querying TimescaleDB hypertables. + +All device-scoped routes are tenant-scoped under +/api/tenants/{tenant_id}/devices/{device_id}/metrics/*. +Fleet summary endpoints are under /api/tenants/{tenant_id}/fleet/summary +and /api/fleet/summary (super_admin cross-tenant). + +RLS is enforced via get_db() — the app_user engine applies tenant filtering +automatically based on the SET LOCAL app.current_tenant context. + +All endpoints require authentication (get_current_user) and enforce +tenant access via _check_tenant_access. +""" + +import uuid +from datetime import datetime, timedelta +from typing import Any, Optional + +from fastapi import APIRouter, Depends, HTTPException, Query, status +from sqlalchemy import text +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db +from app.middleware.tenant_context import CurrentUser, get_current_user + +router = APIRouter(tags=["metrics"]) + + +def _bucket_for_range(start: datetime, end: datetime) -> timedelta: + """ + Select an appropriate time_bucket size based on the requested time range. + + Shorter ranges get finer granularity; longer ranges get coarser buckets + to keep result sets manageable. + + Returns a timedelta because asyncpg requires a Python timedelta (not a + string interval literal) when binding the first argument of time_bucket(). + """ + delta = end - start + hours = delta.total_seconds() / 3600 + if hours <= 1: + return timedelta(minutes=1) + elif hours <= 6: + return timedelta(minutes=5) + elif hours <= 24: + return timedelta(minutes=15) + elif hours <= 168: # 7 days + return timedelta(hours=1) + elif hours <= 720: # 30 days + return timedelta(hours=6) + else: + return timedelta(days=1) + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """ + Verify the current user is allowed to access the given tenant. + + - super_admin can access any tenant — re-sets DB tenant context to target tenant. + - All other roles must match their own tenant_id. + """ + if current_user.is_super_admin: + # Re-set tenant context to the target tenant so RLS allows the operation + from app.database import set_tenant_context + await set_tenant_context(db, str(tenant_id)) + return + if current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied: you do not belong to this tenant.", + ) + + +# --------------------------------------------------------------------------- +# Health metrics +# --------------------------------------------------------------------------- + + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}/metrics/health", + summary="Time-bucketed health metrics (CPU, memory, disk, temperature)", +) +async def device_health_metrics( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + start: datetime = Query(..., description="Start of time range (ISO format)"), + end: datetime = Query(..., description="End of time range (ISO format)"), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[dict[str, Any]]: + """ + Return time-bucketed CPU, memory, disk, and temperature metrics for a device. + + Bucket size adapts automatically to the requested time range. + """ + await _check_tenant_access(current_user, tenant_id, db) + bucket = _bucket_for_range(start, end) + + result = await db.execute( + text(""" + SELECT + time_bucket(:bucket, time) AS bucket, + avg(cpu_load)::smallint AS avg_cpu, + max(cpu_load)::smallint AS max_cpu, + avg(CASE WHEN total_memory > 0 + THEN round((1 - free_memory::float / total_memory) * 100) + ELSE NULL END)::smallint AS avg_mem_pct, + avg(CASE WHEN total_disk > 0 + THEN round((1 - free_disk::float / total_disk) * 100) + ELSE NULL END)::smallint AS avg_disk_pct, + avg(temperature)::smallint AS avg_temp + FROM health_metrics + WHERE device_id = :device_id + AND time >= :start AND time < :end + GROUP BY bucket + ORDER BY bucket ASC + """), + {"bucket": bucket, "device_id": str(device_id), "start": start, "end": end}, + ) + rows = result.mappings().all() + return [dict(row) for row in rows] + + +# --------------------------------------------------------------------------- +# Interface traffic metrics +# --------------------------------------------------------------------------- + + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}/metrics/interfaces", + summary="Time-bucketed interface bandwidth metrics (bps from cumulative byte deltas)", +) +async def device_interface_metrics( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + start: datetime = Query(..., description="Start of time range (ISO format)"), + end: datetime = Query(..., description="End of time range (ISO format)"), + interface: Optional[str] = Query(None, description="Filter to a specific interface name"), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[dict[str, Any]]: + """ + Return time-bucketed interface traffic metrics for a device. + + Bandwidth (bps) is computed from raw cumulative byte counters using + SQL LAG() window functions — no poller-side state is required. + Counter wraps (rx_bytes < prev_rx) are treated as NULL to avoid + incorrect spikes. + """ + await _check_tenant_access(current_user, tenant_id, db) + bucket = _bucket_for_range(start, end) + + # Build interface filter clause conditionally. + # The interface name is passed as a bind parameter — never interpolated + # into the SQL string — so this is safe from SQL injection. + interface_filter = "AND interface = :interface" if interface else "" + + sql = f""" + WITH ordered AS ( + SELECT + time, + interface, + rx_bytes, + tx_bytes, + LAG(rx_bytes) OVER (PARTITION BY interface ORDER BY time) AS prev_rx, + LAG(tx_bytes) OVER (PARTITION BY interface ORDER BY time) AS prev_tx, + EXTRACT(EPOCH FROM time - LAG(time) OVER (PARTITION BY interface ORDER BY time)) AS dt + FROM interface_metrics + WHERE device_id = :device_id + AND time >= :start AND time < :end + {interface_filter} + ), + with_bps AS ( + SELECT + time, + interface, + rx_bytes, + tx_bytes, + CASE WHEN rx_bytes >= prev_rx AND dt > 0 + THEN ((rx_bytes - prev_rx) * 8 / dt)::bigint + ELSE NULL END AS rx_bps, + CASE WHEN tx_bytes >= prev_tx AND dt > 0 + THEN ((tx_bytes - prev_tx) * 8 / dt)::bigint + ELSE NULL END AS tx_bps + FROM ordered + WHERE prev_rx IS NOT NULL + ) + SELECT + time_bucket(:bucket, time) AS bucket, + interface, + avg(rx_bps)::bigint AS avg_rx_bps, + avg(tx_bps)::bigint AS avg_tx_bps, + max(rx_bps)::bigint AS max_rx_bps, + max(tx_bps)::bigint AS max_tx_bps + FROM with_bps + WHERE rx_bps IS NOT NULL + GROUP BY bucket, interface + ORDER BY interface, bucket ASC + """ + + params: dict[str, Any] = { + "bucket": bucket, + "device_id": str(device_id), + "start": start, + "end": end, + } + if interface: + params["interface"] = interface + + result = await db.execute(text(sql), params) + rows = result.mappings().all() + return [dict(row) for row in rows] + + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}/metrics/interfaces/list", + summary="List distinct interface names for a device", +) +async def device_interface_list( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[str]: + """Return distinct interface names seen in interface_metrics for a device.""" + await _check_tenant_access(current_user, tenant_id, db) + + result = await db.execute( + text(""" + SELECT DISTINCT interface + FROM interface_metrics + WHERE device_id = :device_id + ORDER BY interface + """), + {"device_id": str(device_id)}, + ) + rows = result.scalars().all() + return list(rows) + + +# --------------------------------------------------------------------------- +# Wireless metrics +# --------------------------------------------------------------------------- + + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}/metrics/wireless", + summary="Time-bucketed wireless metrics (clients, signal, CCQ)", +) +async def device_wireless_metrics( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + start: datetime = Query(..., description="Start of time range (ISO format)"), + end: datetime = Query(..., description="End of time range (ISO format)"), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[dict[str, Any]]: + """Return time-bucketed wireless metrics per interface for a device.""" + await _check_tenant_access(current_user, tenant_id, db) + bucket = _bucket_for_range(start, end) + + result = await db.execute( + text(""" + SELECT + time_bucket(:bucket, time) AS bucket, + interface, + avg(client_count)::smallint AS avg_clients, + max(client_count)::smallint AS max_clients, + avg(avg_signal)::smallint AS avg_signal, + avg(ccq)::smallint AS avg_ccq, + max(frequency) AS frequency + FROM wireless_metrics + WHERE device_id = :device_id + AND time >= :start AND time < :end + GROUP BY bucket, interface + ORDER BY interface, bucket ASC + """), + {"bucket": bucket, "device_id": str(device_id), "start": start, "end": end}, + ) + rows = result.mappings().all() + return [dict(row) for row in rows] + + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}/metrics/wireless/latest", + summary="Latest wireless stats per interface (not time-bucketed)", +) +async def device_wireless_latest( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[dict[str, Any]]: + """Return the most recent wireless reading per interface for a device.""" + await _check_tenant_access(current_user, tenant_id, db) + + result = await db.execute( + text(""" + SELECT DISTINCT ON (interface) + interface, client_count, avg_signal, ccq, frequency, time + FROM wireless_metrics + WHERE device_id = :device_id + ORDER BY interface, time DESC + """), + {"device_id": str(device_id)}, + ) + rows = result.mappings().all() + return [dict(row) for row in rows] + + +# --------------------------------------------------------------------------- +# Sparkline +# --------------------------------------------------------------------------- + + +@router.get( + "/tenants/{tenant_id}/devices/{device_id}/metrics/sparkline", + summary="Last 12 health readings for sparkline display", +) +async def device_sparkline( + tenant_id: uuid.UUID, + device_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[dict[str, Any]]: + """ + Return the last 12 CPU readings (in chronological order) for sparkline + display in the fleet table. + """ + await _check_tenant_access(current_user, tenant_id, db) + + result = await db.execute( + text(""" + SELECT cpu_load, time + FROM ( + SELECT cpu_load, time + FROM health_metrics + WHERE device_id = :device_id + ORDER BY time DESC + LIMIT 12 + ) sub + ORDER BY time ASC + """), + {"device_id": str(device_id)}, + ) + rows = result.mappings().all() + return [dict(row) for row in rows] + + +# --------------------------------------------------------------------------- +# Fleet summary +# --------------------------------------------------------------------------- + +_FLEET_SUMMARY_SQL = """ + SELECT + d.id, d.hostname, d.ip_address, d.status, d.model, d.last_seen, + d.uptime_seconds, d.last_cpu_load, d.last_memory_used_pct, + d.latitude, d.longitude, + d.tenant_id, t.name AS tenant_name + FROM devices d + JOIN tenants t ON d.tenant_id = t.id + ORDER BY t.name, d.hostname +""" + + +@router.get( + "/tenants/{tenant_id}/fleet/summary", + summary="Fleet summary for a tenant (latest metrics per device)", +) +async def fleet_summary( + tenant_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[dict[str, Any]]: + """ + Return fleet summary for a single tenant. + + Queries the devices table (not hypertables) for speed. + RLS filters to only devices belonging to the tenant automatically. + """ + await _check_tenant_access(current_user, tenant_id, db) + + result = await db.execute(text(_FLEET_SUMMARY_SQL)) + rows = result.mappings().all() + return [dict(row) for row in rows] + + +@router.get( + "/fleet/summary", + summary="Cross-tenant fleet summary (super_admin only)", +) +async def fleet_summary_all( + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> list[dict[str, Any]]: + """ + Return fleet summary across ALL tenants. + + Requires super_admin role. The RLS policy for super_admin returns all + rows across all tenants, so the same SQL query works without modification. + This avoids the N+1 problem of fetching per-tenant summaries in a loop. + """ + if current_user.role != "super_admin": + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Super admin required", + ) + + result = await db.execute(text(_FLEET_SUMMARY_SQL)) + rows = result.mappings().all() + return [dict(row) for row in rows] diff --git a/backend/app/routers/reports.py b/backend/app/routers/reports.py new file mode 100644 index 0000000..e9bf72a --- /dev/null +++ b/backend/app/routers/reports.py @@ -0,0 +1,146 @@ +"""Report generation API endpoint. + +POST /api/tenants/{tenant_id}/reports/generate +Generates PDF or CSV reports for device inventory, metrics summary, +alert history, and change log. + +RLS enforced via get_db() (app_user engine with tenant context). +RBAC: require at least operator role. +""" + +import uuid +from datetime import datetime +from enum import Enum +from typing import Optional + +import structlog +from fastapi import APIRouter, Depends, HTTPException, status +from fastapi.responses import StreamingResponse +from pydantic import BaseModel, ConfigDict +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db, set_tenant_context +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.services.report_service import generate_report + +logger = structlog.get_logger(__name__) + +router = APIRouter(tags=["reports"]) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """Verify the current user is allowed to access the given tenant.""" + if current_user.is_super_admin: + await set_tenant_context(db, str(tenant_id)) + elif current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied to this tenant", + ) + + +def _require_operator(current_user: CurrentUser) -> None: + """Raise 403 if user is a viewer (reports require operator+).""" + if current_user.role == "viewer": + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Reports require at least operator role.", + ) + + +# --------------------------------------------------------------------------- +# Request schema +# --------------------------------------------------------------------------- + + +class ReportType(str, Enum): + device_inventory = "device_inventory" + metrics_summary = "metrics_summary" + alert_history = "alert_history" + change_log = "change_log" + + +class ReportFormat(str, Enum): + pdf = "pdf" + csv = "csv" + + +class ReportRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + + type: ReportType + date_from: Optional[datetime] = None + date_to: Optional[datetime] = None + format: ReportFormat = ReportFormat.pdf + + +# --------------------------------------------------------------------------- +# Endpoint +# --------------------------------------------------------------------------- + + +@router.post( + "/tenants/{tenant_id}/reports/generate", + summary="Generate a report (PDF or CSV)", + response_class=StreamingResponse, +) +async def generate_report_endpoint( + tenant_id: uuid.UUID, + body: ReportRequest, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> StreamingResponse: + """Generate and download a report as PDF or CSV. + + - device_inventory: no date range required + - metrics_summary, alert_history, change_log: date_from and date_to required + """ + await _check_tenant_access(current_user, tenant_id, db) + _require_operator(current_user) + + # Validate date range for time-based reports + if body.type != ReportType.device_inventory: + if not body.date_from or not body.date_to: + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=f"date_from and date_to are required for {body.type.value} reports.", + ) + if body.date_from > body.date_to: + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail="date_from must be before date_to.", + ) + + try: + file_bytes, content_type, filename = await generate_report( + db=db, + tenant_id=tenant_id, + report_type=body.type.value, + date_from=body.date_from, + date_to=body.date_to, + fmt=body.format.value, + ) + except Exception as exc: + logger.error("report_generation_failed", error=str(exc), report_type=body.type.value) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Report generation failed: {str(exc)}", + ) + + import io + + return StreamingResponse( + io.BytesIO(file_bytes), + media_type=content_type, + headers={ + "Content-Disposition": f'attachment; filename="{filename}"', + "Content-Length": str(len(file_bytes)), + }, + ) diff --git a/backend/app/routers/settings.py b/backend/app/routers/settings.py new file mode 100644 index 0000000..0bb6185 --- /dev/null +++ b/backend/app/routers/settings.py @@ -0,0 +1,155 @@ +"""System settings router — global SMTP configuration. + +Super-admin only. Stores SMTP settings in system_settings table with +Transit encryption for passwords. Falls back to .env values. +""" + +import logging +from typing import Optional + +from fastapi import APIRouter, Depends +from pydantic import BaseModel +from sqlalchemy import text + +from app.config import settings +from app.database import AdminAsyncSessionLocal +from app.middleware.rbac import require_role +from app.services.email_service import SMTPConfig, send_test_email, test_smtp_connection + +logger = logging.getLogger(__name__) +router = APIRouter(prefix="/settings", tags=["settings"]) + +SMTP_KEYS = [ + "smtp_host", + "smtp_port", + "smtp_user", + "smtp_password", + "smtp_use_tls", + "smtp_from_address", + "smtp_provider", +] + + +class SMTPSettingsUpdate(BaseModel): + smtp_host: str + smtp_port: int = 587 + smtp_user: Optional[str] = None + smtp_password: Optional[str] = None + smtp_use_tls: bool = False + smtp_from_address: str = "noreply@example.com" + smtp_provider: str = "custom" + + +class SMTPTestRequest(BaseModel): + to: str + smtp_host: Optional[str] = None + smtp_port: Optional[int] = None + smtp_user: Optional[str] = None + smtp_password: Optional[str] = None + smtp_use_tls: Optional[bool] = None + smtp_from_address: Optional[str] = None + + +async def _get_system_settings(keys: list[str]) -> dict: + """Read settings from system_settings table.""" + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text("SELECT key, value FROM system_settings WHERE key = ANY(:keys)"), + {"keys": keys}, + ) + return {row[0]: row[1] for row in result.fetchall()} + + +async def _set_system_settings(updates: dict, user_id: str) -> None: + """Upsert settings into system_settings table.""" + async with AdminAsyncSessionLocal() as session: + for key, value in updates.items(): + await session.execute( + text(""" + INSERT INTO system_settings (key, value, updated_by, updated_at) + VALUES (:key, :value, CAST(:user_id AS uuid), now()) + ON CONFLICT (key) DO UPDATE + SET value = :value, updated_by = CAST(:user_id AS uuid), updated_at = now() + """), + {"key": key, "value": str(value) if value is not None else None, "user_id": user_id}, + ) + await session.commit() + + +async def get_smtp_config() -> SMTPConfig: + """Get SMTP config from system_settings, falling back to .env.""" + db_settings = await _get_system_settings(SMTP_KEYS) + + return SMTPConfig( + host=db_settings.get("smtp_host") or settings.SMTP_HOST, + port=int(db_settings.get("smtp_port") or settings.SMTP_PORT), + user=db_settings.get("smtp_user") or settings.SMTP_USER, + password=db_settings.get("smtp_password") or settings.SMTP_PASSWORD, + use_tls=(db_settings.get("smtp_use_tls") or str(settings.SMTP_USE_TLS)).lower() == "true", + from_address=db_settings.get("smtp_from_address") or settings.SMTP_FROM_ADDRESS, + ) + + +@router.get("/smtp") +async def get_smtp_settings(user=Depends(require_role("super_admin"))): + """Get current global SMTP configuration. Password is redacted.""" + db_settings = await _get_system_settings(SMTP_KEYS) + + return { + "smtp_host": db_settings.get("smtp_host") or settings.SMTP_HOST, + "smtp_port": int(db_settings.get("smtp_port") or settings.SMTP_PORT), + "smtp_user": db_settings.get("smtp_user") or settings.SMTP_USER or "", + "smtp_use_tls": (db_settings.get("smtp_use_tls") or str(settings.SMTP_USE_TLS)).lower() == "true", + "smtp_from_address": db_settings.get("smtp_from_address") or settings.SMTP_FROM_ADDRESS, + "smtp_provider": db_settings.get("smtp_provider") or "custom", + "smtp_password_set": bool(db_settings.get("smtp_password") or settings.SMTP_PASSWORD), + "source": "database" if db_settings.get("smtp_host") else "environment", + } + + +@router.put("/smtp") +async def update_smtp_settings( + data: SMTPSettingsUpdate, + user=Depends(require_role("super_admin")), +): + """Update global SMTP configuration.""" + updates = { + "smtp_host": data.smtp_host, + "smtp_port": str(data.smtp_port), + "smtp_user": data.smtp_user, + "smtp_use_tls": str(data.smtp_use_tls).lower(), + "smtp_from_address": data.smtp_from_address, + "smtp_provider": data.smtp_provider, + } + if data.smtp_password is not None: + updates["smtp_password"] = data.smtp_password + + await _set_system_settings(updates, str(user.id)) + return {"status": "ok"} + + +@router.post("/smtp/test") +async def test_smtp( + data: SMTPTestRequest, + user=Depends(require_role("super_admin")), +): + """Test SMTP connection and optionally send a test email.""" + # Use provided values or fall back to saved config + saved = await get_smtp_config() + config = SMTPConfig( + host=data.smtp_host or saved.host, + port=data.smtp_port if data.smtp_port is not None else saved.port, + user=data.smtp_user if data.smtp_user is not None else saved.user, + password=data.smtp_password if data.smtp_password is not None else saved.password, + use_tls=data.smtp_use_tls if data.smtp_use_tls is not None else saved.use_tls, + from_address=data.smtp_from_address or saved.from_address, + ) + + conn_result = await test_smtp_connection(config) + if not conn_result["success"]: + return conn_result + + if data.to: + return await send_test_email(data.to, config) + + return conn_result diff --git a/backend/app/routers/sse.py b/backend/app/routers/sse.py new file mode 100644 index 0000000..8ea9ad6 --- /dev/null +++ b/backend/app/routers/sse.py @@ -0,0 +1,141 @@ +"""SSE streaming endpoint for real-time event delivery. + +Provides a Server-Sent Events endpoint per tenant that streams device status, +alert, config push, and firmware progress events in real time. Authentication +is via a short-lived, single-use exchange token (obtained from POST /auth/sse-token) +to avoid exposing the full JWT in query parameters. +""" + +import asyncio +import json +import uuid +from typing import AsyncGenerator, Optional + +import redis.asyncio as aioredis +import structlog +from fastapi import APIRouter, HTTPException, Query, Request, status +from sse_starlette.sse import EventSourceResponse, ServerSentEvent + +from app.services.sse_manager import SSEConnectionManager + +logger = structlog.get_logger(__name__) + +router = APIRouter(tags=["sse"]) + +# ─── Redis for SSE token validation ─────────────────────────────────────────── + +_redis: aioredis.Redis | None = None + + +async def _get_sse_redis() -> aioredis.Redis: + """Lazily initialise and return the SSE Redis client.""" + global _redis + if _redis is None: + from app.config import settings + _redis = aioredis.from_url(settings.REDIS_URL, decode_responses=True) + return _redis + + +async def _validate_sse_token(token: str) -> dict: + """Validate a short-lived SSE exchange token via Redis. + + The token is single-use: retrieved and deleted atomically with GETDEL. + If the token is not found (expired or already used), raises 401. + + Args: + token: SSE exchange token string (from query param). + + Returns: + Dict with user_id, tenant_id, and role. + + Raises: + HTTPException 401: If the token is invalid, expired, or already used. + """ + redis = await _get_sse_redis() + key = f"sse_token:{token}" + data = await redis.getdel(key) # Single-use: delete on retrieval + if not data: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid or expired SSE token", + ) + return json.loads(data) + + +@router.get( + "/tenants/{tenant_id}/events/stream", + summary="SSE event stream for real-time tenant events", + response_class=EventSourceResponse, +) +async def event_stream( + request: Request, + tenant_id: uuid.UUID, + token: str = Query(..., description="Short-lived SSE exchange token (from POST /auth/sse-token)"), +) -> EventSourceResponse: + """Stream real-time events for a tenant via Server-Sent Events. + + Event types: device_status, alert_fired, alert_resolved, config_push, + firmware_progress, metric_update. + + Supports Last-Event-ID header for reconnection replay. + Sends heartbeat comments every 15 seconds on idle connections. + """ + # Validate exchange token from query parameter (single-use, 30s TTL) + user_context = await _validate_sse_token(token) + user_role = user_context.get("role", "") + user_tenant_id = user_context.get("tenant_id") + user_id = user_context.get("user_id", "") + + # Authorization: user must belong to the requested tenant or be super_admin + if user_role != "super_admin" and (user_tenant_id is None or str(user_tenant_id) != str(tenant_id)): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Not authorized for this tenant", + ) + + # super_admin receives events from ALL tenants (tenant_id filter = None) + filter_tenant_id: Optional[str] = None if user_role == "super_admin" else str(tenant_id) + + # Generate unique connection ID + connection_id = f"sse-{uuid.uuid4().hex[:12]}" + + # Check for Last-Event-ID header (reconnection replay) + last_event_id = request.headers.get("Last-Event-ID") + + logger.info( + "sse.stream_requested", + connection_id=connection_id, + tenant_id=str(tenant_id), + user_id=user_id, + role=user_role, + last_event_id=last_event_id, + ) + + manager = SSEConnectionManager() + queue = await manager.connect( + connection_id=connection_id, + tenant_id=filter_tenant_id, + last_event_id=last_event_id, + ) + + async def event_generator() -> AsyncGenerator[ServerSentEvent, None]: + """Yield SSE events from the queue with 15s heartbeat on idle.""" + try: + while True: + try: + event = await asyncio.wait_for(queue.get(), timeout=15.0) + yield ServerSentEvent( + data=event["data"], + event=event["event"], + id=event["id"], + ) + except asyncio.TimeoutError: + # Send heartbeat comment to keep connection alive + yield ServerSentEvent(comment="heartbeat") + except asyncio.CancelledError: + break + finally: + await manager.disconnect() + logger.info("sse.stream_closed", connection_id=connection_id) + + return EventSourceResponse(event_generator()) diff --git a/backend/app/routers/templates.py b/backend/app/routers/templates.py new file mode 100644 index 0000000..eb56267 --- /dev/null +++ b/backend/app/routers/templates.py @@ -0,0 +1,613 @@ +""" +Config template CRUD, preview, and push API endpoints. + +All routes are tenant-scoped under: + /api/tenants/{tenant_id}/templates/ + +Provides: + - GET /templates -- list templates (optional tag filter) + - POST /templates -- create a template + - GET /templates/{id} -- get single template + - PUT /templates/{id} -- update a template + - DELETE /templates/{id} -- delete a template + - POST /templates/{id}/preview -- preview rendered template for a device + - POST /templates/{id}/push -- push template to devices (sequential rollout) + - GET /templates/push-status/{rollout_id} -- poll push progress + +RLS is enforced via get_db() (app_user engine with tenant context). +RBAC: viewer = read (GET/preview); operator and above = write (POST/PUT/DELETE/push). +""" + +import asyncio +import logging +import uuid +from datetime import datetime, timezone +from typing import Any, Optional + +from fastapi import APIRouter, Depends, HTTPException, Query, Request, status +from pydantic import BaseModel, ConfigDict +from sqlalchemy import delete, select +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.orm import selectinload + +from app.database import get_db +from app.middleware.rate_limit import limiter +from app.middleware.rbac import require_min_role, require_scope +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.models.config_template import ConfigTemplate, ConfigTemplateTag, TemplatePushJob +from app.models.device import Device +from app.services import template_service + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=["templates"]) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """Verify the current user is allowed to access the given tenant.""" + if current_user.is_super_admin: + from app.database import set_tenant_context + await set_tenant_context(db, str(tenant_id)) + return + if current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied: you do not belong to this tenant.", + ) + + +def _serialize_template(template: ConfigTemplate, include_content: bool = False) -> dict: + """Serialize a ConfigTemplate to a response dict.""" + result: dict[str, Any] = { + "id": str(template.id), + "name": template.name, + "description": template.description, + "tags": [tag.name for tag in template.tags], + "variable_count": len(template.variables) if template.variables else 0, + "created_at": template.created_at.isoformat(), + "updated_at": template.updated_at.isoformat(), + } + if include_content: + result["content"] = template.content + result["variables"] = template.variables or [] + return result + + +# --------------------------------------------------------------------------- +# Request/Response schemas +# --------------------------------------------------------------------------- + + +class VariableDef(BaseModel): + model_config = ConfigDict(extra="forbid") + name: str + type: str = "string" # string | ip | integer | boolean | subnet + default: Optional[str] = None + description: Optional[str] = None + + +class TemplateCreateRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + name: str + description: Optional[str] = None + content: str + variables: list[VariableDef] = [] + tags: list[str] = [] + + +class TemplateUpdateRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + name: str + description: Optional[str] = None + content: str + variables: list[VariableDef] = [] + tags: list[str] = [] + + +class PreviewRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + device_id: str + variables: dict[str, str] = {} + + +class PushRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + device_ids: list[str] + variables: dict[str, str] = {} + + +# --------------------------------------------------------------------------- +# CRUD endpoints +# --------------------------------------------------------------------------- + + +@router.get( + "/tenants/{tenant_id}/templates", + summary="List config templates", + dependencies=[require_scope("config:read")], +) +async def list_templates( + tenant_id: uuid.UUID, + tag: Optional[str] = Query(None, description="Filter by tag name"), + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("viewer")), + db: AsyncSession = Depends(get_db), +) -> list[dict]: + """List all config templates for a tenant with optional tag filtering.""" + await _check_tenant_access(current_user, tenant_id, db) + + query = ( + select(ConfigTemplate) + .options(selectinload(ConfigTemplate.tags)) + .where(ConfigTemplate.tenant_id == tenant_id) # type: ignore[arg-type] + .order_by(ConfigTemplate.updated_at.desc()) + ) + + if tag: + query = query.where( + ConfigTemplate.id.in_( # type: ignore[attr-defined] + select(ConfigTemplateTag.template_id).where( + ConfigTemplateTag.name == tag, + ConfigTemplateTag.tenant_id == tenant_id, # type: ignore[arg-type] + ) + ) + ) + + result = await db.execute(query) + templates = result.scalars().all() + + return [_serialize_template(t) for t in templates] + + +@router.post( + "/tenants/{tenant_id}/templates", + summary="Create a config template", + status_code=status.HTTP_201_CREATED, + dependencies=[require_scope("config:write")], +) +@limiter.limit("20/minute") +async def create_template( + request: Request, + tenant_id: uuid.UUID, + body: TemplateCreateRequest, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("operator")), + db: AsyncSession = Depends(get_db), +) -> dict: + """Create a new config template with Jinja2 content and variable definitions.""" + await _check_tenant_access(current_user, tenant_id, db) + + # Auto-extract variables from content for comparison + detected = template_service.extract_variables(body.content) + provided_names = {v.name for v in body.variables} + unmatched = set(detected) - provided_names + if unmatched: + logger.warning( + "Template '%s' has undeclared variables: %s (auto-adding as string type)", + body.name, unmatched, + ) + + # Create template + template = ConfigTemplate( + tenant_id=tenant_id, + name=body.name, + description=body.description, + content=body.content, + variables=[v.model_dump() for v in body.variables], + ) + db.add(template) + await db.flush() # Get the generated ID + + # Create tags + for tag_name in body.tags: + tag = ConfigTemplateTag( + tenant_id=tenant_id, + name=tag_name, + template_id=template.id, + ) + db.add(tag) + + await db.flush() + + # Re-query with tags loaded + result = await db.execute( + select(ConfigTemplate) + .options(selectinload(ConfigTemplate.tags)) + .where(ConfigTemplate.id == template.id) # type: ignore[arg-type] + ) + template = result.scalar_one() + + return _serialize_template(template, include_content=True) + + +@router.get( + "/tenants/{tenant_id}/templates/{template_id}", + summary="Get a single config template", + dependencies=[require_scope("config:read")], +) +async def get_template( + tenant_id: uuid.UUID, + template_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("viewer")), + db: AsyncSession = Depends(get_db), +) -> dict: + """Get a config template with full content, variables, and tags.""" + await _check_tenant_access(current_user, tenant_id, db) + + result = await db.execute( + select(ConfigTemplate) + .options(selectinload(ConfigTemplate.tags)) + .where( + ConfigTemplate.id == template_id, # type: ignore[arg-type] + ConfigTemplate.tenant_id == tenant_id, # type: ignore[arg-type] + ) + ) + template = result.scalar_one_or_none() + + if template is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Template {template_id} not found", + ) + + return _serialize_template(template, include_content=True) + + +@router.put( + "/tenants/{tenant_id}/templates/{template_id}", + summary="Update a config template", + dependencies=[require_scope("config:write")], +) +@limiter.limit("20/minute") +async def update_template( + request: Request, + tenant_id: uuid.UUID, + template_id: uuid.UUID, + body: TemplateUpdateRequest, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("operator")), + db: AsyncSession = Depends(get_db), +) -> dict: + """Update an existing config template.""" + await _check_tenant_access(current_user, tenant_id, db) + + result = await db.execute( + select(ConfigTemplate) + .options(selectinload(ConfigTemplate.tags)) + .where( + ConfigTemplate.id == template_id, # type: ignore[arg-type] + ConfigTemplate.tenant_id == tenant_id, # type: ignore[arg-type] + ) + ) + template = result.scalar_one_or_none() + + if template is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Template {template_id} not found", + ) + + # Update fields + template.name = body.name + template.description = body.description + template.content = body.content + template.variables = [v.model_dump() for v in body.variables] + + # Replace tags: delete old, create new + await db.execute( + delete(ConfigTemplateTag).where( + ConfigTemplateTag.template_id == template_id # type: ignore[arg-type] + ) + ) + for tag_name in body.tags: + tag = ConfigTemplateTag( + tenant_id=tenant_id, + name=tag_name, + template_id=template.id, + ) + db.add(tag) + + await db.flush() + + # Re-query with fresh tags + result = await db.execute( + select(ConfigTemplate) + .options(selectinload(ConfigTemplate.tags)) + .where(ConfigTemplate.id == template.id) # type: ignore[arg-type] + ) + template = result.scalar_one() + + return _serialize_template(template, include_content=True) + + +@router.delete( + "/tenants/{tenant_id}/templates/{template_id}", + status_code=status.HTTP_204_NO_CONTENT, + summary="Delete a config template", + dependencies=[require_scope("config:write")], +) +@limiter.limit("5/minute") +async def delete_template( + request: Request, + tenant_id: uuid.UUID, + template_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("operator")), + db: AsyncSession = Depends(get_db), +) -> None: + """Delete a config template. Tags are cascade-deleted. Push jobs are SET NULL.""" + await _check_tenant_access(current_user, tenant_id, db) + + result = await db.execute( + select(ConfigTemplate).where( + ConfigTemplate.id == template_id, # type: ignore[arg-type] + ConfigTemplate.tenant_id == tenant_id, # type: ignore[arg-type] + ) + ) + template = result.scalar_one_or_none() + + if template is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Template {template_id} not found", + ) + + await db.delete(template) + + +# --------------------------------------------------------------------------- +# Preview & Push endpoints +# --------------------------------------------------------------------------- + + +@router.post( + "/tenants/{tenant_id}/templates/{template_id}/preview", + summary="Preview template rendered for a specific device", + dependencies=[require_scope("config:read")], +) +async def preview_template( + tenant_id: uuid.UUID, + template_id: uuid.UUID, + body: PreviewRequest, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("viewer")), + db: AsyncSession = Depends(get_db), +) -> dict: + """Render a template with device context and custom variables for preview.""" + await _check_tenant_access(current_user, tenant_id, db) + + # Load template + result = await db.execute( + select(ConfigTemplate).where( + ConfigTemplate.id == template_id, # type: ignore[arg-type] + ConfigTemplate.tenant_id == tenant_id, # type: ignore[arg-type] + ) + ) + template = result.scalar_one_or_none() + if template is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Template {template_id} not found", + ) + + # Load device + result = await db.execute( + select(Device).where(Device.id == body.device_id) # type: ignore[arg-type] + ) + device = result.scalar_one_or_none() + if device is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Device {body.device_id} not found", + ) + + # Validate variables against type definitions + if template.variables: + for var_def in template.variables: + var_name = var_def.get("name", "") + var_type = var_def.get("type", "string") + value = body.variables.get(var_name) + if value is None: + # Use default if available + default = var_def.get("default") + if default is not None: + body.variables[var_name] = default + continue + error = template_service.validate_variable(var_name, value, var_type) + if error: + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=error, + ) + + # Render + try: + rendered = template_service.render_template( + template.content, + { + "hostname": device.hostname, + "ip_address": device.ip_address, + "model": device.model, + }, + body.variables, + ) + except Exception as exc: + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=f"Template rendering failed: {exc}", + ) + + return { + "rendered": rendered, + "device_hostname": device.hostname, + } + + +@router.post( + "/tenants/{tenant_id}/templates/{template_id}/push", + summary="Push template to devices (sequential rollout with panic-revert)", + dependencies=[require_scope("config:write")], +) +@limiter.limit("5/minute") +async def push_template( + request: Request, + tenant_id: uuid.UUID, + template_id: uuid.UUID, + body: PushRequest, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("operator")), + db: AsyncSession = Depends(get_db), +) -> dict: + """Start a template push to one or more devices. + + Creates push jobs for each device and starts a background sequential rollout. + Returns the rollout_id for status polling. + """ + await _check_tenant_access(current_user, tenant_id, db) + + # Load template + result = await db.execute( + select(ConfigTemplate).where( + ConfigTemplate.id == template_id, # type: ignore[arg-type] + ConfigTemplate.tenant_id == tenant_id, # type: ignore[arg-type] + ) + ) + template = result.scalar_one_or_none() + if template is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Template {template_id} not found", + ) + + if not body.device_ids: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="At least one device_id is required", + ) + + # Validate variables + if template.variables: + for var_def in template.variables: + var_name = var_def.get("name", "") + var_type = var_def.get("type", "string") + value = body.variables.get(var_name) + if value is None: + default = var_def.get("default") + if default is not None: + body.variables[var_name] = default + continue + error = template_service.validate_variable(var_name, value, var_type) + if error: + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=error, + ) + + rollout_id = uuid.uuid4() + jobs_created = [] + + for device_id_str in body.device_ids: + # Load device to render template per-device + result = await db.execute( + select(Device).where(Device.id == device_id_str) # type: ignore[arg-type] + ) + device = result.scalar_one_or_none() + if device is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Device {device_id_str} not found", + ) + + # Render template with this device's context + try: + rendered = template_service.render_template( + template.content, + { + "hostname": device.hostname, + "ip_address": device.ip_address, + "model": device.model, + }, + body.variables, + ) + except Exception as exc: + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=f"Template rendering failed for device {device.hostname}: {exc}", + ) + + # Create push job + job = TemplatePushJob( + tenant_id=tenant_id, + template_id=template_id, + device_id=device.id, + rollout_id=rollout_id, + rendered_content=rendered, + status="pending", + ) + db.add(job) + jobs_created.append({ + "job_id": str(job.id), + "device_id": str(device.id), + "device_hostname": device.hostname, + }) + + await db.flush() + + # Start background push task + asyncio.create_task(template_service.push_to_devices(str(rollout_id))) + + return { + "rollout_id": str(rollout_id), + "jobs": jobs_created, + } + + +@router.get( + "/tenants/{tenant_id}/templates/push-status/{rollout_id}", + summary="Poll push progress for a rollout", + dependencies=[require_scope("config:read")], +) +async def push_status( + tenant_id: uuid.UUID, + rollout_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("viewer")), + db: AsyncSession = Depends(get_db), +) -> dict: + """Return all push job statuses for a rollout with device hostnames.""" + await _check_tenant_access(current_user, tenant_id, db) + + result = await db.execute( + select(TemplatePushJob, Device.hostname) + .join(Device, TemplatePushJob.device_id == Device.id) # type: ignore[arg-type] + .where( + TemplatePushJob.rollout_id == rollout_id, # type: ignore[arg-type] + TemplatePushJob.tenant_id == tenant_id, # type: ignore[arg-type] + ) + .order_by(TemplatePushJob.created_at.asc()) + ) + rows = result.all() + + jobs = [] + for job, hostname in rows: + jobs.append({ + "device_id": str(job.device_id), + "hostname": hostname, + "status": job.status, + "error_message": job.error_message, + "started_at": job.started_at.isoformat() if job.started_at else None, + "completed_at": job.completed_at.isoformat() if job.completed_at else None, + }) + + return { + "rollout_id": str(rollout_id), + "jobs": jobs, + } diff --git a/backend/app/routers/tenants.py b/backend/app/routers/tenants.py new file mode 100644 index 0000000..f868779 --- /dev/null +++ b/backend/app/routers/tenants.py @@ -0,0 +1,367 @@ +""" +Tenant management endpoints. + +GET /api/tenants — list tenants (super_admin: all; tenant_admin: own only) +POST /api/tenants — create tenant (super_admin only) +GET /api/tenants/{id} — get tenant detail +PUT /api/tenants/{id} — update tenant (super_admin only) +DELETE /api/tenants/{id} — delete tenant (super_admin only) +""" + +import uuid +from typing import Optional + +from fastapi import APIRouter, Depends, HTTPException, Request, status +from sqlalchemy import func, select, text +from sqlalchemy.ext.asyncio import AsyncSession + +from app.middleware.rate_limit import limiter + +from app.database import get_admin_db, get_db +from app.middleware.rbac import require_super_admin, require_tenant_admin_or_above +from app.middleware.tenant_context import CurrentUser +from app.models.device import Device +from app.models.tenant import Tenant +from app.models.user import User +from app.schemas.tenant import TenantCreate, TenantResponse, TenantUpdate + +router = APIRouter(prefix="/tenants", tags=["tenants"]) + + +async def _get_tenant_response( + tenant: Tenant, + db: AsyncSession, +) -> TenantResponse: + """Build a TenantResponse with user and device counts.""" + user_count_result = await db.execute( + select(func.count(User.id)).where(User.tenant_id == tenant.id) + ) + user_count = user_count_result.scalar_one() or 0 + + device_count_result = await db.execute( + select(func.count(Device.id)).where(Device.tenant_id == tenant.id) + ) + device_count = device_count_result.scalar_one() or 0 + + return TenantResponse( + id=tenant.id, + name=tenant.name, + description=tenant.description, + contact_email=tenant.contact_email, + user_count=user_count, + device_count=device_count, + created_at=tenant.created_at, + ) + + +@router.get("", response_model=list[TenantResponse], summary="List tenants") +async def list_tenants( + current_user: CurrentUser = Depends(require_tenant_admin_or_above), + db: AsyncSession = Depends(get_admin_db), +) -> list[TenantResponse]: + """ + List tenants. + - super_admin: sees all tenants + - tenant_admin: sees only their own tenant + """ + if current_user.is_super_admin: + result = await db.execute(select(Tenant).order_by(Tenant.name)) + tenants = result.scalars().all() + else: + if not current_user.tenant_id: + return [] + result = await db.execute( + select(Tenant).where(Tenant.id == current_user.tenant_id) + ) + tenants = result.scalars().all() + + return [await _get_tenant_response(tenant, db) for tenant in tenants] + + +@router.post("", response_model=TenantResponse, status_code=status.HTTP_201_CREATED, summary="Create a tenant") +@limiter.limit("20/minute") +async def create_tenant( + request: Request, + data: TenantCreate, + current_user: CurrentUser = Depends(require_super_admin), + db: AsyncSession = Depends(get_admin_db), +) -> TenantResponse: + """Create a new tenant (super_admin only).""" + # Check for name uniqueness + existing = await db.execute(select(Tenant).where(Tenant.name == data.name)) + if existing.scalar_one_or_none(): + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail=f"Tenant with name '{data.name}' already exists", + ) + + tenant = Tenant(name=data.name, description=data.description, contact_email=data.contact_email) + db.add(tenant) + await db.commit() + await db.refresh(tenant) + + # Seed default alert rules for new tenant + default_rules = [ + ("High CPU Usage", "cpu_load", "gt", 90, 5, "warning"), + ("High Memory Usage", "memory_used_pct", "gt", 90, 5, "warning"), + ("High Disk Usage", "disk_used_pct", "gt", 85, 3, "warning"), + ("Device Offline", "device_offline", "eq", 1, 1, "critical"), + ] + for name, metric, operator, threshold, duration, sev in default_rules: + await db.execute(text(""" + INSERT INTO alert_rules (id, tenant_id, name, metric, operator, threshold, duration_polls, severity, enabled, is_default) + VALUES (gen_random_uuid(), CAST(:tenant_id AS uuid), :name, :metric, :operator, :threshold, :duration, :severity, TRUE, TRUE) + """), { + "tenant_id": str(tenant.id), "name": name, "metric": metric, + "operator": operator, "threshold": threshold, "duration": duration, "severity": sev, + }) + await db.commit() + + # Seed starter config templates for new tenant + await _seed_starter_templates(db, tenant.id) + await db.commit() + + # Provision OpenBao Transit key for the new tenant (non-blocking) + try: + from app.config import settings + from app.services.key_service import provision_tenant_key + + if settings.OPENBAO_ADDR: + await provision_tenant_key(db, tenant.id) + await db.commit() + except Exception as exc: + import logging + logging.getLogger(__name__).warning( + "OpenBao key provisioning failed for tenant %s (will be provisioned on next startup): %s", + tenant.id, + exc, + ) + + return await _get_tenant_response(tenant, db) + + +@router.get("/{tenant_id}", response_model=TenantResponse, summary="Get tenant detail") +async def get_tenant( + tenant_id: uuid.UUID, + current_user: CurrentUser = Depends(require_tenant_admin_or_above), + db: AsyncSession = Depends(get_admin_db), +) -> TenantResponse: + """Get tenant detail. Tenant admins can only view their own tenant.""" + # Enforce tenant_admin can only see their own tenant + if not current_user.is_super_admin and current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied to this tenant", + ) + + result = await db.execute(select(Tenant).where(Tenant.id == tenant_id)) + tenant = result.scalar_one_or_none() + + if not tenant: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Tenant not found", + ) + + return await _get_tenant_response(tenant, db) + + +@router.put("/{tenant_id}", response_model=TenantResponse, summary="Update a tenant") +@limiter.limit("20/minute") +async def update_tenant( + request: Request, + tenant_id: uuid.UUID, + data: TenantUpdate, + current_user: CurrentUser = Depends(require_super_admin), + db: AsyncSession = Depends(get_admin_db), +) -> TenantResponse: + """Update tenant (super_admin only).""" + result = await db.execute(select(Tenant).where(Tenant.id == tenant_id)) + tenant = result.scalar_one_or_none() + + if not tenant: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Tenant not found", + ) + + if data.name is not None: + # Check name uniqueness + name_check = await db.execute( + select(Tenant).where(Tenant.name == data.name, Tenant.id != tenant_id) + ) + if name_check.scalar_one_or_none(): + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail=f"Tenant with name '{data.name}' already exists", + ) + tenant.name = data.name + + if data.description is not None: + tenant.description = data.description + + if data.contact_email is not None: + tenant.contact_email = data.contact_email + + await db.commit() + await db.refresh(tenant) + + return await _get_tenant_response(tenant, db) + + +@router.delete("/{tenant_id}", status_code=status.HTTP_204_NO_CONTENT, summary="Delete a tenant") +@limiter.limit("5/minute") +async def delete_tenant( + request: Request, + tenant_id: uuid.UUID, + current_user: CurrentUser = Depends(require_super_admin), + db: AsyncSession = Depends(get_admin_db), +) -> None: + """Delete tenant (super_admin only). Cascades to all users and devices.""" + result = await db.execute(select(Tenant).where(Tenant.id == tenant_id)) + tenant = result.scalar_one_or_none() + + if not tenant: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Tenant not found", + ) + + await db.delete(tenant) + await db.commit() + + +# --------------------------------------------------------------------------- +# Starter template seeding +# --------------------------------------------------------------------------- + +_STARTER_TEMPLATES = [ + { + "name": "Basic Router", + "description": "Complete SOHO/branch router setup: WAN on ether1, LAN bridge, DHCP, DNS, NAT, basic firewall", + "content": """/interface bridge add name=bridge-lan comment="LAN bridge" +/interface bridge port add bridge=bridge-lan interface=ether2 +/interface bridge port add bridge=bridge-lan interface=ether3 +/interface bridge port add bridge=bridge-lan interface=ether4 +/interface bridge port add bridge=bridge-lan interface=ether5 + +# WAN — DHCP client on ether1 +/ip dhcp-client add interface={{ wan_interface }} disabled=no comment="WAN uplink" + +# LAN address +/ip address add address={{ lan_gateway }}/{{ lan_cidr }} interface=bridge-lan + +# DNS +/ip dns set servers={{ dns_servers }} allow-remote-requests=yes + +# DHCP server for LAN +/ip pool add name=lan-pool ranges={{ dhcp_start }}-{{ dhcp_end }} +/ip dhcp-server network add address={{ lan_network }}/{{ lan_cidr }} gateway={{ lan_gateway }} dns-server={{ lan_gateway }} +/ip dhcp-server add name=lan-dhcp interface=bridge-lan address-pool=lan-pool disabled=no + +# NAT masquerade +/ip firewall nat add chain=srcnat out-interface={{ wan_interface }} action=masquerade + +# Firewall — input chain +/ip firewall filter +add chain=input connection-state=established,related action=accept +add chain=input connection-state=invalid action=drop +add chain=input in-interface={{ wan_interface }} action=drop comment="Drop all other WAN input" + +# Firewall — forward chain +add chain=forward connection-state=established,related action=accept +add chain=forward connection-state=invalid action=drop +add chain=forward in-interface=bridge-lan out-interface={{ wan_interface }} action=accept comment="Allow LAN to WAN" +add chain=forward action=drop comment="Drop everything else" + +# NTP +/system ntp client set enabled=yes servers={{ ntp_server }} + +# Identity +/system identity set name={{ device.hostname }}""", + "variables": [ + {"name": "wan_interface", "type": "string", "default": "ether1", "description": "WAN-facing interface"}, + {"name": "lan_gateway", "type": "ip", "default": "192.168.88.1", "description": "LAN gateway IP"}, + {"name": "lan_cidr", "type": "integer", "default": "24", "description": "LAN subnet mask bits"}, + {"name": "lan_network", "type": "ip", "default": "192.168.88.0", "description": "LAN network address"}, + {"name": "dhcp_start", "type": "ip", "default": "192.168.88.100", "description": "DHCP pool start"}, + {"name": "dhcp_end", "type": "ip", "default": "192.168.88.254", "description": "DHCP pool end"}, + {"name": "dns_servers", "type": "string", "default": "8.8.8.8,8.8.4.4", "description": "Upstream DNS servers"}, + {"name": "ntp_server", "type": "string", "default": "pool.ntp.org", "description": "NTP server"}, + ], + }, + { + "name": "Basic Firewall", + "description": "Standard firewall ruleset with WAN protection and LAN forwarding", + "content": """/ip firewall filter +add chain=input connection-state=established,related action=accept +add chain=input connection-state=invalid action=drop +add chain=input in-interface={{ wan_interface }} protocol=tcp dst-port=8291 action=drop comment="Block Winbox from WAN" +add chain=input in-interface={{ wan_interface }} protocol=tcp dst-port=22 action=drop comment="Block SSH from WAN" +add chain=forward connection-state=established,related action=accept +add chain=forward connection-state=invalid action=drop +add chain=forward src-address={{ allowed_network }} action=accept +add chain=forward action=drop""", + "variables": [ + {"name": "wan_interface", "type": "string", "default": "ether1", "description": "WAN-facing interface"}, + {"name": "allowed_network", "type": "subnet", "default": "192.168.88.0/24", "description": "Allowed source network"}, + ], + }, + { + "name": "DHCP Server Setup", + "description": "Configure DHCP server with address pool, DNS, and gateway", + "content": """/ip pool add name=dhcp-pool ranges={{ pool_start }}-{{ pool_end }} +/ip dhcp-server network add address={{ gateway }}/24 gateway={{ gateway }} dns-server={{ dns_server }} +/ip dhcp-server add name=dhcp1 interface={{ interface }} address-pool=dhcp-pool disabled=no""", + "variables": [ + {"name": "pool_start", "type": "ip", "default": "192.168.88.100", "description": "DHCP pool start address"}, + {"name": "pool_end", "type": "ip", "default": "192.168.88.254", "description": "DHCP pool end address"}, + {"name": "gateway", "type": "ip", "default": "192.168.88.1", "description": "Default gateway"}, + {"name": "dns_server", "type": "ip", "default": "8.8.8.8", "description": "DNS server address"}, + {"name": "interface", "type": "string", "default": "bridge-lan", "description": "Interface to serve DHCP on"}, + ], + }, + { + "name": "Wireless AP Config", + "description": "Configure wireless access point with WPA2 security", + "content": """/interface wireless security-profiles add name=portal-wpa2 mode=dynamic-keys authentication-types=wpa2-psk wpa2-pre-shared-key={{ password }} +/interface wireless set wlan1 mode=ap-bridge ssid={{ ssid }} security-profile=portal-wpa2 frequency={{ frequency }} channel-width={{ channel_width }} disabled=no""", + "variables": [ + {"name": "ssid", "type": "string", "default": "MikroTik-AP", "description": "Wireless network name"}, + {"name": "password", "type": "string", "default": "", "description": "WPA2 pre-shared key (min 8 characters)"}, + {"name": "frequency", "type": "integer", "default": "2412", "description": "Wireless frequency in MHz"}, + {"name": "channel_width", "type": "string", "default": "20/40mhz-XX", "description": "Channel width setting"}, + ], + }, + { + "name": "Initial Device Setup", + "description": "Set device identity, NTP, DNS, and disable unused services", + "content": """/system identity set name={{ device.hostname }} +/system ntp client set enabled=yes servers={{ ntp_server }} +/ip dns set servers={{ dns_servers }} allow-remote-requests=no +/ip service disable telnet,ftp,www,api-ssl +/ip service set ssh port=22 +/ip service set winbox port=8291""", + "variables": [ + {"name": "ntp_server", "type": "ip", "default": "pool.ntp.org", "description": "NTP server address"}, + {"name": "dns_servers", "type": "string", "default": "8.8.8.8,8.8.4.4", "description": "Comma-separated DNS servers"}, + ], + }, +] + + +async def _seed_starter_templates(db, tenant_id) -> None: + """Insert starter config templates for a newly created tenant.""" + import json as _json + + for tmpl in _STARTER_TEMPLATES: + await db.execute(text(""" + INSERT INTO config_templates (id, tenant_id, name, description, content, variables) + VALUES (gen_random_uuid(), CAST(:tid AS uuid), :name, :desc, :content, CAST(:vars AS jsonb)) + """), { + "tid": str(tenant_id), + "name": tmpl["name"], + "desc": tmpl["description"], + "content": tmpl["content"], + "vars": _json.dumps(tmpl["variables"]), + }) diff --git a/backend/app/routers/topology.py b/backend/app/routers/topology.py new file mode 100644 index 0000000..ab928d1 --- /dev/null +++ b/backend/app/routers/topology.py @@ -0,0 +1,374 @@ +""" +Network topology inference endpoint. + +Endpoint: GET /api/tenants/{tenant_id}/topology + +Builds a topology graph of managed devices by: +1. Querying all devices for the tenant (via RLS) +2. Fetching /ip/neighbor tables from online devices via NATS +3. Matching neighbor addresses to known devices +4. Falling back to shared /24 subnet inference when neighbor data is unavailable +5. Caching results in Redis with 5-minute TTL +""" + +import asyncio +import ipaddress +import json +import logging +import uuid +from typing import Any + +import redis.asyncio as aioredis +import structlog +from fastapi import APIRouter, Depends, HTTPException, status +from pydantic import BaseModel +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.config import settings +from app.database import get_db, set_tenant_context +from app.middleware.rbac import require_min_role +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.models.device import Device +from app.models.vpn import VpnPeer +from app.services import routeros_proxy + +logger = structlog.get_logger(__name__) + +router = APIRouter(tags=["topology"]) + +# --------------------------------------------------------------------------- +# Redis connection (lazy initialized, same pattern as routeros_proxy NATS) +# --------------------------------------------------------------------------- + +_redis: aioredis.Redis | None = None +TOPOLOGY_CACHE_TTL = 300 # 5 minutes + + +async def _get_redis() -> aioredis.Redis: + """Get or create a Redis connection for topology caching.""" + global _redis + if _redis is None: + _redis = aioredis.from_url(settings.REDIS_URL, decode_responses=True) + logger.info("Topology Redis connection established") + return _redis + + +# --------------------------------------------------------------------------- +# Response schemas +# --------------------------------------------------------------------------- + + +class TopologyNode(BaseModel): + id: str + hostname: str + ip: str + status: str + model: str | None + uptime: str | None + + +class TopologyEdge(BaseModel): + source: str + target: str + label: str + + +class TopologyResponse(BaseModel): + nodes: list[TopologyNode] + edges: list[TopologyEdge] + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """Verify the current user is allowed to access the given tenant.""" + if current_user.is_super_admin: + await set_tenant_context(db, str(tenant_id)) + return + if current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied: you do not belong to this tenant.", + ) + + +def _format_uptime(seconds: int | None) -> str | None: + """Convert uptime seconds to a human-readable string.""" + if seconds is None: + return None + days = seconds // 86400 + hours = (seconds % 86400) // 3600 + minutes = (seconds % 3600) // 60 + if days > 0: + return f"{days}d {hours}h {minutes}m" + if hours > 0: + return f"{hours}h {minutes}m" + return f"{minutes}m" + + +def _get_subnet_key(ip_str: str) -> str | None: + """Return the /24 network key for an IPv4 address, or None if invalid.""" + try: + addr = ipaddress.ip_address(ip_str) + if isinstance(addr, ipaddress.IPv4Address): + network = ipaddress.ip_network(f"{ip_str}/24", strict=False) + return str(network) + except ValueError: + pass + return None + + +def _build_edges_from_neighbors( + neighbor_data: dict[str, list[dict[str, Any]]], + ip_to_device: dict[str, str], +) -> list[TopologyEdge]: + """Build topology edges from neighbor discovery results. + + Args: + neighbor_data: Mapping of device_id -> list of neighbor entries. + ip_to_device: Mapping of IP address -> device_id for known devices. + + Returns: + De-duplicated list of topology edges. + """ + seen_edges: set[tuple[str, str]] = set() + edges: list[TopologyEdge] = [] + + for device_id, neighbors in neighbor_data.items(): + for neighbor in neighbors: + # RouterOS neighbor entry has 'address' (or 'address4') field + neighbor_ip = neighbor.get("address") or neighbor.get("address4", "") + if not neighbor_ip: + continue + + target_device_id = ip_to_device.get(neighbor_ip) + if target_device_id is None or target_device_id == device_id: + continue + + # De-duplicate bidirectional edges (A->B and B->A become one edge) + edge_key = tuple(sorted([device_id, target_device_id])) + if edge_key in seen_edges: + continue + seen_edges.add(edge_key) + + interface_name = neighbor.get("interface", "neighbor") + edges.append( + TopologyEdge( + source=device_id, + target=target_device_id, + label=interface_name, + ) + ) + + return edges + + +def _build_edges_from_subnets( + devices: list[Device], + existing_connected: set[tuple[str, str]], +) -> list[TopologyEdge]: + """Infer edges from shared /24 subnets for devices without neighbor data. + + Only adds subnet-based edges for device pairs that are NOT already connected + via neighbor discovery. + """ + # Group devices by /24 subnet + subnet_groups: dict[str, list[str]] = {} + for device in devices: + subnet_key = _get_subnet_key(device.ip_address) + if subnet_key: + subnet_groups.setdefault(subnet_key, []).append(str(device.id)) + + edges: list[TopologyEdge] = [] + for subnet, device_ids in subnet_groups.items(): + if len(device_ids) < 2: + continue + # Connect all pairs in the subnet + for i, src in enumerate(device_ids): + for tgt in device_ids[i + 1 :]: + edge_key = tuple(sorted([src, tgt])) + if edge_key in existing_connected: + continue + edges.append( + TopologyEdge( + source=src, + target=tgt, + label="shared subnet", + ) + ) + existing_connected.add(edge_key) + + return edges + + +# --------------------------------------------------------------------------- +# Endpoint +# --------------------------------------------------------------------------- + + +@router.get( + "/tenants/{tenant_id}/topology", + response_model=TopologyResponse, + summary="Get network topology for a tenant", +) +async def get_topology( + tenant_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + _role: CurrentUser = Depends(require_min_role("viewer")), + db: AsyncSession = Depends(get_db), +) -> TopologyResponse: + """Build and return a network topology graph for the given tenant. + + The topology is inferred from: + 1. LLDP/CDP/MNDP neighbor discovery on online devices + 2. Shared /24 subnet fallback for devices without neighbor data + + Results are cached in Redis with a 5-minute TTL. + """ + await _check_tenant_access(current_user, tenant_id, db) + + cache_key = f"topology:{tenant_id}" + + # Check Redis cache + try: + rd = await _get_redis() + cached = await rd.get(cache_key) + if cached: + data = json.loads(cached) + return TopologyResponse(**data) + except Exception as exc: + logger.warning("Redis cache read failed, computing topology fresh", error=str(exc)) + + # Fetch all devices for tenant (RLS enforced via get_db) + result = await db.execute( + select( + Device.id, + Device.hostname, + Device.ip_address, + Device.status, + Device.model, + Device.uptime_seconds, + ) + ) + rows = result.all() + + if not rows: + return TopologyResponse(nodes=[], edges=[]) + + # Build nodes + nodes: list[TopologyNode] = [] + ip_to_device: dict[str, str] = {} + online_device_ids: list[str] = [] + devices_by_id: dict[str, Any] = {} + + for row in rows: + device_id = str(row.id) + nodes.append( + TopologyNode( + id=device_id, + hostname=row.hostname, + ip=row.ip_address, + status=row.status, + model=row.model, + uptime=_format_uptime(row.uptime_seconds), + ) + ) + ip_to_device[row.ip_address] = device_id + if row.status == "online": + online_device_ids.append(device_id) + + # Fetch neighbor tables from online devices in parallel + neighbor_data: dict[str, list[dict[str, Any]]] = {} + + if online_device_ids: + tasks = [ + routeros_proxy.execute_command( + device_id, "/ip/neighbor/print", timeout=10.0 + ) + for device_id in online_device_ids + ] + results = await asyncio.gather(*tasks, return_exceptions=True) + + for device_id, res in zip(online_device_ids, results): + if isinstance(res, Exception): + logger.warning( + "Neighbor fetch failed", + device_id=device_id, + error=str(res), + ) + continue + if isinstance(res, dict) and res.get("success") and res.get("data"): + neighbor_data[device_id] = res["data"] + + # Build edges from neighbor discovery + neighbor_edges = _build_edges_from_neighbors(neighbor_data, ip_to_device) + + # Track connected pairs for subnet fallback + connected_pairs: set[tuple[str, str]] = set() + for edge in neighbor_edges: + connected_pairs.add(tuple(sorted([edge.source, edge.target]))) + + # VPN-based edges: query WireGuard peers to infer hub-spoke topology. + # VPN peers all connect to the same WireGuard server. The gateway device + # is the managed device NOT in the VPN peers list (it's the server, not a + # client). If found, create star edges from gateway to each VPN peer device. + vpn_edges: list[TopologyEdge] = [] + vpn_peer_device_ids: set[str] = set() + try: + peer_result = await db.execute( + select(VpnPeer.device_id).where(VpnPeer.is_enabled.is_(True)) + ) + vpn_peer_device_ids = {str(row[0]) for row in peer_result.all()} + + if vpn_peer_device_ids: + # Gateway = managed devices NOT in VPN peers (typically the Core router) + all_device_ids = {str(row.id) for row in rows} + gateway_ids = all_device_ids - vpn_peer_device_ids + # Pick the gateway that's online (prefer online devices) + gateway_id = None + for gid in gateway_ids: + if gid in online_device_ids: + gateway_id = gid + break + if not gateway_id and gateway_ids: + gateway_id = next(iter(gateway_ids)) + + if gateway_id: + for peer_device_id in vpn_peer_device_ids: + edge_key = tuple(sorted([gateway_id, peer_device_id])) + if edge_key not in connected_pairs: + vpn_edges.append( + TopologyEdge( + source=gateway_id, + target=peer_device_id, + label="vpn tunnel", + ) + ) + connected_pairs.add(edge_key) + except Exception as exc: + logger.warning("VPN edge detection failed", error=str(exc)) + + # Fallback: infer connections from shared /24 subnets + # Query full Device objects for subnet analysis + device_result = await db.execute(select(Device)) + all_devices = list(device_result.scalars().all()) + subnet_edges = _build_edges_from_subnets(all_devices, connected_pairs) + + all_edges = neighbor_edges + vpn_edges + subnet_edges + + topology = TopologyResponse(nodes=nodes, edges=all_edges) + + # Cache result in Redis + try: + rd = await _get_redis() + await rd.set(cache_key, topology.model_dump_json(), ex=TOPOLOGY_CACHE_TTL) + except Exception as exc: + logger.warning("Redis cache write failed", error=str(exc)) + + return topology diff --git a/backend/app/routers/transparency.py b/backend/app/routers/transparency.py new file mode 100644 index 0000000..06ad16c --- /dev/null +++ b/backend/app/routers/transparency.py @@ -0,0 +1,391 @@ +"""Transparency log API endpoints. + +Tenant-scoped routes under /api/tenants/{tenant_id}/ for: +- Paginated, filterable key access transparency log listing +- Transparency log statistics (total events, last 24h, unique devices, justification breakdown) +- CSV export of transparency logs + +RLS enforced via get_db() (app_user engine with tenant context). +RBAC: admin and above can view transparency logs (tenant_admin or super_admin). + +Phase 31: Data Access Transparency Dashboard - TRUST-01, TRUST-02 +Shows tenant admins every KMS credential access event for their tenant. +""" + +import csv +import io +import logging +import uuid +from datetime import datetime +from typing import Any, Optional + +from fastapi import APIRouter, Depends, HTTPException, Query, status +from fastapi.responses import StreamingResponse +from pydantic import BaseModel +from sqlalchemy import and_, func, select, text +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db, set_tenant_context +from app.middleware.tenant_context import CurrentUser, get_current_user + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=["transparency"]) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + """Verify the current user is allowed to access the given tenant.""" + if current_user.is_super_admin: + await set_tenant_context(db, str(tenant_id)) + elif current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied to this tenant", + ) + + +def _require_admin(current_user: CurrentUser) -> None: + """Raise 403 if user does not have at least admin role. + + Transparency data is sensitive operational intelligence -- + only tenant_admin and super_admin can view it. + """ + allowed = {"super_admin", "admin", "tenant_admin"} + if current_user.role not in allowed: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="At least admin role required to view transparency logs.", + ) + + +# --------------------------------------------------------------------------- +# Response models +# --------------------------------------------------------------------------- + + +class TransparencyLogItem(BaseModel): + id: str + action: str + device_name: Optional[str] = None + device_id: Optional[str] = None + justification: Optional[str] = None + operator_email: Optional[str] = None + correlation_id: Optional[str] = None + resource_type: Optional[str] = None + resource_id: Optional[str] = None + ip_address: Optional[str] = None + created_at: str + + +class TransparencyLogResponse(BaseModel): + items: list[TransparencyLogItem] + total: int + page: int + per_page: int + + +class TransparencyStats(BaseModel): + total_events: int + events_last_24h: int + unique_devices: int + justification_breakdown: dict[str, int] + + +# --------------------------------------------------------------------------- +# Endpoints +# --------------------------------------------------------------------------- + + +@router.get( + "/tenants/{tenant_id}/transparency-logs", + response_model=TransparencyLogResponse, + summary="List KMS credential access events for tenant", +) +async def list_transparency_logs( + tenant_id: uuid.UUID, + page: int = Query(default=1, ge=1), + per_page: int = Query(default=50, ge=1, le=100), + device_id: Optional[uuid.UUID] = Query(default=None), + justification: Optional[str] = Query(default=None), + action: Optional[str] = Query(default=None), + date_from: Optional[datetime] = Query(default=None), + date_to: Optional[datetime] = Query(default=None), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> Any: + _require_admin(current_user) + await _check_tenant_access(current_user, tenant_id, db) + + # Build filter conditions using parameterized text fragments + conditions = [text("k.tenant_id = :tenant_id")] + params: dict[str, Any] = {"tenant_id": str(tenant_id)} + + if device_id: + conditions.append(text("k.device_id = :device_id")) + params["device_id"] = str(device_id) + + if justification: + conditions.append(text("k.justification = :justification")) + params["justification"] = justification + + if action: + conditions.append(text("k.action = :action")) + params["action"] = action + + if date_from: + conditions.append(text("k.created_at >= :date_from")) + params["date_from"] = date_from.isoformat() + + if date_to: + conditions.append(text("k.created_at <= :date_to")) + params["date_to"] = date_to.isoformat() + + where_clause = and_(*conditions) + + # Shared SELECT columns for data queries + _data_columns = text( + "k.id, k.action, d.hostname AS device_name, " + "k.device_id, k.justification, u.email AS operator_email, " + "k.correlation_id, k.resource_type, k.resource_id, " + "k.ip_address, k.created_at" + ) + _data_from = text( + "key_access_log k " + "LEFT JOIN users u ON k.user_id = u.id " + "LEFT JOIN devices d ON k.device_id = d.id" + ) + + # Count total + count_result = await db.execute( + select(func.count()) + .select_from(text("key_access_log k")) + .where(where_clause), + params, + ) + total = count_result.scalar() or 0 + + # Paginated query + offset = (page - 1) * per_page + params["limit"] = per_page + params["offset"] = offset + + result = await db.execute( + select(_data_columns) + .select_from(_data_from) + .where(where_clause) + .order_by(text("k.created_at DESC")) + .limit(per_page) + .offset(offset), + params, + ) + rows = result.mappings().all() + + items = [ + TransparencyLogItem( + id=str(row["id"]), + action=row["action"], + device_name=row["device_name"], + device_id=str(row["device_id"]) if row["device_id"] else None, + justification=row["justification"], + operator_email=row["operator_email"], + correlation_id=row["correlation_id"], + resource_type=row["resource_type"], + resource_id=row["resource_id"], + ip_address=row["ip_address"], + created_at=row["created_at"].isoformat() if row["created_at"] else "", + ) + for row in rows + ] + + return TransparencyLogResponse( + items=items, + total=total, + page=page, + per_page=per_page, + ) + + +@router.get( + "/tenants/{tenant_id}/transparency-logs/stats", + response_model=TransparencyStats, + summary="Get transparency log statistics", +) +async def get_transparency_stats( + tenant_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> TransparencyStats: + _require_admin(current_user) + await _check_tenant_access(current_user, tenant_id, db) + + params: dict[str, Any] = {"tenant_id": str(tenant_id)} + + # Total events + total_result = await db.execute( + select(func.count()) + .select_from(text("key_access_log")) + .where(text("tenant_id = :tenant_id")), + params, + ) + total_events = total_result.scalar() or 0 + + # Events in last 24 hours + last_24h_result = await db.execute( + select(func.count()) + .select_from(text("key_access_log")) + .where( + and_( + text("tenant_id = :tenant_id"), + text("created_at >= NOW() - INTERVAL '24 hours'"), + ) + ), + params, + ) + events_last_24h = last_24h_result.scalar() or 0 + + # Unique devices + unique_devices_result = await db.execute( + select(func.count(text("DISTINCT device_id"))) + .select_from(text("key_access_log")) + .where( + and_( + text("tenant_id = :tenant_id"), + text("device_id IS NOT NULL"), + ) + ), + params, + ) + unique_devices = unique_devices_result.scalar() or 0 + + # Justification breakdown + breakdown_result = await db.execute( + select( + text("COALESCE(justification, 'system') AS justification_label"), + func.count().label("count"), + ) + .select_from(text("key_access_log")) + .where(text("tenant_id = :tenant_id")) + .group_by(text("justification_label")), + params, + ) + justification_breakdown: dict[str, int] = {} + for row in breakdown_result.mappings().all(): + justification_breakdown[row["justification_label"]] = row["count"] + + return TransparencyStats( + total_events=total_events, + events_last_24h=events_last_24h, + unique_devices=unique_devices, + justification_breakdown=justification_breakdown, + ) + + +@router.get( + "/tenants/{tenant_id}/transparency-logs/export", + summary="Export transparency logs as CSV", +) +async def export_transparency_logs( + tenant_id: uuid.UUID, + device_id: Optional[uuid.UUID] = Query(default=None), + justification: Optional[str] = Query(default=None), + action: Optional[str] = Query(default=None), + date_from: Optional[datetime] = Query(default=None), + date_to: Optional[datetime] = Query(default=None), + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> StreamingResponse: + _require_admin(current_user) + await _check_tenant_access(current_user, tenant_id, db) + + # Build filter conditions + conditions = [text("k.tenant_id = :tenant_id")] + params: dict[str, Any] = {"tenant_id": str(tenant_id)} + + if device_id: + conditions.append(text("k.device_id = :device_id")) + params["device_id"] = str(device_id) + + if justification: + conditions.append(text("k.justification = :justification")) + params["justification"] = justification + + if action: + conditions.append(text("k.action = :action")) + params["action"] = action + + if date_from: + conditions.append(text("k.created_at >= :date_from")) + params["date_from"] = date_from.isoformat() + + if date_to: + conditions.append(text("k.created_at <= :date_to")) + params["date_to"] = date_to.isoformat() + + where_clause = and_(*conditions) + + _data_columns = text( + "k.id, k.action, d.hostname AS device_name, " + "k.device_id, k.justification, u.email AS operator_email, " + "k.correlation_id, k.resource_type, k.resource_id, " + "k.ip_address, k.created_at" + ) + _data_from = text( + "key_access_log k " + "LEFT JOIN users u ON k.user_id = u.id " + "LEFT JOIN devices d ON k.device_id = d.id" + ) + + result = await db.execute( + select(_data_columns) + .select_from(_data_from) + .where(where_clause) + .order_by(text("k.created_at DESC")), + params, + ) + all_rows = result.mappings().all() + + output = io.StringIO() + writer = csv.writer(output) + writer.writerow([ + "ID", + "Action", + "Device Name", + "Device ID", + "Justification", + "Operator Email", + "Correlation ID", + "Resource Type", + "Resource ID", + "IP Address", + "Timestamp", + ]) + for row in all_rows: + writer.writerow([ + str(row["id"]), + row["action"], + row["device_name"] or "", + str(row["device_id"]) if row["device_id"] else "", + row["justification"] or "", + row["operator_email"] or "", + row["correlation_id"] or "", + row["resource_type"] or "", + row["resource_id"] or "", + row["ip_address"] or "", + str(row["created_at"]), + ]) + + output.seek(0) + return StreamingResponse( + iter([output.getvalue()]), + media_type="text/csv", + headers={ + "Content-Disposition": "attachment; filename=transparency-logs.csv" + }, + ) diff --git a/backend/app/routers/users.py b/backend/app/routers/users.py new file mode 100644 index 0000000..0d85fe2 --- /dev/null +++ b/backend/app/routers/users.py @@ -0,0 +1,231 @@ +""" +User management endpoints (scoped to tenant). + +GET /api/tenants/{tenant_id}/users — list users in tenant +POST /api/tenants/{tenant_id}/users — create user in tenant +GET /api/tenants/{tenant_id}/users/{id} — get user detail +PUT /api/tenants/{tenant_id}/users/{id} — update user +DELETE /api/tenants/{tenant_id}/users/{id} — deactivate user +""" + +import uuid + +from fastapi import APIRouter, Depends, HTTPException, Request, status +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.middleware.rate_limit import limiter + +from app.database import get_admin_db +from app.middleware.rbac import require_tenant_admin_or_above +from app.middleware.tenant_context import CurrentUser +from app.models.tenant import Tenant +from app.models.user import User, UserRole +from app.schemas.user import UserCreate, UserResponse, UserUpdate +from app.services.auth import hash_password + +router = APIRouter(prefix="/tenants", tags=["users"]) + + +async def _check_tenant_access( + tenant_id: uuid.UUID, + current_user: CurrentUser, + db: AsyncSession, +) -> Tenant: + """ + Verify the tenant exists and the current user has access to it. + + super_admin can access any tenant. + tenant_admin can only access their own tenant. + """ + if not current_user.is_super_admin and current_user.tenant_id != tenant_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied to this tenant", + ) + + result = await db.execute(select(Tenant).where(Tenant.id == tenant_id)) + tenant = result.scalar_one_or_none() + + if not tenant: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Tenant not found", + ) + + return tenant + + +@router.get("/{tenant_id}/users", response_model=list[UserResponse], summary="List users in tenant") +async def list_users( + tenant_id: uuid.UUID, + current_user: CurrentUser = Depends(require_tenant_admin_or_above), + db: AsyncSession = Depends(get_admin_db), +) -> list[UserResponse]: + """ + List users in a tenant. + - super_admin: can list users in any tenant + - tenant_admin: can only list users in their own tenant + """ + await _check_tenant_access(tenant_id, current_user, db) + + result = await db.execute( + select(User) + .where(User.tenant_id == tenant_id) + .order_by(User.name) + ) + users = result.scalars().all() + + return [UserResponse.model_validate(user) for user in users] + + +@router.post( + "/{tenant_id}/users", + response_model=UserResponse, + status_code=status.HTTP_201_CREATED, + summary="Create a user in tenant", +) +@limiter.limit("20/minute") +async def create_user( + request: Request, + tenant_id: uuid.UUID, + data: UserCreate, + current_user: CurrentUser = Depends(require_tenant_admin_or_above), + db: AsyncSession = Depends(get_admin_db), +) -> UserResponse: + """ + Create a user within a tenant. + + - super_admin: can create users in any tenant + - tenant_admin: can only create users in their own tenant + - No email invitation flow — admin creates accounts with temporary passwords + """ + await _check_tenant_access(tenant_id, current_user, db) + + # Check email uniqueness (global, not per-tenant) + existing = await db.execute( + select(User).where(User.email == data.email.lower()) + ) + if existing.scalar_one_or_none(): + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail="A user with this email already exists", + ) + + user = User( + email=data.email.lower(), + hashed_password=hash_password(data.password), + name=data.name, + role=data.role.value, + tenant_id=tenant_id, + is_active=True, + must_upgrade_auth=True, + ) + db.add(user) + await db.commit() + await db.refresh(user) + + return UserResponse.model_validate(user) + + +@router.get("/{tenant_id}/users/{user_id}", response_model=UserResponse, summary="Get user detail") +async def get_user( + tenant_id: uuid.UUID, + user_id: uuid.UUID, + current_user: CurrentUser = Depends(require_tenant_admin_or_above), + db: AsyncSession = Depends(get_admin_db), +) -> UserResponse: + """Get user detail.""" + await _check_tenant_access(tenant_id, current_user, db) + + result = await db.execute( + select(User).where(User.id == user_id, User.tenant_id == tenant_id) + ) + user = result.scalar_one_or_none() + + if not user: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="User not found", + ) + + return UserResponse.model_validate(user) + + +@router.put("/{tenant_id}/users/{user_id}", response_model=UserResponse, summary="Update a user") +@limiter.limit("20/minute") +async def update_user( + request: Request, + tenant_id: uuid.UUID, + user_id: uuid.UUID, + data: UserUpdate, + current_user: CurrentUser = Depends(require_tenant_admin_or_above), + db: AsyncSession = Depends(get_admin_db), +) -> UserResponse: + """ + Update user attributes (name, role, is_active). + Role assignment is editable by admins. + """ + await _check_tenant_access(tenant_id, current_user, db) + + result = await db.execute( + select(User).where(User.id == user_id, User.tenant_id == tenant_id) + ) + user = result.scalar_one_or_none() + + if not user: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="User not found", + ) + + if data.name is not None: + user.name = data.name + + if data.role is not None: + user.role = data.role.value + + if data.is_active is not None: + user.is_active = data.is_active + + await db.commit() + await db.refresh(user) + + return UserResponse.model_validate(user) + + +@router.delete("/{tenant_id}/users/{user_id}", status_code=status.HTTP_204_NO_CONTENT, summary="Deactivate a user") +@limiter.limit("5/minute") +async def deactivate_user( + request: Request, + tenant_id: uuid.UUID, + user_id: uuid.UUID, + current_user: CurrentUser = Depends(require_tenant_admin_or_above), + db: AsyncSession = Depends(get_admin_db), +) -> None: + """ + Deactivate a user (soft delete — sets is_active=False). + This preserves audit trail while preventing login. + """ + await _check_tenant_access(tenant_id, current_user, db) + + result = await db.execute( + select(User).where(User.id == user_id, User.tenant_id == tenant_id) + ) + user = result.scalar_one_or_none() + + if not user: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="User not found", + ) + + # Prevent self-deactivation + if user.id == current_user.user_id: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Cannot deactivate your own account", + ) + + user.is_active = False + await db.commit() diff --git a/backend/app/routers/vpn.py b/backend/app/routers/vpn.py new file mode 100644 index 0000000..131aa4f --- /dev/null +++ b/backend/app/routers/vpn.py @@ -0,0 +1,236 @@ +"""WireGuard VPN API endpoints. + +Tenant-scoped routes under /api/tenants/{tenant_id}/vpn/ for: +- VPN setup (enable WireGuard for tenant) +- VPN config management (update endpoint, enable/disable) +- Peer management (add device, remove, get config) + +RLS enforced via get_db() (app_user engine with tenant context). +RBAC: operator and above for all operations. +""" + +import uuid + +from fastapi import APIRouter, Depends, HTTPException, Request, status +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import get_db, set_tenant_context +from app.middleware.rate_limit import limiter +from app.middleware.tenant_context import CurrentUser, get_current_user +from app.models.device import Device +from app.schemas.vpn import ( + VpnConfigResponse, + VpnConfigUpdate, + VpnOnboardRequest, + VpnOnboardResponse, + VpnPeerConfig, + VpnPeerCreate, + VpnPeerResponse, + VpnSetupRequest, +) +from app.services import vpn_service + +router = APIRouter(tags=["vpn"]) + + +async def _check_tenant_access( + current_user: CurrentUser, tenant_id: uuid.UUID, db: AsyncSession +) -> None: + if current_user.is_super_admin: + await set_tenant_context(db, str(tenant_id)) + elif current_user.tenant_id != tenant_id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Access denied") + + +def _require_operator(current_user: CurrentUser) -> None: + if current_user.role == "viewer": + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Operator role required") + + +# ── VPN Config ── + + +@router.get("/tenants/{tenant_id}/vpn", response_model=VpnConfigResponse | None) +async def get_vpn_config( + tenant_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +): + """Get VPN configuration for this tenant.""" + await _check_tenant_access(current_user, tenant_id, db) + config = await vpn_service.get_vpn_config(db, tenant_id) + if not config: + return None + peers = await vpn_service.get_peers(db, tenant_id) + resp = VpnConfigResponse.model_validate(config) + resp.peer_count = len(peers) + return resp + + +@router.post("/tenants/{tenant_id}/vpn", response_model=VpnConfigResponse, status_code=status.HTTP_201_CREATED) +@limiter.limit("20/minute") +async def setup_vpn( + request: Request, + tenant_id: uuid.UUID, + body: VpnSetupRequest, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +): + """Enable VPN for this tenant — generates server keys.""" + await _check_tenant_access(current_user, tenant_id, db) + _require_operator(current_user) + try: + config = await vpn_service.setup_vpn(db, tenant_id, endpoint=body.endpoint) + except ValueError as e: + raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=str(e)) + return VpnConfigResponse.model_validate(config) + + +@router.patch("/tenants/{tenant_id}/vpn", response_model=VpnConfigResponse) +@limiter.limit("20/minute") +async def update_vpn_config( + request: Request, + tenant_id: uuid.UUID, + body: VpnConfigUpdate, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +): + """Update VPN settings (endpoint, enable/disable).""" + await _check_tenant_access(current_user, tenant_id, db) + _require_operator(current_user) + try: + config = await vpn_service.update_vpn_config( + db, tenant_id, endpoint=body.endpoint, is_enabled=body.is_enabled + ) + except ValueError as e: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e)) + peers = await vpn_service.get_peers(db, tenant_id) + resp = VpnConfigResponse.model_validate(config) + resp.peer_count = len(peers) + return resp + + +# ── VPN Peers ── + + +@router.get("/tenants/{tenant_id}/vpn/peers", response_model=list[VpnPeerResponse]) +async def list_peers( + tenant_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +): + """List all VPN peers for this tenant.""" + await _check_tenant_access(current_user, tenant_id, db) + peers = await vpn_service.get_peers(db, tenant_id) + + # Enrich with device info + device_ids = [p.device_id for p in peers] + devices = {} + if device_ids: + result = await db.execute(select(Device).where(Device.id.in_(device_ids))) + devices = {d.id: d for d in result.scalars().all()} + + # Read live WireGuard status for handshake enrichment + wg_status = vpn_service.read_wg_status() + + responses = [] + for peer in peers: + resp = VpnPeerResponse.model_validate(peer) + device = devices.get(peer.device_id) + if device: + resp.device_hostname = device.hostname + resp.device_ip = device.ip_address + # Enrich with live handshake from WireGuard container + live_handshake = vpn_service.get_peer_handshake(wg_status, peer.peer_public_key) + if live_handshake: + resp.last_handshake = live_handshake + responses.append(resp) + return responses + + +@router.post("/tenants/{tenant_id}/vpn/peers", response_model=VpnPeerResponse, status_code=status.HTTP_201_CREATED) +@limiter.limit("20/minute") +async def add_peer( + request: Request, + tenant_id: uuid.UUID, + body: VpnPeerCreate, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +): + """Add a device as a VPN peer.""" + await _check_tenant_access(current_user, tenant_id, db) + _require_operator(current_user) + try: + peer = await vpn_service.add_peer(db, tenant_id, body.device_id, additional_allowed_ips=body.additional_allowed_ips) + except ValueError as e: + raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=str(e)) + + # Enrich with device info + result = await db.execute(select(Device).where(Device.id == peer.device_id)) + device = result.scalar_one_or_none() + + resp = VpnPeerResponse.model_validate(peer) + if device: + resp.device_hostname = device.hostname + resp.device_ip = device.ip_address + return resp + + +@router.post("/tenants/{tenant_id}/vpn/peers/onboard", response_model=VpnOnboardResponse, status_code=status.HTTP_201_CREATED) +@limiter.limit("10/minute") +async def onboard_device( + request: Request, + tenant_id: uuid.UUID, + body: VpnOnboardRequest, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +): + """Create device + VPN peer in one step. Returns RouterOS commands for tunnel setup.""" + await _check_tenant_access(current_user, tenant_id, db) + _require_operator(current_user) + try: + result = await vpn_service.onboard_device( + db, tenant_id, + hostname=body.hostname, + username=body.username, + password=body.password, + ) + except ValueError as e: + raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=str(e)) + return VpnOnboardResponse(**result) + + +@router.delete("/tenants/{tenant_id}/vpn/peers/{peer_id}", status_code=status.HTTP_204_NO_CONTENT) +@limiter.limit("5/minute") +async def remove_peer( + request: Request, + tenant_id: uuid.UUID, + peer_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +): + """Remove a VPN peer.""" + await _check_tenant_access(current_user, tenant_id, db) + _require_operator(current_user) + try: + await vpn_service.remove_peer(db, tenant_id, peer_id) + except ValueError as e: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e)) + + +@router.get("/tenants/{tenant_id}/vpn/peers/{peer_id}/config", response_model=VpnPeerConfig) +async def get_peer_device_config( + tenant_id: uuid.UUID, + peer_id: uuid.UUID, + current_user: CurrentUser = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +): + """Get the full config for a peer — includes private key and RouterOS commands.""" + await _check_tenant_access(current_user, tenant_id, db) + _require_operator(current_user) + try: + config = await vpn_service.get_peer_config(db, tenant_id, peer_id) + except ValueError as e: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e)) + return VpnPeerConfig(**config) diff --git a/backend/app/schemas/__init__.py b/backend/app/schemas/__init__.py new file mode 100644 index 0000000..12673b3 --- /dev/null +++ b/backend/app/schemas/__init__.py @@ -0,0 +1,18 @@ +"""Pydantic schemas for request/response validation.""" + +from app.schemas.auth import LoginRequest, TokenResponse, RefreshRequest, UserMeResponse +from app.schemas.tenant import TenantCreate, TenantResponse, TenantUpdate +from app.schemas.user import UserCreate, UserResponse, UserUpdate + +__all__ = [ + "LoginRequest", + "TokenResponse", + "RefreshRequest", + "UserMeResponse", + "TenantCreate", + "TenantResponse", + "TenantUpdate", + "UserCreate", + "UserResponse", + "UserUpdate", +] diff --git a/backend/app/schemas/auth.py b/backend/app/schemas/auth.py new file mode 100644 index 0000000..9e7d9b2 --- /dev/null +++ b/backend/app/schemas/auth.py @@ -0,0 +1,123 @@ +"""Authentication request/response schemas.""" + +import uuid +from typing import Optional + +from pydantic import BaseModel, EmailStr + + +class LoginRequest(BaseModel): + email: EmailStr + password: str + + +class TokenResponse(BaseModel): + access_token: str + refresh_token: str + token_type: str = "bearer" + auth_upgrade_required: bool = False # True when bcrypt user needs SRP registration + + +class RefreshRequest(BaseModel): + refresh_token: str + + +class UserMeResponse(BaseModel): + id: uuid.UUID + email: str + name: str + role: str + tenant_id: Optional[uuid.UUID] = None + auth_version: int = 1 + + model_config = {"from_attributes": True} + + +class ChangePasswordRequest(BaseModel): + current_password: str + new_password: str + # SRP users must provide re-derived credentials + new_srp_salt: Optional[str] = None + new_srp_verifier: Optional[str] = None + # Re-wrapped key bundle (SRP users re-encrypt with new AUK) + encrypted_private_key: Optional[str] = None + private_key_nonce: Optional[str] = None + encrypted_vault_key: Optional[str] = None + vault_key_nonce: Optional[str] = None + public_key: Optional[str] = None + pbkdf2_salt: Optional[str] = None + hkdf_salt: Optional[str] = None + + +class ForgotPasswordRequest(BaseModel): + email: EmailStr + + +class ResetPasswordRequest(BaseModel): + token: str + new_password: str + + +class MessageResponse(BaseModel): + message: str + + +# --- SRP Zero-Knowledge Authentication Schemas --- + + +class SRPInitRequest(BaseModel): + """Step 1 request: client sends email to begin SRP handshake.""" + email: EmailStr + + +class SRPInitResponse(BaseModel): + """Step 1 response: server returns ephemeral B and key derivation salts.""" + salt: str # hex-encoded SRP salt + server_public: str # hex-encoded server ephemeral B + session_id: str # Redis session key nonce + pbkdf2_salt: str # base64-encoded, from user_key_sets (needed for 2SKD before SRP verify) + hkdf_salt: str # base64-encoded, from user_key_sets (needed for 2SKD before SRP verify) + + +class SRPVerifyRequest(BaseModel): + """Step 2 request: client sends proof M1 to complete handshake.""" + email: EmailStr + session_id: str + client_public: str # hex-encoded client ephemeral A + client_proof: str # hex-encoded client proof M1 + + +class SRPVerifyResponse(BaseModel): + """Step 2 response: server returns tokens and proof M2.""" + access_token: str + refresh_token: str + token_type: str = "bearer" + server_proof: str # hex-encoded server proof M2 + encrypted_key_set: Optional[dict] = None # Key bundle for client-side decryption + + +class SRPRegisterRequest(BaseModel): + """Used during registration to store SRP verifier and key set.""" + srp_salt: str # hex-encoded + srp_verifier: str # hex-encoded + encrypted_private_key: str # base64-encoded + private_key_nonce: str # base64-encoded + encrypted_vault_key: str # base64-encoded + vault_key_nonce: str # base64-encoded + public_key: str # base64-encoded + pbkdf2_salt: str # base64-encoded + hkdf_salt: str # base64-encoded + + +# --- Account Self-Service Schemas --- + + +class DeleteAccountRequest(BaseModel): + """Request body for account self-deletion. User must type 'DELETE' to confirm.""" + confirmation: str # Must be "DELETE" to confirm + + +class DeleteAccountResponse(BaseModel): + """Response after successful account deletion.""" + message: str + deleted: bool diff --git a/backend/app/schemas/certificate.py b/backend/app/schemas/certificate.py new file mode 100644 index 0000000..08aa9c1 --- /dev/null +++ b/backend/app/schemas/certificate.py @@ -0,0 +1,78 @@ +"""Pydantic request/response schemas for the Internal Certificate Authority.""" + +from datetime import datetime +from uuid import UUID + +from pydantic import BaseModel, ConfigDict + + +# --------------------------------------------------------------------------- +# Request schemas +# --------------------------------------------------------------------------- + +class CACreateRequest(BaseModel): + """Request to generate a new root CA for the tenant.""" + + common_name: str = "Portal Root CA" + validity_years: int = 10 # Default 10 years for CA + + +class CertSignRequest(BaseModel): + """Request to sign a per-device certificate using the tenant CA.""" + + device_id: UUID + validity_days: int = 730 # Default 2 years for device certs + + +class BulkCertDeployRequest(BaseModel): + """Request to deploy certificates to multiple devices.""" + + device_ids: list[UUID] + + +# --------------------------------------------------------------------------- +# Response schemas +# --------------------------------------------------------------------------- + +class CAResponse(BaseModel): + """Public details of a tenant's Certificate Authority (no private key).""" + + id: UUID + tenant_id: UUID + common_name: str + fingerprint_sha256: str + serial_number: str + not_valid_before: datetime + not_valid_after: datetime + created_at: datetime + + model_config = ConfigDict(from_attributes=True) + + +class DeviceCertResponse(BaseModel): + """Public details of a device certificate (no private key).""" + + id: UUID + tenant_id: UUID + device_id: UUID + ca_id: UUID + common_name: str + fingerprint_sha256: str + serial_number: str + not_valid_before: datetime + not_valid_after: datetime + status: str + deployed_at: datetime | None + created_at: datetime + updated_at: datetime + + model_config = ConfigDict(from_attributes=True) + + +class CertDeployResponse(BaseModel): + """Result of a single device certificate deployment attempt.""" + + success: bool + device_id: UUID + cert_name_on_device: str | None = None + error: str | None = None diff --git a/backend/app/schemas/device.py b/backend/app/schemas/device.py new file mode 100644 index 0000000..1cf46f7 --- /dev/null +++ b/backend/app/schemas/device.py @@ -0,0 +1,271 @@ +"""Pydantic schemas for Device, DeviceGroup, and DeviceTag endpoints.""" + +import uuid +from datetime import datetime +from typing import Optional + +from pydantic import BaseModel, field_validator + + +# --------------------------------------------------------------------------- +# Device schemas +# --------------------------------------------------------------------------- + + +class DeviceCreate(BaseModel): + """Schema for creating a new device.""" + + hostname: str + ip_address: str + api_port: int = 8728 + api_ssl_port: int = 8729 + username: str + password: str + + +class DeviceUpdate(BaseModel): + """Schema for updating an existing device. All fields optional.""" + + hostname: Optional[str] = None + ip_address: Optional[str] = None + api_port: Optional[int] = None + api_ssl_port: Optional[int] = None + username: Optional[str] = None + password: Optional[str] = None + latitude: Optional[float] = None + longitude: Optional[float] = None + tls_mode: Optional[str] = None + + @field_validator("tls_mode") + @classmethod + def validate_tls_mode(cls, v: Optional[str]) -> Optional[str]: + """Validate tls_mode is one of the allowed values.""" + if v is None: + return v + allowed = {"auto", "insecure", "plain", "portal_ca"} + if v not in allowed: + raise ValueError(f"tls_mode must be one of: {', '.join(sorted(allowed))}") + return v + + +class DeviceTagRef(BaseModel): + """Minimal tag info embedded in device responses.""" + + id: uuid.UUID + name: str + color: Optional[str] = None + + model_config = {"from_attributes": True} + + +class DeviceGroupRef(BaseModel): + """Minimal group info embedded in device responses.""" + + id: uuid.UUID + name: str + + model_config = {"from_attributes": True} + + +class DeviceResponse(BaseModel): + """Device response schema. NEVER includes credential fields.""" + + id: uuid.UUID + hostname: str + ip_address: str + api_port: int + api_ssl_port: int + model: Optional[str] = None + serial_number: Optional[str] = None + firmware_version: Optional[str] = None + routeros_version: Optional[str] = None + routeros_major_version: Optional[int] = None + uptime_seconds: Optional[int] = None + last_seen: Optional[datetime] = None + latitude: Optional[float] = None + longitude: Optional[float] = None + status: str + tls_mode: str = "auto" + tags: list[DeviceTagRef] = [] + groups: list[DeviceGroupRef] = [] + created_at: datetime + + model_config = {"from_attributes": True} + + +class DeviceListResponse(BaseModel): + """Paginated device list response.""" + + items: list[DeviceResponse] + total: int + page: int + page_size: int + + +# --------------------------------------------------------------------------- +# Subnet scan schemas +# --------------------------------------------------------------------------- + + +class SubnetScanRequest(BaseModel): + """Request body for a subnet scan.""" + + cidr: str + + @field_validator("cidr") + @classmethod + def validate_cidr(cls, v: str) -> str: + """Validate that the value is a valid CIDR notation and RFC 1918 private range.""" + import ipaddress + try: + network = ipaddress.ip_network(v, strict=False) + except ValueError as e: + raise ValueError(f"Invalid CIDR notation: {e}") from e + # Only allow private IP ranges (RFC 1918: 10/8, 172.16/12, 192.168/16) + if not network.is_private: + raise ValueError( + "Only private IP ranges can be scanned (RFC 1918: " + "10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16)" + ) + # Reject ranges larger than /20 (4096 IPs) to prevent abuse + if network.num_addresses > 4096: + raise ValueError( + f"CIDR range too large ({network.num_addresses} addresses). " + "Maximum allowed: /20 (4096 addresses)." + ) + return v + + +class SubnetScanResult(BaseModel): + """A single discovered host from a subnet scan.""" + + ip_address: str + hostname: Optional[str] = None + api_port_open: bool = False + api_ssl_port_open: bool = False + + +class SubnetScanResponse(BaseModel): + """Response for a subnet scan operation.""" + + cidr: str + discovered: list[SubnetScanResult] + total_scanned: int + total_discovered: int + + +# --------------------------------------------------------------------------- +# Bulk add from scan +# --------------------------------------------------------------------------- + + +class BulkDeviceAdd(BaseModel): + """One device entry within a bulk-add request.""" + + ip_address: str + hostname: Optional[str] = None + api_port: int = 8728 + api_ssl_port: int = 8729 + username: Optional[str] = None + password: Optional[str] = None + + +class BulkAddRequest(BaseModel): + """ + Bulk-add devices selected from a scan result. + + shared_username / shared_password are used for all devices that do not + provide their own credentials. + """ + + devices: list[BulkDeviceAdd] + shared_username: Optional[str] = None + shared_password: Optional[str] = None + + +class BulkAddResult(BaseModel): + """Summary result of a bulk-add operation.""" + + added: list[DeviceResponse] + failed: list[dict] # {ip_address, error} + + +# --------------------------------------------------------------------------- +# DeviceGroup schemas +# --------------------------------------------------------------------------- + + +class DeviceGroupCreate(BaseModel): + """Schema for creating a device group.""" + + name: str + description: Optional[str] = None + + +class DeviceGroupUpdate(BaseModel): + """Schema for updating a device group.""" + + name: Optional[str] = None + description: Optional[str] = None + + +class DeviceGroupResponse(BaseModel): + """Device group response schema.""" + + id: uuid.UUID + name: str + description: Optional[str] = None + device_count: int = 0 + created_at: datetime + + model_config = {"from_attributes": True} + + +# --------------------------------------------------------------------------- +# DeviceTag schemas +# --------------------------------------------------------------------------- + + +class DeviceTagCreate(BaseModel): + """Schema for creating a device tag.""" + + name: str + color: Optional[str] = None + + @field_validator("color") + @classmethod + def validate_color(cls, v: Optional[str]) -> Optional[str]: + """Validate hex color format if provided.""" + if v is None: + return v + import re + if not re.match(r"^#[0-9A-Fa-f]{6}$", v): + raise ValueError("Color must be a valid 6-digit hex color (e.g. #FF5733)") + return v + + +class DeviceTagUpdate(BaseModel): + """Schema for updating a device tag.""" + + name: Optional[str] = None + color: Optional[str] = None + + @field_validator("color") + @classmethod + def validate_color(cls, v: Optional[str]) -> Optional[str]: + if v is None: + return v + import re + if not re.match(r"^#[0-9A-Fa-f]{6}$", v): + raise ValueError("Color must be a valid 6-digit hex color (e.g. #FF5733)") + return v + + +class DeviceTagResponse(BaseModel): + """Device tag response schema.""" + + id: uuid.UUID + name: str + color: Optional[str] = None + + model_config = {"from_attributes": True} diff --git a/backend/app/schemas/tenant.py b/backend/app/schemas/tenant.py new file mode 100644 index 0000000..d14bd1f --- /dev/null +++ b/backend/app/schemas/tenant.py @@ -0,0 +1,31 @@ +"""Tenant request/response schemas.""" + +import uuid +from datetime import datetime +from typing import Optional + +from pydantic import BaseModel + + +class TenantCreate(BaseModel): + name: str + description: Optional[str] = None + contact_email: Optional[str] = None + + +class TenantUpdate(BaseModel): + name: Optional[str] = None + description: Optional[str] = None + contact_email: Optional[str] = None + + +class TenantResponse(BaseModel): + id: uuid.UUID + name: str + description: Optional[str] = None + contact_email: Optional[str] = None + user_count: int = 0 + device_count: int = 0 + created_at: datetime + + model_config = {"from_attributes": True} diff --git a/backend/app/schemas/user.py b/backend/app/schemas/user.py new file mode 100644 index 0000000..190ba88 --- /dev/null +++ b/backend/app/schemas/user.py @@ -0,0 +1,53 @@ +"""User request/response schemas.""" + +import uuid +from datetime import datetime +from typing import Optional + +from pydantic import BaseModel, EmailStr, field_validator + +from app.models.user import UserRole + + +class UserCreate(BaseModel): + name: str + email: EmailStr + password: str + role: UserRole = UserRole.VIEWER + + @field_validator("password") + @classmethod + def validate_password(cls, v: str) -> str: + if len(v) < 8: + raise ValueError("Password must be at least 8 characters") + return v + + @field_validator("role") + @classmethod + def validate_role(cls, v: UserRole) -> UserRole: + """Tenant admins can only create operator/viewer roles; super_admin via separate flow.""" + allowed_tenant_roles = {UserRole.TENANT_ADMIN, UserRole.OPERATOR, UserRole.VIEWER} + if v not in allowed_tenant_roles: + raise ValueError( + f"Role must be one of: {', '.join(r.value for r in allowed_tenant_roles)}" + ) + return v + + +class UserResponse(BaseModel): + id: uuid.UUID + name: str + email: str + role: str + tenant_id: Optional[uuid.UUID] = None + is_active: bool + last_login: Optional[datetime] = None + created_at: datetime + + model_config = {"from_attributes": True} + + +class UserUpdate(BaseModel): + name: Optional[str] = None + role: Optional[UserRole] = None + is_active: Optional[bool] = None diff --git a/backend/app/schemas/vpn.py b/backend/app/schemas/vpn.py new file mode 100644 index 0000000..d36d872 --- /dev/null +++ b/backend/app/schemas/vpn.py @@ -0,0 +1,91 @@ +"""Pydantic schemas for WireGuard VPN management.""" + +import uuid +from datetime import datetime +from typing import Optional + +from pydantic import BaseModel + + +# ── VPN Config (server-side) ── + + +class VpnSetupRequest(BaseModel): + """Request to enable VPN for a tenant.""" + endpoint: Optional[str] = None # public hostname:port — if blank, devices must be configured manually + + +class VpnConfigResponse(BaseModel): + """VPN server configuration (never exposes private key).""" + model_config = {"from_attributes": True} + + id: uuid.UUID + tenant_id: uuid.UUID + server_public_key: str + subnet: str + server_port: int + server_address: str + endpoint: Optional[str] + is_enabled: bool + peer_count: int = 0 + created_at: datetime + + +class VpnConfigUpdate(BaseModel): + """Update VPN configuration.""" + endpoint: Optional[str] = None + is_enabled: Optional[bool] = None + + +# ── VPN Peers ── + + +class VpnPeerCreate(BaseModel): + """Add a device as a VPN peer.""" + device_id: uuid.UUID + additional_allowed_ips: Optional[str] = None # comma-separated subnets for site-to-site routing + + +class VpnPeerResponse(BaseModel): + """VPN peer info (never exposes private key).""" + model_config = {"from_attributes": True} + + id: uuid.UUID + device_id: uuid.UUID + device_hostname: str = "" + device_ip: str = "" + peer_public_key: str + assigned_ip: str + is_enabled: bool + last_handshake: Optional[datetime] + created_at: datetime + + +# ── VPN Onboarding (combined device + peer creation) ── + + +class VpnOnboardRequest(BaseModel): + """Combined device creation + VPN peer onboarding.""" + hostname: str + username: str + password: str + + +class VpnOnboardResponse(BaseModel): + """Response from onboarding — device, peer, and RouterOS commands.""" + device_id: uuid.UUID + peer_id: uuid.UUID + hostname: str + assigned_ip: str + routeros_commands: list[str] + + +class VpnPeerConfig(BaseModel): + """Full peer config for display/export — includes private key for device setup.""" + peer_private_key: str + peer_public_key: str + assigned_ip: str + server_public_key: str + server_endpoint: str + allowed_ips: str + routeros_commands: list[str] diff --git a/backend/app/security/__init__.py b/backend/app/security/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/app/security/command_blocklist.py b/backend/app/security/command_blocklist.py new file mode 100644 index 0000000..8f949ab --- /dev/null +++ b/backend/app/security/command_blocklist.py @@ -0,0 +1,95 @@ +"""Dangerous RouterOS command and path blocklist. + +Prevents destructive or sensitive operations from being executed through +the config editor. Commands and paths are checked via case-insensitive +prefix matching against known-dangerous entries. + +To extend: add strings to DANGEROUS_COMMANDS, BROWSE_BLOCKED_PATHS, +or WRITE_BLOCKED_PATHS. +""" + +from fastapi import HTTPException, status + +# CLI commands blocked from the execute endpoint. +# Matched as case-insensitive prefixes (e.g., "/user" blocks "/user/print" too). +DANGEROUS_COMMANDS: list[str] = [ + "/system/reset-configuration", + "/system/shutdown", + "/system/reboot", + "/system/backup", + "/system/license", + "/user", + "/password", + "/certificate", + "/radius", + "/export", + "/import", +] + +# Paths blocked from ALL operations including browse (truly dangerous to read). +BROWSE_BLOCKED_PATHS: list[str] = [ + "system/reset-configuration", + "system/shutdown", + "system/reboot", + "system/backup", + "system/license", + "password", +] + +# Paths blocked from write operations (add/set/remove) but readable via browse. +WRITE_BLOCKED_PATHS: list[str] = [ + "user", + "certificate", + "radius", +] + + +def check_command_safety(command: str) -> None: + """Reject dangerous CLI commands with HTTP 403. + + Normalizes the command (strip + lowercase) and checks against + DANGEROUS_COMMANDS using prefix matching. + + Raises: + HTTPException: 403 if the command matches a dangerous prefix. + """ + normalized = command.strip().lower() + for blocked in DANGEROUS_COMMANDS: + if normalized.startswith(blocked): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail=( + f"Command blocked: '{command}' matches dangerous prefix '{blocked}'. " + f"This operation is not allowed through the config editor." + ), + ) + + +def check_path_safety(path: str, *, write: bool = False) -> None: + """Reject dangerous menu paths with HTTP 403. + + Normalizes the path (strip + lowercase + lstrip '/') and checks + against blocked path lists using prefix matching. + + Args: + path: The RouterOS menu path to check. + write: If True, also check WRITE_BLOCKED_PATHS (for add/set/remove). + If False, only check BROWSE_BLOCKED_PATHS (for read-only browse). + + Raises: + HTTPException: 403 if the path matches a blocked prefix. + """ + normalized = path.strip().lower().lstrip("/") + blocked_lists = [BROWSE_BLOCKED_PATHS] + if write: + blocked_lists.append(WRITE_BLOCKED_PATHS) + for blocklist in blocked_lists: + for blocked in blocklist: + if normalized.startswith(blocked): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail=( + f"Path blocked: '{path}' matches dangerous prefix '{blocked}'. " + f"This operation is not allowed through the config editor." + ), + ) diff --git a/backend/app/services/__init__.py b/backend/app/services/__init__.py new file mode 100644 index 0000000..53a144d --- /dev/null +++ b/backend/app/services/__init__.py @@ -0,0 +1 @@ +"""Backend services — auth, crypto, and business logic.""" diff --git a/backend/app/services/account_service.py b/backend/app/services/account_service.py new file mode 100644 index 0000000..5339974 --- /dev/null +++ b/backend/app/services/account_service.py @@ -0,0 +1,240 @@ +"""Account self-service operations: deletion and data export. + +Provides GDPR/CCPA-compliant account deletion with full PII erasure +and data portability export (Article 20). + +All queries use raw SQL via text() with admin sessions (bypass RLS) +since these are cross-table operations on the authenticated user's data. +""" + +import hashlib +import uuid +from datetime import UTC, datetime +from typing import Any + +import structlog +from sqlalchemy import text +from sqlalchemy.ext.asyncio import AsyncSession + +from app.database import AdminAsyncSessionLocal +from app.services.audit_service import log_action + +logger = structlog.get_logger("account_service") + + +async def delete_user_account( + db: AsyncSession, + user_id: uuid.UUID, + tenant_id: uuid.UUID | None, + user_email: str, +) -> dict[str, Any]: + """Hard-delete a user account with full PII erasure. + + Steps: + 1. Create a deletion receipt audit log (persisted via separate session) + 2. Anonymize PII in existing audit_logs for this user + 3. Hard-delete the user row (CASCADE handles related tables) + 4. Best-effort session invalidation via Redis + + Args: + db: Admin async session (bypasses RLS). + user_id: UUID of the user to delete. + tenant_id: Tenant UUID (None for super_admin). + user_email: User's email (needed for audit hash before deletion). + + Returns: + Dict with deleted=True and user_id on success. + """ + effective_tenant_id = tenant_id or uuid.UUID(int=0) + email_hash = hashlib.sha256(user_email.encode()).hexdigest() + + # ── 1. Pre-deletion audit receipt (separate session so it persists) ──── + try: + async with AdminAsyncSessionLocal() as audit_db: + await log_action( + audit_db, + tenant_id=effective_tenant_id, + user_id=user_id, + action="account_deleted", + resource_type="user", + resource_id=str(user_id), + details={ + "deleted_user_id": str(user_id), + "email_hash": email_hash, + "deletion_type": "self_service", + "deleted_at": datetime.now(UTC).isoformat(), + }, + ) + await audit_db.commit() + except Exception: + logger.warning( + "deletion_receipt_failed", + user_id=str(user_id), + exc_info=True, + ) + + # ── 2. Anonymize PII in audit_logs for this user ───────────────────── + # Strip PII keys from details JSONB (email, name, user_email, user_name) + await db.execute( + text( + "UPDATE audit_logs " + "SET details = details - 'email' - 'name' - 'user_email' - 'user_name' " + "WHERE user_id = :user_id" + ), + {"user_id": user_id}, + ) + + # Null out encrypted_details (may contain encrypted PII) + await db.execute( + text( + "UPDATE audit_logs " + "SET encrypted_details = NULL " + "WHERE user_id = :user_id" + ), + {"user_id": user_id}, + ) + + # ── 3. Hard delete user row ────────────────────────────────────────── + # CASCADE handles: user_key_sets, api_keys, password_reset_tokens + # SET NULL handles: audit_logs.user_id, key_access_log.user_id, + # maintenance_windows.created_by, alert_events.acknowledged_by + await db.execute( + text("DELETE FROM users WHERE id = :user_id"), + {"user_id": user_id}, + ) + + await db.commit() + + # ── 4. Best-effort Redis session invalidation ──────────────────────── + try: + import redis.asyncio as aioredis + from app.config import settings + from app.services.auth import revoke_user_tokens + + r = aioredis.from_url(settings.REDIS_URL, decode_responses=True) + await revoke_user_tokens(r, str(user_id)) + await r.aclose() + except Exception: + # JWT expires in 15 min anyway; not critical + logger.debug("redis_session_invalidation_skipped", user_id=str(user_id)) + + logger.info("account_deleted", user_id=str(user_id), email_hash=email_hash) + + return {"deleted": True, "user_id": str(user_id)} + + +async def export_user_data( + db: AsyncSession, + user_id: uuid.UUID, + tenant_id: uuid.UUID | None, +) -> dict[str, Any]: + """Assemble all user data for GDPR Art. 20 data portability export. + + Returns a structured dict with user profile, API keys, audit logs, + and key access log entries. + + Args: + db: Admin async session (bypasses RLS). + user_id: UUID of the user whose data to export. + tenant_id: Tenant UUID (None for super_admin). + + Returns: + Envelope dict with export_date, format_version, and all user data. + """ + + # ── User profile ───────────────────────────────────────────────────── + result = await db.execute( + text( + "SELECT id, email, name, role, tenant_id, " + "created_at, last_login, auth_version " + "FROM users WHERE id = :user_id" + ), + {"user_id": user_id}, + ) + user_row = result.mappings().first() + user_data: dict[str, Any] = {} + if user_row: + user_data = { + "id": str(user_row["id"]), + "email": user_row["email"], + "name": user_row["name"], + "role": user_row["role"], + "tenant_id": str(user_row["tenant_id"]) if user_row["tenant_id"] else None, + "created_at": user_row["created_at"].isoformat() if user_row["created_at"] else None, + "last_login": user_row["last_login"].isoformat() if user_row["last_login"] else None, + "auth_version": user_row["auth_version"], + } + + # ── API keys (exclude key_hash for security) ───────────────────────── + result = await db.execute( + text( + "SELECT id, name, key_prefix, scopes, created_at, " + "expires_at, revoked_at, last_used_at " + "FROM api_keys WHERE user_id = :user_id " + "ORDER BY created_at DESC" + ), + {"user_id": user_id}, + ) + api_keys = [] + for row in result.mappings().all(): + api_keys.append({ + "id": str(row["id"]), + "name": row["name"], + "key_prefix": row["key_prefix"], + "scopes": row["scopes"], + "created_at": row["created_at"].isoformat() if row["created_at"] else None, + "expires_at": row["expires_at"].isoformat() if row["expires_at"] else None, + "revoked_at": row["revoked_at"].isoformat() if row["revoked_at"] else None, + "last_used_at": row["last_used_at"].isoformat() if row["last_used_at"] else None, + }) + + # ── Audit logs (limit 1000, most recent first) ─────────────────────── + result = await db.execute( + text( + "SELECT id, action, resource_type, resource_id, " + "details, ip_address, created_at " + "FROM audit_logs WHERE user_id = :user_id " + "ORDER BY created_at DESC LIMIT 1000" + ), + {"user_id": user_id}, + ) + audit_logs = [] + for row in result.mappings().all(): + details = row["details"] if row["details"] else {} + audit_logs.append({ + "id": str(row["id"]), + "action": row["action"], + "resource_type": row["resource_type"], + "resource_id": row["resource_id"], + "details": details, + "ip_address": row["ip_address"], + "created_at": row["created_at"].isoformat() if row["created_at"] else None, + }) + + # ── Key access log (limit 1000, most recent first) ─────────────────── + result = await db.execute( + text( + "SELECT id, action, resource_type, ip_address, created_at " + "FROM key_access_log WHERE user_id = :user_id " + "ORDER BY created_at DESC LIMIT 1000" + ), + {"user_id": user_id}, + ) + key_access_entries = [] + for row in result.mappings().all(): + key_access_entries.append({ + "id": str(row["id"]), + "action": row["action"], + "resource_type": row["resource_type"], + "ip_address": row["ip_address"], + "created_at": row["created_at"].isoformat() if row["created_at"] else None, + }) + + return { + "export_date": datetime.now(UTC).isoformat(), + "format_version": "1.0", + "user": user_data, + "api_keys": api_keys, + "audit_logs": audit_logs, + "key_access_log": key_access_entries, + } diff --git a/backend/app/services/alert_evaluator.py b/backend/app/services/alert_evaluator.py new file mode 100644 index 0000000..e6d474b --- /dev/null +++ b/backend/app/services/alert_evaluator.py @@ -0,0 +1,723 @@ +"""Alert rule evaluation engine with Redis breach counters and flap detection. + +Entry points: +- evaluate(device_id, tenant_id, metric_type, data): called from metrics_subscriber +- evaluate_offline(device_id, tenant_id): called from nats_subscriber on device offline +- evaluate_online(device_id, tenant_id): called from nats_subscriber on device online + +Uses Redis for: +- Consecutive breach counting (alert:breach:{device_id}:{rule_id}) +- Flap detection (alert:flap:{device_id}:{rule_id} sorted set) + +Uses AdminAsyncSessionLocal for all DB operations (runs cross-tenant in NATS handlers). +""" + +import asyncio +import logging +import time +from datetime import datetime, timezone +from typing import Any + +import redis.asyncio as aioredis +from sqlalchemy import text + +from app.config import settings +from app.database import AdminAsyncSessionLocal +from app.services.event_publisher import publish_event + +logger = logging.getLogger(__name__) + +# Module-level Redis client, lazily initialized +_redis_client: aioredis.Redis | None = None + +# Module-level rule cache: {tenant_id: (rules_list, fetched_at_timestamp)} +_rule_cache: dict[str, tuple[list[dict], float]] = {} +_CACHE_TTL_SECONDS = 60 + +# Module-level maintenance window cache: {tenant_id: (active_windows_list, fetched_at_timestamp)} +# Each window: {"device_ids": [...], "suppress_alerts": True} +_maintenance_cache: dict[str, tuple[list[dict], float]] = {} +_MAINTENANCE_CACHE_TTL = 30 # 30 seconds + + +async def _get_redis() -> aioredis.Redis: + """Get or create the Redis client.""" + global _redis_client + if _redis_client is None: + _redis_client = aioredis.from_url(settings.REDIS_URL, decode_responses=True) + return _redis_client + + +async def _get_active_maintenance_windows(tenant_id: str) -> list[dict]: + """Fetch active maintenance windows for a tenant, with 30s cache.""" + now = time.time() + cached = _maintenance_cache.get(tenant_id) + if cached and (now - cached[1]) < _MAINTENANCE_CACHE_TTL: + return cached[0] + + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text(""" + SELECT device_ids, suppress_alerts + FROM maintenance_windows + WHERE tenant_id = CAST(:tenant_id AS uuid) + AND suppress_alerts = true + AND start_at <= NOW() + AND end_at >= NOW() + """), + {"tenant_id": tenant_id}, + ) + rows = result.fetchall() + + windows = [ + { + "device_ids": row[0] if isinstance(row[0], list) else [], + "suppress_alerts": row[1], + } + for row in rows + ] + + _maintenance_cache[tenant_id] = (windows, now) + return windows + + +async def _is_device_in_maintenance(tenant_id: str, device_id: str) -> bool: + """Check if a device is currently under active maintenance with alert suppression. + + Returns True if there is at least one active maintenance window covering + this device (or all devices via empty device_ids array). + """ + windows = await _get_active_maintenance_windows(tenant_id) + for window in windows: + device_ids = window["device_ids"] + # Empty device_ids means "all devices in tenant" + if not device_ids or device_id in device_ids: + return True + return False + + +async def _get_rules_for_tenant(tenant_id: str) -> list[dict]: + """Fetch active alert rules for a tenant, with 60s cache.""" + now = time.time() + cached = _rule_cache.get(tenant_id) + if cached and (now - cached[1]) < _CACHE_TTL_SECONDS: + return cached[0] + + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text(""" + SELECT id, tenant_id, device_id, group_id, name, metric, + operator, threshold, duration_polls, severity + FROM alert_rules + WHERE tenant_id = CAST(:tenant_id AS uuid) AND enabled = TRUE + """), + {"tenant_id": tenant_id}, + ) + rows = result.fetchall() + + rules = [ + { + "id": str(row[0]), + "tenant_id": str(row[1]), + "device_id": str(row[2]) if row[2] else None, + "group_id": str(row[3]) if row[3] else None, + "name": row[4], + "metric": row[5], + "operator": row[6], + "threshold": float(row[7]), + "duration_polls": row[8], + "severity": row[9], + } + for row in rows + ] + + _rule_cache[tenant_id] = (rules, now) + return rules + + +def _check_threshold(value: float, operator: str, threshold: float) -> bool: + """Check if a metric value breaches a threshold.""" + if operator == "gt": + return value > threshold + elif operator == "lt": + return value < threshold + elif operator == "gte": + return value >= threshold + elif operator == "lte": + return value <= threshold + return False + + +def _extract_metrics(metric_type: str, data: dict) -> dict[str, float]: + """Extract metric name->value pairs from a NATS metrics event.""" + metrics: dict[str, float] = {} + + if metric_type == "health": + health = data.get("health", {}) + for key in ("cpu_load", "temperature"): + val = health.get(key) + if val is not None and val != "": + try: + metrics[key] = float(val) + except (ValueError, TypeError): + pass + # Compute memory_used_pct and disk_used_pct + free_mem = health.get("free_memory") + total_mem = health.get("total_memory") + if free_mem is not None and total_mem is not None: + try: + total = float(total_mem) + free = float(free_mem) + if total > 0: + metrics["memory_used_pct"] = round((1.0 - free / total) * 100, 1) + except (ValueError, TypeError): + pass + free_disk = health.get("free_disk") + total_disk = health.get("total_disk") + if free_disk is not None and total_disk is not None: + try: + total = float(total_disk) + free = float(free_disk) + if total > 0: + metrics["disk_used_pct"] = round((1.0 - free / total) * 100, 1) + except (ValueError, TypeError): + pass + + elif metric_type == "wireless": + wireless = data.get("wireless", []) + # Aggregate: use worst signal, lowest CCQ, sum client_count + for wif in wireless: + for key in ("signal_strength", "ccq", "client_count"): + val = wif.get(key) if key != "avg_signal" else wif.get("avg_signal") + if key == "signal_strength": + val = wif.get("avg_signal") + if val is not None and val != "": + try: + fval = float(val) + if key not in metrics: + metrics[key] = fval + elif key == "signal_strength": + metrics[key] = min(metrics[key], fval) # worst signal + elif key == "ccq": + metrics[key] = min(metrics[key], fval) # worst CCQ + elif key == "client_count": + metrics[key] = metrics.get(key, 0) + fval # sum + except (ValueError, TypeError): + pass + + # TODO: Interface bandwidth alerting (rx_bps/tx_bps) requires stateful delta + # computation between consecutive poll values. Deferred for now — the alert_rules + # table supports these metric types, but evaluation is skipped. + + return metrics + + +async def _increment_breach( + r: aioredis.Redis, device_id: str, rule_id: str, required_polls: int +) -> bool: + """Increment breach counter in Redis. Returns True when threshold duration reached.""" + key = f"alert:breach:{device_id}:{rule_id}" + count = await r.incr(key) + # Set TTL to (required_polls + 2) * 60 seconds so it expires if breaches stop + await r.expire(key, (required_polls + 2) * 60) + return count >= required_polls + + +async def _reset_breach(r: aioredis.Redis, device_id: str, rule_id: str) -> None: + """Reset breach counter when metric returns to normal.""" + key = f"alert:breach:{device_id}:{rule_id}" + await r.delete(key) + + +async def _check_flapping(r: aioredis.Redis, device_id: str, rule_id: str) -> bool: + """Check if alert is flapping (>= 5 state transitions in 10 minutes). + + Uses a Redis sorted set with timestamps as scores. + """ + key = f"alert:flap:{device_id}:{rule_id}" + now = time.time() + window_start = now - 600 # 10 minute window + + # Add this transition + await r.zadd(key, {str(now): now}) + # Remove entries outside the window + await r.zremrangebyscore(key, "-inf", window_start) + # Set TTL on the key + await r.expire(key, 1200) + # Count transitions in window + count = await r.zcard(key) + return count >= 5 + + +async def _get_device_groups(device_id: str) -> list[str]: + """Get group IDs for a device.""" + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text("SELECT group_id FROM device_group_memberships WHERE device_id = CAST(:device_id AS uuid)"), + {"device_id": device_id}, + ) + return [str(row[0]) for row in result.fetchall()] + + +async def _has_open_alert(device_id: str, rule_id: str | None, metric: str | None = None) -> bool: + """Check if there's an open (firing, unresolved) alert for this device+rule.""" + async with AdminAsyncSessionLocal() as session: + if rule_id: + result = await session.execute( + text(""" + SELECT 1 FROM alert_events + WHERE device_id = CAST(:device_id AS uuid) AND rule_id = CAST(:rule_id AS uuid) + AND status = 'firing' AND resolved_at IS NULL + LIMIT 1 + """), + {"device_id": device_id, "rule_id": rule_id}, + ) + else: + result = await session.execute( + text(""" + SELECT 1 FROM alert_events + WHERE device_id = CAST(:device_id AS uuid) AND rule_id IS NULL + AND metric = :metric AND status = 'firing' AND resolved_at IS NULL + LIMIT 1 + """), + {"device_id": device_id, "metric": metric or "offline"}, + ) + return result.fetchone() is not None + + +async def _create_alert_event( + device_id: str, + tenant_id: str, + rule_id: str | None, + status: str, + severity: str, + metric: str | None, + value: float | None, + threshold: float | None, + message: str | None, + is_flapping: bool = False, +) -> dict: + """Create an alert event row and return its data.""" + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text(""" + INSERT INTO alert_events + (id, device_id, tenant_id, rule_id, status, severity, metric, + value, threshold, message, is_flapping, fired_at, + resolved_at) + VALUES + (gen_random_uuid(), CAST(:device_id AS uuid), CAST(:tenant_id AS uuid), + :rule_id, :status, :severity, :metric, + :value, :threshold, :message, :is_flapping, NOW(), + CASE WHEN :status = 'resolved' THEN NOW() ELSE NULL END) + RETURNING id, fired_at + """), + { + "device_id": device_id, + "tenant_id": tenant_id, + "rule_id": rule_id, + "status": status, + "severity": severity, + "metric": metric, + "value": value, + "threshold": threshold, + "message": message, + "is_flapping": is_flapping, + }, + ) + row = result.fetchone() + await session.commit() + + alert_data = { + "id": str(row[0]) if row else None, + "device_id": device_id, + "tenant_id": tenant_id, + "rule_id": rule_id, + "status": status, + "severity": severity, + "metric": metric, + "value": value, + "threshold": threshold, + "message": message, + "is_flapping": is_flapping, + } + + # Publish real-time event to NATS for SSE pipeline (fire-and-forget) + if status in ("firing", "flapping"): + await publish_event(f"alert.fired.{tenant_id}", { + "event_type": "alert_fired", + "tenant_id": tenant_id, + "device_id": device_id, + "alert_event_id": alert_data["id"], + "severity": severity, + "metric": metric, + "current_value": value, + "threshold": threshold, + "message": message, + "is_flapping": is_flapping, + "fired_at": datetime.now(timezone.utc).isoformat(), + }) + elif status == "resolved": + await publish_event(f"alert.resolved.{tenant_id}", { + "event_type": "alert_resolved", + "tenant_id": tenant_id, + "device_id": device_id, + "alert_event_id": alert_data["id"], + "severity": severity, + "metric": metric, + "message": message, + "resolved_at": datetime.now(timezone.utc).isoformat(), + }) + + return alert_data + + +async def _resolve_alert(device_id: str, rule_id: str | None, metric: str | None = None) -> None: + """Resolve an open alert by setting resolved_at.""" + async with AdminAsyncSessionLocal() as session: + if rule_id: + await session.execute( + text(""" + UPDATE alert_events SET resolved_at = NOW(), status = 'resolved' + WHERE device_id = CAST(:device_id AS uuid) AND rule_id = CAST(:rule_id AS uuid) + AND status = 'firing' AND resolved_at IS NULL + """), + {"device_id": device_id, "rule_id": rule_id}, + ) + else: + await session.execute( + text(""" + UPDATE alert_events SET resolved_at = NOW(), status = 'resolved' + WHERE device_id = CAST(:device_id AS uuid) AND rule_id IS NULL + AND metric = :metric AND status = 'firing' AND resolved_at IS NULL + """), + {"device_id": device_id, "metric": metric or "offline"}, + ) + await session.commit() + + +async def _get_channels_for_tenant(tenant_id: str) -> list[dict]: + """Get all notification channels for a tenant.""" + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text(""" + SELECT id, name, channel_type, smtp_host, smtp_port, smtp_user, + smtp_password, smtp_use_tls, from_address, to_address, + webhook_url, smtp_password_transit, slack_webhook_url, tenant_id + FROM notification_channels + WHERE tenant_id = CAST(:tenant_id AS uuid) + """), + {"tenant_id": tenant_id}, + ) + return [ + { + "id": str(row[0]), + "name": row[1], + "channel_type": row[2], + "smtp_host": row[3], + "smtp_port": row[4], + "smtp_user": row[5], + "smtp_password": row[6], + "smtp_use_tls": row[7], + "from_address": row[8], + "to_address": row[9], + "webhook_url": row[10], + "smtp_password_transit": row[11], + "slack_webhook_url": row[12], + "tenant_id": str(row[13]) if row[13] else None, + } + for row in result.fetchall() + ] + + +async def _get_channels_for_rule(rule_id: str) -> list[dict]: + """Get notification channels linked to a specific alert rule.""" + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text(""" + SELECT nc.id, nc.name, nc.channel_type, nc.smtp_host, nc.smtp_port, + nc.smtp_user, nc.smtp_password, nc.smtp_use_tls, + nc.from_address, nc.to_address, nc.webhook_url, + nc.smtp_password_transit, nc.slack_webhook_url, nc.tenant_id + FROM notification_channels nc + JOIN alert_rule_channels arc ON arc.channel_id = nc.id + WHERE arc.rule_id = CAST(:rule_id AS uuid) + """), + {"rule_id": rule_id}, + ) + return [ + { + "id": str(row[0]), + "name": row[1], + "channel_type": row[2], + "smtp_host": row[3], + "smtp_port": row[4], + "smtp_user": row[5], + "smtp_password": row[6], + "smtp_use_tls": row[7], + "from_address": row[8], + "to_address": row[9], + "webhook_url": row[10], + "smtp_password_transit": row[11], + "slack_webhook_url": row[12], + "tenant_id": str(row[13]) if row[13] else None, + } + for row in result.fetchall() + ] + + +async def _dispatch_async(alert_event: dict, channels: list[dict], device_hostname: str) -> None: + """Fire-and-forget notification dispatch.""" + try: + from app.services.notification_service import dispatch_notifications + await dispatch_notifications(alert_event, channels, device_hostname) + except Exception as e: + logger.warning("Notification dispatch failed: %s", e) + + +async def _get_device_hostname(device_id: str) -> str: + """Get device hostname for notification messages.""" + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text("SELECT hostname FROM devices WHERE id = CAST(:device_id AS uuid)"), + {"device_id": device_id}, + ) + row = result.fetchone() + return row[0] if row else device_id + + +async def evaluate( + device_id: str, + tenant_id: str, + metric_type: str, + data: dict[str, Any], +) -> None: + """Evaluate alert rules for incoming device metrics. + + Called from metrics_subscriber after metric DB write. + """ + # Check maintenance window suppression before evaluating rules + if await _is_device_in_maintenance(tenant_id, device_id): + logger.debug( + "Alert suppressed by maintenance window for device %s tenant %s", + device_id, tenant_id, + ) + return + + rules = await _get_rules_for_tenant(tenant_id) + if not rules: + return + + metrics = _extract_metrics(metric_type, data) + if not metrics: + return + + r = await _get_redis() + device_groups = await _get_device_groups(device_id) + + # Build a set of metrics that have device-specific rules + device_specific_metrics: set[str] = set() + for rule in rules: + if rule["device_id"] == device_id: + device_specific_metrics.add(rule["metric"]) + + for rule in rules: + rule_metric = rule["metric"] + if rule_metric not in metrics: + continue + + # Check if rule applies to this device + applies = False + if rule["device_id"] == device_id: + applies = True + elif rule["device_id"] is None and rule["group_id"] is None: + # Tenant-wide rule — skip if device-specific rule exists for same metric + if rule_metric in device_specific_metrics: + continue + applies = True + elif rule["group_id"] and rule["group_id"] in device_groups: + applies = True + + if not applies: + continue + + value = metrics[rule_metric] + breaching = _check_threshold(value, rule["operator"], rule["threshold"]) + + if breaching: + reached = await _increment_breach(r, device_id, rule["id"], rule["duration_polls"]) + if reached: + # Check if already firing + if await _has_open_alert(device_id, rule["id"]): + continue + + # Check flapping + is_flapping = await _check_flapping(r, device_id, rule["id"]) + + hostname = await _get_device_hostname(device_id) + message = f"{rule['name']}: {rule_metric} = {value} (threshold: {rule['operator']} {rule['threshold']})" + + alert_event = await _create_alert_event( + device_id=device_id, + tenant_id=tenant_id, + rule_id=rule["id"], + status="flapping" if is_flapping else "firing", + severity=rule["severity"], + metric=rule_metric, + value=value, + threshold=rule["threshold"], + message=message, + is_flapping=is_flapping, + ) + + if is_flapping: + logger.info( + "Alert %s for device %s is flapping — notifications suppressed", + rule["name"], device_id, + ) + else: + channels = await _get_channels_for_rule(rule["id"]) + if channels: + asyncio.create_task(_dispatch_async(alert_event, channels, hostname)) + else: + # Not breaching — reset counter and check for open alert to resolve + await _reset_breach(r, device_id, rule["id"]) + + if await _has_open_alert(device_id, rule["id"]): + # Check flapping before resolving + is_flapping = await _check_flapping(r, device_id, rule["id"]) + + await _resolve_alert(device_id, rule["id"]) + + hostname = await _get_device_hostname(device_id) + message = f"Resolved: {rule['name']}: {rule_metric} = {value}" + + resolved_event = await _create_alert_event( + device_id=device_id, + tenant_id=tenant_id, + rule_id=rule["id"], + status="resolved", + severity=rule["severity"], + metric=rule_metric, + value=value, + threshold=rule["threshold"], + message=message, + is_flapping=is_flapping, + ) + + if not is_flapping: + channels = await _get_channels_for_rule(rule["id"]) + if channels: + asyncio.create_task(_dispatch_async(resolved_event, channels, hostname)) + + +async def _get_offline_rule(tenant_id: str) -> dict | None: + """Look up the device_offline default rule for a tenant.""" + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text(""" + SELECT id, enabled FROM alert_rules + WHERE tenant_id = CAST(:tenant_id AS uuid) + AND metric = 'device_offline' AND is_default = TRUE + LIMIT 1 + """), + {"tenant_id": tenant_id}, + ) + row = result.fetchone() + if row: + return {"id": str(row[0]), "enabled": row[1]} + return None + + +async def evaluate_offline(device_id: str, tenant_id: str) -> None: + """Create a critical alert when a device goes offline. + + Uses the tenant's device_offline default rule if it exists and is enabled. + Falls back to system-level alert (rule_id=NULL) for backward compatibility. + """ + if await _is_device_in_maintenance(tenant_id, device_id): + logger.debug( + "Offline alert suppressed by maintenance window for device %s", + device_id, + ) + return + + rule = await _get_offline_rule(tenant_id) + rule_id = rule["id"] if rule else None + + # If rule exists but is disabled, skip alert creation (user opted out) + if rule and not rule["enabled"]: + return + + if rule_id: + if await _has_open_alert(device_id, rule_id): + return + else: + if await _has_open_alert(device_id, None, "offline"): + return + + hostname = await _get_device_hostname(device_id) + message = f"Device {hostname} is offline" + + alert_event = await _create_alert_event( + device_id=device_id, + tenant_id=tenant_id, + rule_id=rule_id, + status="firing", + severity="critical", + metric="offline", + value=None, + threshold=None, + message=message, + ) + + # Use rule-linked channels if available, otherwise tenant-wide channels + if rule_id: + channels = await _get_channels_for_rule(rule_id) + if not channels: + channels = await _get_channels_for_tenant(tenant_id) + else: + channels = await _get_channels_for_tenant(tenant_id) + + if channels: + asyncio.create_task(_dispatch_async(alert_event, channels, hostname)) + + +async def evaluate_online(device_id: str, tenant_id: str) -> None: + """Resolve offline alert when device comes back online.""" + rule = await _get_offline_rule(tenant_id) + rule_id = rule["id"] if rule else None + + if rule_id: + if not await _has_open_alert(device_id, rule_id): + return + await _resolve_alert(device_id, rule_id) + else: + if not await _has_open_alert(device_id, None, "offline"): + return + await _resolve_alert(device_id, None, "offline") + + hostname = await _get_device_hostname(device_id) + message = f"Device {hostname} is back online" + + resolved_event = await _create_alert_event( + device_id=device_id, + tenant_id=tenant_id, + rule_id=rule_id, + status="resolved", + severity="critical", + metric="offline", + value=None, + threshold=None, + message=message, + ) + + if rule_id: + channels = await _get_channels_for_rule(rule_id) + if not channels: + channels = await _get_channels_for_tenant(tenant_id) + else: + channels = await _get_channels_for_tenant(tenant_id) + + if channels: + asyncio.create_task(_dispatch_async(resolved_event, channels, hostname)) diff --git a/backend/app/services/api_key_service.py b/backend/app/services/api_key_service.py new file mode 100644 index 0000000..b6fefd5 --- /dev/null +++ b/backend/app/services/api_key_service.py @@ -0,0 +1,190 @@ +"""API key generation, validation, and management service. + +Keys use the mktp_ prefix for easy identification in logs. +Storage uses SHA-256 hash -- the plaintext key is never persisted. +Validation uses AdminAsyncSessionLocal since it runs before tenant context is set. +""" + +import hashlib +import json +import secrets +import uuid +from datetime import datetime, timezone +from typing import Optional + +from sqlalchemy import text + +from app.database import AdminAsyncSessionLocal + +# Allowed scopes for API keys +ALLOWED_SCOPES: set[str] = { + "devices:read", + "devices:write", + "config:read", + "config:write", + "alerts:read", + "firmware:write", +} + + +def generate_raw_key() -> str: + """Generate a raw API key with mktp_ prefix + 32 URL-safe random chars.""" + random_part = secrets.token_urlsafe(32) + return f"mktp_{random_part}" + + +def hash_key(raw_key: str) -> str: + """SHA-256 hex digest of a raw API key.""" + return hashlib.sha256(raw_key.encode()).hexdigest() + + +async def create_api_key( + db, + tenant_id: uuid.UUID, + user_id: uuid.UUID, + name: str, + scopes: list[str], + expires_at: Optional[datetime] = None, +) -> dict: + """Create a new API key. + + Returns dict with: + - key: the plaintext key (shown once, never again) + - id: the key UUID + - key_prefix: first 9 chars of the key (e.g. "mktp_abc1") + """ + raw_key = generate_raw_key() + key_hash_value = hash_key(raw_key) + key_prefix = raw_key[:9] # "mktp_" + first 4 random chars + + result = await db.execute( + text(""" + INSERT INTO api_keys (tenant_id, user_id, name, key_prefix, key_hash, scopes, expires_at) + VALUES (:tenant_id, :user_id, :name, :key_prefix, :key_hash, CAST(:scopes AS jsonb), :expires_at) + RETURNING id, created_at + """), + { + "tenant_id": str(tenant_id), + "user_id": str(user_id), + "name": name, + "key_prefix": key_prefix, + "key_hash": key_hash_value, + "scopes": json.dumps(scopes), + "expires_at": expires_at, + }, + ) + row = result.fetchone() + await db.commit() + + return { + "key": raw_key, + "id": row.id, + "key_prefix": key_prefix, + "name": name, + "scopes": scopes, + "expires_at": expires_at, + "created_at": row.created_at, + } + + +async def validate_api_key(raw_key: str) -> Optional[dict]: + """Validate an API key and return context if valid. + + Uses AdminAsyncSessionLocal since this runs before tenant context is set. + + Returns dict with tenant_id, user_id, scopes, key_id on success. + Returns None for invalid, expired, or revoked keys. + Updates last_used_at on successful validation. + """ + key_hash_value = hash_key(raw_key) + + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text(""" + SELECT id, tenant_id, user_id, scopes, expires_at, revoked_at + FROM api_keys + WHERE key_hash = :key_hash + """), + {"key_hash": key_hash_value}, + ) + row = result.fetchone() + + if not row: + return None + + # Check revoked + if row.revoked_at is not None: + return None + + # Check expired + if row.expires_at is not None and row.expires_at <= datetime.now(timezone.utc): + return None + + # Update last_used_at + await session.execute( + text(""" + UPDATE api_keys SET last_used_at = now() + WHERE id = :key_id + """), + {"key_id": str(row.id)}, + ) + await session.commit() + + return { + "tenant_id": row.tenant_id, + "user_id": row.user_id, + "scopes": row.scopes if row.scopes else [], + "key_id": row.id, + } + + +async def list_api_keys(db, tenant_id: uuid.UUID) -> list[dict]: + """List all API keys for a tenant (active and revoked). + + Returns keys with masked display (key_prefix + "..."). + """ + result = await db.execute( + text(""" + SELECT id, name, key_prefix, scopes, expires_at, last_used_at, + created_at, revoked_at, user_id + FROM api_keys + WHERE tenant_id = :tenant_id + ORDER BY created_at DESC + """), + {"tenant_id": str(tenant_id)}, + ) + rows = result.fetchall() + + return [ + { + "id": row.id, + "name": row.name, + "key_prefix": row.key_prefix, + "scopes": row.scopes if row.scopes else [], + "expires_at": row.expires_at.isoformat() if row.expires_at else None, + "last_used_at": row.last_used_at.isoformat() if row.last_used_at else None, + "created_at": row.created_at.isoformat() if row.created_at else None, + "revoked_at": row.revoked_at.isoformat() if row.revoked_at else None, + "user_id": str(row.user_id), + } + for row in rows + ] + + +async def revoke_api_key(db, tenant_id: uuid.UUID, key_id: uuid.UUID) -> bool: + """Revoke an API key by setting revoked_at = now(). + + Returns True if a key was actually revoked, False if not found or already revoked. + """ + result = await db.execute( + text(""" + UPDATE api_keys + SET revoked_at = now() + WHERE id = :key_id AND tenant_id = :tenant_id AND revoked_at IS NULL + RETURNING id + """), + {"key_id": str(key_id), "tenant_id": str(tenant_id)}, + ) + row = result.fetchone() + await db.commit() + return row is not None diff --git a/backend/app/services/audit_service.py b/backend/app/services/audit_service.py new file mode 100644 index 0000000..ae6a65d --- /dev/null +++ b/backend/app/services/audit_service.py @@ -0,0 +1,92 @@ +"""Centralized audit logging service. + +Provides a fire-and-forget ``log_action`` coroutine that inserts a row into +the ``audit_logs`` table. Uses raw SQL INSERT (not ORM) for minimal overhead. + +The function is wrapped in a try/except so that a logging failure **never** +breaks the parent operation. + +Phase 30: When details are non-empty, they are encrypted via OpenBao Transit +(per-tenant data key) and stored in encrypted_details. The plaintext details +column is set to '{}' for column compatibility. If Transit encryption fails +(e.g., OpenBao unavailable), details are stored in plaintext as a fallback. +""" + +import uuid +from typing import Any, Optional + +import structlog +from sqlalchemy import text +from sqlalchemy.ext.asyncio import AsyncSession + +logger = structlog.get_logger("audit") + + +async def log_action( + db: AsyncSession, + tenant_id: uuid.UUID, + user_id: uuid.UUID, + action: str, + resource_type: Optional[str] = None, + resource_id: Optional[str] = None, + device_id: Optional[uuid.UUID] = None, + details: Optional[dict[str, Any]] = None, + ip_address: Optional[str] = None, +) -> None: + """Insert a row into audit_logs. Swallows all exceptions on failure.""" + try: + import json as _json + + details_dict = details or {} + details_json = _json.dumps(details_dict) + encrypted_details: Optional[str] = None + + # Attempt Transit encryption for non-empty details + if details_dict: + try: + from app.services.crypto import encrypt_data_transit + + encrypted_details = await encrypt_data_transit( + details_json, str(tenant_id) + ) + # Encryption succeeded — clear plaintext details + details_json = _json.dumps({}) + except Exception: + # Transit unavailable — fall back to plaintext details + logger.warning( + "audit_transit_encryption_failed", + action=action, + tenant_id=str(tenant_id), + exc_info=True, + ) + # Keep details_json as-is (plaintext fallback) + encrypted_details = None + + await db.execute( + text( + "INSERT INTO audit_logs " + "(tenant_id, user_id, action, resource_type, resource_id, " + "device_id, details, encrypted_details, ip_address) " + "VALUES (:tenant_id, :user_id, :action, :resource_type, " + ":resource_id, :device_id, CAST(:details AS jsonb), " + ":encrypted_details, :ip_address)" + ), + { + "tenant_id": str(tenant_id), + "user_id": str(user_id), + "action": action, + "resource_type": resource_type, + "resource_id": resource_id, + "device_id": str(device_id) if device_id else None, + "details": details_json, + "encrypted_details": encrypted_details, + "ip_address": ip_address, + }, + ) + except Exception: + logger.warning( + "audit_log_insert_failed", + action=action, + tenant_id=str(tenant_id), + exc_info=True, + ) diff --git a/backend/app/services/auth.py b/backend/app/services/auth.py new file mode 100644 index 0000000..854a820 --- /dev/null +++ b/backend/app/services/auth.py @@ -0,0 +1,154 @@ +""" +JWT authentication service. + +Handles password hashing, JWT token creation, token verification, +and token revocation via Redis. +""" + +import time +import uuid +from datetime import UTC, datetime, timedelta +from typing import Optional + +import bcrypt +from fastapi import HTTPException, status +from jose import JWTError, jwt +from redis.asyncio import Redis + +from app.config import settings + +TOKEN_REVOCATION_PREFIX = "token_revoked:" + + +def hash_password(password: str) -> str: + """Hash a plaintext password using bcrypt. + + DEPRECATED: Used only by password reset (temporary bcrypt hash for + upgrade flow) and bootstrap_first_admin. Remove post-v6.0. + """ + return bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode() + + +def verify_password(plain_password: str, hashed_password: str) -> bool: + """Verify a plaintext password against a bcrypt hash. + + DEPRECATED: Used only by the one-time SRP upgrade flow (login with + must_upgrade_auth=True) and anti-enumeration dummy calls. Remove post-v6.0. + """ + return bcrypt.checkpw(plain_password.encode(), hashed_password.encode()) + + +def create_access_token( + user_id: uuid.UUID, + tenant_id: Optional[uuid.UUID], + role: str, +) -> str: + """ + Create a short-lived JWT access token. + + Claims: + sub: user UUID (subject) + tenant_id: tenant UUID or None for super_admin + role: user's role string + type: "access" (to distinguish from refresh tokens) + exp: expiry timestamp + """ + now = datetime.now(UTC) + expire = now + timedelta(minutes=settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES) + + payload = { + "sub": str(user_id), + "tenant_id": str(tenant_id) if tenant_id else None, + "role": role, + "type": "access", + "iat": now, + "exp": expire, + } + + return jwt.encode(payload, settings.JWT_SECRET_KEY, algorithm=settings.JWT_ALGORITHM) + + +def create_refresh_token(user_id: uuid.UUID) -> str: + """ + Create a long-lived JWT refresh token. + + Claims: + sub: user UUID (subject) + type: "refresh" (to distinguish from access tokens) + exp: expiry timestamp (7 days) + """ + now = datetime.now(UTC) + expire = now + timedelta(days=settings.JWT_REFRESH_TOKEN_EXPIRE_DAYS) + + payload = { + "sub": str(user_id), + "type": "refresh", + "iat": now, + "exp": expire, + } + + return jwt.encode(payload, settings.JWT_SECRET_KEY, algorithm=settings.JWT_ALGORITHM) + + +def verify_token(token: str, expected_type: str = "access") -> dict: + """ + Decode and validate a JWT token. + + Args: + token: JWT string to validate + expected_type: "access" or "refresh" + + Returns: + dict: Decoded payload (sub, tenant_id, role, type, exp, iat) + + Raises: + HTTPException 401: If token is invalid, expired, or wrong type + """ + credentials_exception = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Could not validate credentials", + headers={"WWW-Authenticate": "Bearer"}, + ) + + try: + payload = jwt.decode( + token, + settings.JWT_SECRET_KEY, + algorithms=[settings.JWT_ALGORITHM], + ) + except JWTError: + raise credentials_exception + + # Validate token type + token_type = payload.get("type") + if token_type != expected_type: + raise credentials_exception + + # Validate subject exists + sub = payload.get("sub") + if not sub: + raise credentials_exception + + return payload + + +async def revoke_user_tokens(redis: Redis, user_id: str) -> None: + """Mark all tokens for a user as revoked by storing current timestamp. + + Any refresh token issued before this timestamp will be rejected. + TTL matches maximum refresh token lifetime (7 days). + """ + key = f"{TOKEN_REVOCATION_PREFIX}{user_id}" + await redis.set(key, str(time.time()), ex=7 * 24 * 3600) + + +async def is_token_revoked(redis: Redis, user_id: str, issued_at: float) -> bool: + """Check if a token was issued before the user's revocation timestamp. + + Returns True if the token should be rejected. + """ + key = f"{TOKEN_REVOCATION_PREFIX}{user_id}" + revoked_at = await redis.get(key) + if revoked_at is None: + return False + return issued_at < float(revoked_at) diff --git a/backend/app/services/backup_scheduler.py b/backend/app/services/backup_scheduler.py new file mode 100644 index 0000000..cbaa5a1 --- /dev/null +++ b/backend/app/services/backup_scheduler.py @@ -0,0 +1,197 @@ +"""Dynamic backup scheduler — reads cron schedules from DB, manages APScheduler jobs.""" + +import logging +from typing import Optional + +from apscheduler.schedulers.asyncio import AsyncIOScheduler +from apscheduler.triggers.cron import CronTrigger + +from app.database import AdminAsyncSessionLocal +from app.models.config_backup import ConfigBackupSchedule +from app.models.device import Device +from app.services import backup_service + +from sqlalchemy import select + +logger = logging.getLogger(__name__) + +_scheduler: Optional[AsyncIOScheduler] = None + +# System default: 2am UTC daily +DEFAULT_CRON = "0 2 * * *" + + +def _cron_to_trigger(cron_expr: str) -> Optional[CronTrigger]: + """Parse a 5-field cron expression into an APScheduler CronTrigger. + + Returns None if the expression is invalid. + """ + try: + parts = cron_expr.strip().split() + if len(parts) != 5: + return None + minute, hour, day, month, day_of_week = parts + return CronTrigger( + minute=minute, hour=hour, day=day, month=month, + day_of_week=day_of_week, timezone="UTC", + ) + except Exception as e: + logger.warning("Invalid cron expression '%s': %s", cron_expr, e) + return None + + +def build_schedule_map(schedules: list) -> dict[str, list[dict]]: + """Group device schedules by cron expression. + + Returns: {cron_expression: [{device_id, tenant_id}, ...]} + """ + schedule_map: dict[str, list[dict]] = {} + for s in schedules: + if not s.enabled: + continue + cron = s.cron_expression or DEFAULT_CRON + if cron not in schedule_map: + schedule_map[cron] = [] + schedule_map[cron].append({ + "device_id": str(s.device_id), + "tenant_id": str(s.tenant_id), + }) + return schedule_map + + +async def _run_scheduled_backups(devices: list[dict]) -> None: + """Run backups for a list of devices. Each failure is isolated.""" + success_count = 0 + failure_count = 0 + + for dev_info in devices: + try: + async with AdminAsyncSessionLocal() as session: + await backup_service.run_backup( + device_id=dev_info["device_id"], + tenant_id=dev_info["tenant_id"], + trigger_type="scheduled", + db_session=session, + ) + await session.commit() + logger.info("Scheduled backup OK: device %s", dev_info["device_id"]) + success_count += 1 + except Exception as e: + logger.error( + "Scheduled backup FAILED: device %s: %s", + dev_info["device_id"], e, + ) + failure_count += 1 + + logger.info( + "Backup batch complete — %d succeeded, %d failed", + success_count, failure_count, + ) + + +async def _load_effective_schedules() -> list: + """Load all effective schedules from DB. + + For each device: use device-specific schedule if exists, else tenant default. + Returns flat list of (device_id, tenant_id, cron_expression, enabled) objects. + """ + from types import SimpleNamespace + + async with AdminAsyncSessionLocal() as session: + # Get all devices + dev_result = await session.execute(select(Device)) + devices = dev_result.scalars().all() + + # Get all schedules + sched_result = await session.execute(select(ConfigBackupSchedule)) + schedules = sched_result.scalars().all() + + # Index: device-specific and tenant defaults + device_schedules = {} # device_id -> schedule + tenant_defaults = {} # tenant_id -> schedule + + for s in schedules: + if s.device_id: + device_schedules[str(s.device_id)] = s + else: + tenant_defaults[str(s.tenant_id)] = s + + effective = [] + for dev in devices: + dev_id = str(dev.id) + tenant_id = str(dev.tenant_id) + + if dev_id in device_schedules: + sched = device_schedules[dev_id] + elif tenant_id in tenant_defaults: + sched = tenant_defaults[tenant_id] + else: + # No schedule configured — use system default + sched = None + + effective.append(SimpleNamespace( + device_id=dev_id, + tenant_id=tenant_id, + cron_expression=sched.cron_expression if sched else DEFAULT_CRON, + enabled=sched.enabled if sched else True, + )) + + return effective + + +async def sync_schedules() -> None: + """Reload all schedules from DB and reconfigure APScheduler jobs.""" + global _scheduler + if not _scheduler: + return + + # Remove all existing backup jobs (keep other jobs like firmware check) + for job in _scheduler.get_jobs(): + if job.id.startswith("backup_cron_"): + job.remove() + + schedules = await _load_effective_schedules() + schedule_map = build_schedule_map(schedules) + + for cron_expr, devices in schedule_map.items(): + trigger = _cron_to_trigger(cron_expr) + if not trigger: + logger.warning("Skipping invalid cron '%s', using default", cron_expr) + trigger = _cron_to_trigger(DEFAULT_CRON) + + job_id = f"backup_cron_{cron_expr.replace(' ', '_')}" + _scheduler.add_job( + _run_scheduled_backups, + trigger=trigger, + args=[devices], + id=job_id, + name=f"Backup: {cron_expr} ({len(devices)} devices)", + max_instances=1, + replace_existing=True, + ) + logger.info("Scheduled %d devices with cron '%s'", len(devices), cron_expr) + + +async def on_schedule_change(tenant_id: str, device_id: str) -> None: + """Called when a schedule is created/updated via API. Hot-reloads all schedules.""" + logger.info("Schedule changed for tenant=%s device=%s, resyncing", tenant_id, device_id) + await sync_schedules() + + +async def start_backup_scheduler() -> None: + """Start the APScheduler and load initial schedules from DB.""" + global _scheduler + _scheduler = AsyncIOScheduler(timezone="UTC") + _scheduler.start() + + await sync_schedules() + logger.info("Backup scheduler started with dynamic schedules") + + +async def stop_backup_scheduler() -> None: + """Gracefully shutdown the scheduler.""" + global _scheduler + if _scheduler: + _scheduler.shutdown(wait=False) + _scheduler = None + logger.info("Backup scheduler stopped") diff --git a/backend/app/services/backup_service.py b/backend/app/services/backup_service.py new file mode 100644 index 0000000..e9a50fd --- /dev/null +++ b/backend/app/services/backup_service.py @@ -0,0 +1,378 @@ +"""SSH-based config capture service for RouterOS devices. + +This service handles: +1. capture_export() — SSH to device, run /export compact, return stdout text +2. capture_binary_backup() — SSH to device, trigger /system backup save, SFTP-download result +3. run_backup() — Orchestrate a full backup: capture + git commit + DB record + +All functions are async (asyncssh is asyncio-native). + +Security policy: + known_hosts=None is intentional — RouterOS devices use self-signed SSH host keys + that change on reset or key regeneration. This mirrors InsecureSkipVerify=true + used in the poller's TLS connection. The threat model accepts device impersonation + risk in exchange for operational simplicity (no pre-enrollment of host keys needed). + See Pitfall 2 in 04-RESEARCH.md. + +pygit2 calls are synchronous C bindings and MUST be wrapped in run_in_executor. +See Pitfall 3 in 04-RESEARCH.md. + +Phase 30: ALL backups (manual, scheduled, pre-restore) are encrypted via OpenBao +Transit (Tier 2) before git commit. The server retains decrypt capability for +on-demand viewing. Raw files in git are ciphertext; the API decrypts on GET. +""" + +import asyncio +import base64 +import io +import json +import logging +from datetime import datetime, timezone + +import asyncssh +from sqlalchemy.ext.asyncio import AsyncSession + +from app.config import settings +from app.database import AdminAsyncSessionLocal, set_tenant_context +from app.models.config_backup import ConfigBackupRun +from app.models.device import Device +from app.services import git_store +from app.services.crypto import decrypt_credentials_hybrid + +logger = logging.getLogger(__name__) + +# Fixed backup file name on device flash — overwrites on each run so files +# don't accumulate. See Pitfall 4 in 04-RESEARCH.md. +_BACKUP_NAME = "portal-backup" + + +async def capture_export( + ip: str, + port: int = 22, + username: str = "", + password: str = "", +) -> str: + """SSH to a RouterOS device and capture /export compact output. + + Args: + ip: Device IP address. + port: SSH port (default 22; RouterOS default is 22). + username: SSH login username. + password: SSH login password. + + Returns: + The raw RSC text from /export compact (may include RouterOS header line). + + Raises: + asyncssh.Error: On SSH connection or command execution failure. + """ + async with asyncssh.connect( + ip, + port=port, + username=username, + password=password, + known_hosts=None, # RouterOS self-signed host keys — see module docstring + connect_timeout=30, + ) as conn: + result = await conn.run("/export compact", check=True) + return result.stdout + + +async def capture_binary_backup( + ip: str, + port: int = 22, + username: str = "", + password: str = "", +) -> bytes: + """SSH to a RouterOS device, create a binary backup, SFTP-download it, then clean up. + + Uses a fixed backup name ({_BACKUP_NAME}.backup) so the file overwrites + on subsequent runs, preventing flash storage accumulation. + + The cleanup (removing the file from device flash) runs in a try/finally + block so cleanup failures don't mask the actual backup error but are + logged for observability. See Pitfall 4 in 04-RESEARCH.md. + + Args: + ip: Device IP address. + port: SSH port (default 22). + username: SSH login username. + password: SSH login password. + + Returns: + Raw bytes of the binary backup file. + + Raises: + asyncssh.Error: On SSH connection, command, or SFTP failure. + """ + async with asyncssh.connect( + ip, + port=port, + username=username, + password=password, + known_hosts=None, + connect_timeout=30, + ) as conn: + # Step 1: Trigger backup creation on device flash. + await conn.run( + f"/system backup save name={_BACKUP_NAME} dont-encrypt=yes", + check=True, + ) + + buf = io.BytesIO() + try: + # Step 2: SFTP-download the backup file. + async with conn.start_sftp_client() as sftp: + async with sftp.open(f"{_BACKUP_NAME}.backup", "rb") as f: + buf.write(await f.read()) + finally: + # Step 3: Remove backup file from device flash (best-effort cleanup). + try: + await conn.run(f"/file remove {_BACKUP_NAME}.backup", check=True) + except Exception as cleanup_err: + logger.warning( + "Failed to remove backup file from device %s: %s", + ip, + cleanup_err, + ) + + return buf.getvalue() + + +async def run_backup( + device_id: str, + tenant_id: str, + trigger_type: str, + db_session: AsyncSession | None = None, +) -> dict: + """Orchestrate a full config backup for a device. + + Steps: + 1. Load device from DB (ip_address, encrypted_credentials). + 2. Decrypt credentials using crypto.decrypt_credentials(). + 3. Capture /export compact and binary backup concurrently via asyncio.gather(). + 4. Compute line delta vs the most recent export.rsc in git (None for first backup). + 5. Commit both files to the tenant's bare git repo (run_in_executor for pygit2). + 6. Insert ConfigBackupRun record with commit SHA, trigger type, line deltas. + 7. Return summary dict. + + Args: + device_id: Device UUID as string. + tenant_id: Tenant UUID as string. + trigger_type: 'scheduled' | 'manual' | 'pre-restore' + db_session: Optional AsyncSession with RLS context already set. + If None, uses AdminAsyncSessionLocal (for scheduler context). + + Returns: + Dict: {"commit_sha": str, "trigger_type": str, "lines_added": int|None, "lines_removed": int|None} + + Raises: + ValueError: If device not found or missing credentials. + asyncssh.Error: On SSH/SFTP failure. + """ + loop = asyncio.get_event_loop() + ts = datetime.now(timezone.utc).isoformat() + + # ----------------------------------------------------------------------- + # Step 1: Load device from DB + # ----------------------------------------------------------------------- + if db_session is not None: + session = db_session + should_close = False + else: + # Scheduler context: use admin session (cross-tenant; RLS bypassed) + session = AdminAsyncSessionLocal() + should_close = True + + try: + from sqlalchemy import select + + if should_close: + # Admin session doesn't have RLS context — query directly. + result = await session.execute( + select(Device).where( + Device.id == device_id, # type: ignore[arg-type] + Device.tenant_id == tenant_id, # type: ignore[arg-type] + ) + ) + else: + result = await session.execute( + select(Device).where(Device.id == device_id) # type: ignore[arg-type] + ) + + device = result.scalar_one_or_none() + if device is None: + raise ValueError(f"Device {device_id!r} not found for tenant {tenant_id!r}") + + if not device.encrypted_credentials_transit and not device.encrypted_credentials: + raise ValueError( + f"Device {device_id!r} has no stored credentials — cannot perform backup" + ) + + # ----------------------------------------------------------------------- + # Step 2: Decrypt credentials (dual-read: Transit preferred, legacy fallback) + # ----------------------------------------------------------------------- + key = settings.get_encryption_key_bytes() + creds_json = await decrypt_credentials_hybrid( + device.encrypted_credentials_transit, + device.encrypted_credentials, + str(device.tenant_id), + key, + ) + creds = json.loads(creds_json) + ssh_username = creds.get("username", "") + ssh_password = creds.get("password", "") + ip = device.ip_address + + hostname = device.hostname or ip + + # ----------------------------------------------------------------------- + # Step 3: Capture export and binary backup concurrently + # ----------------------------------------------------------------------- + logger.info( + "Starting %s backup for device %s (%s) tenant %s", + trigger_type, + hostname, + ip, + tenant_id, + ) + + export_text, binary_backup = await asyncio.gather( + capture_export(ip, username=ssh_username, password=ssh_password), + capture_binary_backup(ip, username=ssh_username, password=ssh_password), + ) + + # ----------------------------------------------------------------------- + # Step 4: Compute line delta vs prior version + # ----------------------------------------------------------------------- + lines_added: int | None = None + lines_removed: int | None = None + + prior_commits = await loop.run_in_executor( + None, git_store.list_device_commits, tenant_id, device_id + ) + + if prior_commits: + try: + prior_export_bytes = await loop.run_in_executor( + None, git_store.read_file, tenant_id, prior_commits[0]["sha"], device_id, "export.rsc" + ) + prior_text = prior_export_bytes.decode("utf-8", errors="replace") + lines_added, lines_removed = await loop.run_in_executor( + None, git_store.compute_line_delta, prior_text, export_text + ) + except Exception as delta_err: + logger.warning( + "Failed to compute line delta for device %s: %s", + device_id, + delta_err, + ) + # Keep lines_added/lines_removed as None on error — non-fatal + else: + # First backup: all lines are "added", none removed + all_lines = len(export_text.splitlines()) + lines_added = all_lines + lines_removed = 0 + + # ----------------------------------------------------------------------- + # Step 5: Encrypt ALL backups via Transit (Tier 2: OpenBao Transit) + # ----------------------------------------------------------------------- + encryption_tier: int | None = None + git_export_content = export_text + git_binary_content = binary_backup + + try: + from app.services.crypto import encrypt_data_transit + + encrypted_export = await encrypt_data_transit( + export_text, tenant_id + ) + encrypted_binary = await encrypt_data_transit( + base64.b64encode(binary_backup).decode(), tenant_id + ) + # Transit ciphertext is text — store directly in git + git_export_content = encrypted_export + git_binary_content = encrypted_binary.encode("utf-8") + encryption_tier = 2 + logger.info( + "Tier 2 Transit encryption applied for %s backup of device %s", + trigger_type, + device_id, + ) + except Exception as enc_err: + # Transit unavailable — fall back to plaintext (non-fatal) + logger.warning( + "Transit encryption failed for %s backup of device %s, " + "storing plaintext: %s", + trigger_type, + device_id, + enc_err, + ) + # Keep encryption_tier = None (plaintext fallback) + + # ----------------------------------------------------------------------- + # Step 6: Commit to git (wrapped in run_in_executor — pygit2 is sync C bindings) + # ----------------------------------------------------------------------- + commit_message = ( + f"{trigger_type}: {hostname} ({ip}) at {ts}" + ) + + commit_sha = await loop.run_in_executor( + None, + git_store.commit_backup, + tenant_id, + device_id, + git_export_content, + git_binary_content, + commit_message, + ) + + logger.info( + "Committed backup for device %s to git SHA %s (tier=%s)", + device_id, + commit_sha[:8], + encryption_tier, + ) + + # ----------------------------------------------------------------------- + # Step 7: Insert ConfigBackupRun record + # ----------------------------------------------------------------------- + if not should_close: + # RLS-scoped session from API context — record directly + backup_run = ConfigBackupRun( + device_id=device.id, + tenant_id=device.tenant_id, + commit_sha=commit_sha, + trigger_type=trigger_type, + lines_added=lines_added, + lines_removed=lines_removed, + encryption_tier=encryption_tier, + ) + session.add(backup_run) + await session.flush() + else: + # Admin session — set tenant context before insert so RLS policy is satisfied + async with AdminAsyncSessionLocal() as admin_session: + await set_tenant_context(admin_session, str(device.tenant_id)) + backup_run = ConfigBackupRun( + device_id=device.id, + tenant_id=device.tenant_id, + commit_sha=commit_sha, + trigger_type=trigger_type, + lines_added=lines_added, + lines_removed=lines_removed, + encryption_tier=encryption_tier, + ) + admin_session.add(backup_run) + await admin_session.commit() + + return { + "commit_sha": commit_sha, + "trigger_type": trigger_type, + "lines_added": lines_added, + "lines_removed": lines_removed, + } + + finally: + if should_close: + await session.close() diff --git a/backend/app/services/ca_service.py b/backend/app/services/ca_service.py new file mode 100644 index 0000000..ba5c3cf --- /dev/null +++ b/backend/app/services/ca_service.py @@ -0,0 +1,462 @@ +"""Certificate Authority service — CA generation, device cert signing, lifecycle. + +This module provides the core PKI functionality for the Internal Certificate +Authority feature. All functions receive an ``AsyncSession`` and an +``encryption_key`` as parameters (no direct Settings access) for testability. + +Security notes: +- CA private keys are encrypted with AES-256-GCM before database storage. +- PEM key material is NEVER logged. +- Device keys are decrypted only when needed for NATS transmission. +""" + +from __future__ import annotations + +import datetime +import ipaddress +import logging +from uuid import UUID + +from cryptography import x509 +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import rsa +from cryptography.x509.oid import ExtendedKeyUsageOID, NameOID +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.models.certificate import CertificateAuthority, DeviceCertificate +from app.services.crypto import ( + decrypt_credentials_hybrid, + encrypt_credentials_transit, +) + +logger = logging.getLogger(__name__) + +# Valid status transitions for the device certificate lifecycle. +_VALID_TRANSITIONS: dict[str, set[str]] = { + "issued": {"deploying"}, + "deploying": {"deployed", "issued"}, # issued = rollback on deploy failure + "deployed": {"expiring", "revoked", "superseded"}, + "expiring": {"expired", "revoked", "superseded"}, + "expired": {"superseded"}, + "revoked": set(), + "superseded": set(), +} + + +# --------------------------------------------------------------------------- +# CA Generation +# --------------------------------------------------------------------------- + +async def generate_ca( + db: AsyncSession, + tenant_id: UUID, + common_name: str, + validity_years: int, + encryption_key: bytes, +) -> CertificateAuthority: + """Generate a self-signed root CA for a tenant. + + Args: + db: Async database session. + tenant_id: Tenant UUID — only one CA per tenant. + common_name: CN for the CA certificate (e.g., "Portal Root CA"). + validity_years: How many years the CA cert is valid. + encryption_key: 32-byte AES-256-GCM key for encrypting the CA private key. + + Returns: + The newly created ``CertificateAuthority`` model instance. + + Raises: + ValueError: If the tenant already has a CA. + """ + # Ensure one CA per tenant + existing = await get_ca_for_tenant(db, tenant_id) + if existing is not None: + raise ValueError( + f"Tenant {tenant_id} already has a CA (id={existing.id}). " + "Delete the existing CA before creating a new one." + ) + + # Generate RSA 2048 key pair + ca_key = rsa.generate_private_key(public_exponent=65537, key_size=2048) + + now = datetime.datetime.now(datetime.timezone.utc) + expiry = now + datetime.timedelta(days=365 * validity_years) + + subject = issuer = x509.Name([ + x509.NameAttribute(NameOID.ORGANIZATION_NAME, "The Other Dude"), + x509.NameAttribute(NameOID.COMMON_NAME, common_name), + ]) + + ca_cert = ( + x509.CertificateBuilder() + .subject_name(subject) + .issuer_name(issuer) + .public_key(ca_key.public_key()) + .serial_number(x509.random_serial_number()) + .not_valid_before(now) + .not_valid_after(expiry) + .add_extension( + x509.BasicConstraints(ca=True, path_length=0), critical=True + ) + .add_extension( + x509.KeyUsage( + digital_signature=True, + content_commitment=False, + key_encipherment=False, + data_encipherment=False, + key_agreement=False, + key_cert_sign=True, + crl_sign=True, + encipher_only=False, + decipher_only=False, + ), + critical=True, + ) + .add_extension( + x509.SubjectKeyIdentifier.from_public_key(ca_key.public_key()), + critical=False, + ) + .sign(ca_key, hashes.SHA256()) + ) + + # Serialize public cert to PEM + cert_pem = ca_cert.public_bytes(serialization.Encoding.PEM).decode("utf-8") + + # Serialize private key to PEM, then encrypt with OpenBao Transit + key_pem = ca_key.private_bytes( + serialization.Encoding.PEM, + serialization.PrivateFormat.PKCS8, + serialization.NoEncryption(), + ).decode("utf-8") + encrypted_key_transit = await encrypt_credentials_transit(key_pem, str(tenant_id)) + + # Compute SHA-256 fingerprint (colon-separated hex) + fingerprint_bytes = ca_cert.fingerprint(hashes.SHA256()) + fingerprint = ":".join(f"{b:02X}" for b in fingerprint_bytes) + + # Serial number as hex string + serial_hex = format(ca_cert.serial_number, "X") + + model = CertificateAuthority( + tenant_id=tenant_id, + common_name=common_name, + cert_pem=cert_pem, + encrypted_private_key=b"", # Legacy column kept for schema compat + encrypted_private_key_transit=encrypted_key_transit, + serial_number=serial_hex, + fingerprint_sha256=fingerprint, + not_valid_before=now, + not_valid_after=expiry, + ) + db.add(model) + await db.flush() + + logger.info( + "Generated CA for tenant %s: cn=%s fingerprint=%s", + tenant_id, + common_name, + fingerprint, + ) + return model + + +# --------------------------------------------------------------------------- +# Device Certificate Signing +# --------------------------------------------------------------------------- + +async def sign_device_cert( + db: AsyncSession, + ca: CertificateAuthority, + device_id: UUID, + hostname: str, + ip_address: str, + validity_days: int, + encryption_key: bytes, +) -> DeviceCertificate: + """Sign a per-device TLS certificate using the tenant's CA. + + Args: + db: Async database session. + ca: The tenant's CertificateAuthority model instance. + device_id: UUID of the device receiving the cert. + hostname: Device hostname — used as CN and SAN DNSName. + ip_address: Device IP — used as SAN IPAddress. + validity_days: Certificate validity in days. + encryption_key: 32-byte AES-256-GCM key for encrypting the device private key. + + Returns: + The newly created ``DeviceCertificate`` model instance (status='issued'). + """ + # Decrypt CA private key (dual-read: Transit preferred, legacy fallback) + ca_key_pem = await decrypt_credentials_hybrid( + ca.encrypted_private_key_transit, + ca.encrypted_private_key, + str(ca.tenant_id), + encryption_key, + ) + ca_key = serialization.load_pem_private_key( + ca_key_pem.encode("utf-8"), password=None + ) + + # Load CA certificate for issuer info and AuthorityKeyIdentifier + ca_cert = x509.load_pem_x509_certificate(ca.cert_pem.encode("utf-8")) + + # Generate device RSA 2048 key + device_key = rsa.generate_private_key(public_exponent=65537, key_size=2048) + + now = datetime.datetime.now(datetime.timezone.utc) + expiry = now + datetime.timedelta(days=validity_days) + + device_cert = ( + x509.CertificateBuilder() + .subject_name( + x509.Name([ + x509.NameAttribute(NameOID.ORGANIZATION_NAME, "The Other Dude"), + x509.NameAttribute(NameOID.COMMON_NAME, hostname), + ]) + ) + .issuer_name(ca_cert.subject) + .public_key(device_key.public_key()) + .serial_number(x509.random_serial_number()) + .not_valid_before(now) + .not_valid_after(expiry) + .add_extension( + x509.BasicConstraints(ca=False, path_length=None), critical=True + ) + .add_extension( + x509.KeyUsage( + digital_signature=True, + content_commitment=False, + key_encipherment=True, + data_encipherment=False, + key_agreement=False, + key_cert_sign=False, + crl_sign=False, + encipher_only=False, + decipher_only=False, + ), + critical=True, + ) + .add_extension( + x509.ExtendedKeyUsage([ExtendedKeyUsageOID.SERVER_AUTH]), + critical=False, + ) + .add_extension( + x509.SubjectAlternativeName([ + x509.IPAddress(ipaddress.ip_address(ip_address)), + x509.DNSName(hostname), + ]), + critical=False, + ) + .add_extension( + x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier( + ca_cert.extensions.get_extension_for_class( + x509.SubjectKeyIdentifier + ).value + ), + critical=False, + ) + .sign(ca_key, hashes.SHA256()) + ) + + # Serialize device cert and key to PEM + cert_pem = device_cert.public_bytes(serialization.Encoding.PEM).decode("utf-8") + key_pem = device_key.private_bytes( + serialization.Encoding.PEM, + serialization.PrivateFormat.PKCS8, + serialization.NoEncryption(), + ).decode("utf-8") + + # Encrypt device private key via OpenBao Transit + encrypted_key_transit = await encrypt_credentials_transit(key_pem, str(ca.tenant_id)) + + # Compute fingerprint + fingerprint_bytes = device_cert.fingerprint(hashes.SHA256()) + fingerprint = ":".join(f"{b:02X}" for b in fingerprint_bytes) + + serial_hex = format(device_cert.serial_number, "X") + + model = DeviceCertificate( + tenant_id=ca.tenant_id, + device_id=device_id, + ca_id=ca.id, + common_name=hostname, + serial_number=serial_hex, + fingerprint_sha256=fingerprint, + cert_pem=cert_pem, + encrypted_private_key=b"", # Legacy column kept for schema compat + encrypted_private_key_transit=encrypted_key_transit, + not_valid_before=now, + not_valid_after=expiry, + status="issued", + ) + db.add(model) + await db.flush() + + logger.info( + "Signed device cert for device %s: cn=%s fingerprint=%s", + device_id, + hostname, + fingerprint, + ) + return model + + +# --------------------------------------------------------------------------- +# Queries +# --------------------------------------------------------------------------- + +async def get_ca_for_tenant( + db: AsyncSession, + tenant_id: UUID, +) -> CertificateAuthority | None: + """Return the tenant's CA, or None if not yet initialized.""" + result = await db.execute( + select(CertificateAuthority).where( + CertificateAuthority.tenant_id == tenant_id + ) + ) + return result.scalar_one_or_none() + + +async def get_device_certs( + db: AsyncSession, + tenant_id: UUID, + device_id: UUID | None = None, +) -> list[DeviceCertificate]: + """List device certificates for a tenant. + + Args: + db: Async database session. + tenant_id: Tenant UUID. + device_id: If provided, filter to certs for this device only. + + Returns: + List of DeviceCertificate models (excludes superseded by default). + """ + stmt = ( + select(DeviceCertificate) + .where(DeviceCertificate.tenant_id == tenant_id) + .where(DeviceCertificate.status != "superseded") + ) + if device_id is not None: + stmt = stmt.where(DeviceCertificate.device_id == device_id) + stmt = stmt.order_by(DeviceCertificate.created_at.desc()) + result = await db.execute(stmt) + return list(result.scalars().all()) + + +# --------------------------------------------------------------------------- +# Status Management +# --------------------------------------------------------------------------- + +async def update_cert_status( + db: AsyncSession, + cert_id: UUID, + status: str, + deployed_at: datetime.datetime | None = None, +) -> DeviceCertificate: + """Update a device certificate's lifecycle status. + + Validates that the transition is allowed by the state machine: + issued -> deploying -> deployed -> expiring -> expired + \\-> revoked + \\-> superseded + + Args: + db: Async database session. + cert_id: Certificate UUID. + status: New status value. + deployed_at: Timestamp to set when transitioning to 'deployed'. + + Returns: + The updated DeviceCertificate model. + + Raises: + ValueError: If the certificate is not found or the transition is invalid. + """ + result = await db.execute( + select(DeviceCertificate).where(DeviceCertificate.id == cert_id) + ) + cert = result.scalar_one_or_none() + if cert is None: + raise ValueError(f"Device certificate {cert_id} not found") + + allowed = _VALID_TRANSITIONS.get(cert.status, set()) + if status not in allowed: + raise ValueError( + f"Invalid status transition: {cert.status} -> {status}. " + f"Allowed transitions from '{cert.status}': {allowed or 'none'}" + ) + + cert.status = status + cert.updated_at = datetime.datetime.now(datetime.timezone.utc) + + if status == "deployed" and deployed_at is not None: + cert.deployed_at = deployed_at + elif status == "deployed": + cert.deployed_at = cert.updated_at + + await db.flush() + + logger.info( + "Updated cert %s status to %s", + cert_id, + status, + ) + return cert + + +# --------------------------------------------------------------------------- +# Cert Data for Deployment +# --------------------------------------------------------------------------- + +async def get_cert_for_deploy( + db: AsyncSession, + cert_id: UUID, + encryption_key: bytes, +) -> tuple[str, str, str]: + """Retrieve and decrypt certificate data for NATS deployment. + + Returns the device cert PEM, decrypted device key PEM, and the CA cert + PEM — everything needed to push to a device via the Go poller. + + Args: + db: Async database session. + cert_id: Device certificate UUID. + encryption_key: 32-byte AES-256-GCM key to decrypt the device private key. + + Returns: + Tuple of (cert_pem, key_pem_decrypted, ca_cert_pem). + + Raises: + ValueError: If the certificate or its CA is not found. + """ + result = await db.execute( + select(DeviceCertificate).where(DeviceCertificate.id == cert_id) + ) + cert = result.scalar_one_or_none() + if cert is None: + raise ValueError(f"Device certificate {cert_id} not found") + + # Fetch the CA for the ca_cert_pem + ca_result = await db.execute( + select(CertificateAuthority).where( + CertificateAuthority.id == cert.ca_id + ) + ) + ca = ca_result.scalar_one_or_none() + if ca is None: + raise ValueError(f"CA {cert.ca_id} not found for certificate {cert_id}") + + # Decrypt device private key (dual-read: Transit preferred, legacy fallback) + key_pem = await decrypt_credentials_hybrid( + cert.encrypted_private_key_transit, + cert.encrypted_private_key, + str(cert.tenant_id), + encryption_key, + ) + + return cert.cert_pem, key_pem, ca.cert_pem diff --git a/backend/app/services/config_change_subscriber.py b/backend/app/services/config_change_subscriber.py new file mode 100644 index 0000000..fec1969 --- /dev/null +++ b/backend/app/services/config_change_subscriber.py @@ -0,0 +1,118 @@ +"""NATS subscriber for config change events from the Go poller. + +Triggers automatic backups when out-of-band config changes are detected, +with 5-minute deduplication to prevent rapid-fire backups. +""" + +import json +import logging +from datetime import datetime, timedelta, timezone +from typing import Any, Optional + +from sqlalchemy import select + +from app.config import settings +from app.database import AdminAsyncSessionLocal +from app.models.config_backup import ConfigBackupRun +from app.services import backup_service + +logger = logging.getLogger(__name__) + +DEDUP_WINDOW_MINUTES = 5 + +_nc: Optional[Any] = None + + +async def _last_backup_within_dedup_window(device_id: str) -> bool: + """Check if a backup was created for this device in the last N minutes.""" + cutoff = datetime.now(timezone.utc) - timedelta(minutes=DEDUP_WINDOW_MINUTES) + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + select(ConfigBackupRun) + .where( + ConfigBackupRun.device_id == device_id, + ConfigBackupRun.created_at > cutoff, + ) + .limit(1) + ) + return result.scalar_one_or_none() is not None + + +async def handle_config_changed(event: dict) -> None: + """Handle a config change event. Trigger backup with dedup.""" + device_id = event.get("device_id") + tenant_id = event.get("tenant_id") + + if not device_id or not tenant_id: + logger.warning("Config change event missing device_id or tenant_id: %s", event) + return + + # Dedup check + if await _last_backup_within_dedup_window(device_id): + logger.info( + "Config change on device %s — skipping backup (within %dm dedup window)", + device_id, DEDUP_WINDOW_MINUTES, + ) + return + + logger.info( + "Config change detected on device %s (tenant %s): %s -> %s", + device_id, tenant_id, + event.get("old_timestamp", "?"), + event.get("new_timestamp", "?"), + ) + + try: + async with AdminAsyncSessionLocal() as session: + await backup_service.run_backup( + device_id=device_id, + tenant_id=tenant_id, + trigger_type="config-change", + db_session=session, + ) + await session.commit() + logger.info("Config-change backup completed for device %s", device_id) + except Exception as e: + logger.error("Config-change backup failed for device %s: %s", device_id, e) + + +async def _on_message(msg) -> None: + """NATS message handler for config.changed.> subjects.""" + try: + event = json.loads(msg.data.decode()) + await handle_config_changed(event) + await msg.ack() + except Exception as e: + logger.error("Error handling config change message: %s", e) + await msg.nak() + + +async def start_config_change_subscriber() -> Optional[Any]: + """Connect to NATS and subscribe to config.changed.> events.""" + import nats + + global _nc + try: + logger.info("NATS config-change: connecting to %s", settings.NATS_URL) + _nc = await nats.connect(settings.NATS_URL) + js = _nc.jetstream() + await js.subscribe( + "config.changed.>", + cb=_on_message, + durable="api-config-change-consumer", + stream="DEVICE_EVENTS", + manual_ack=True, + ) + logger.info("Config change subscriber started") + return _nc + except Exception as e: + logger.error("Failed to start config change subscriber: %s", e) + return None + + +async def stop_config_change_subscriber() -> None: + """Gracefully close the NATS connection.""" + global _nc + if _nc: + await _nc.drain() + _nc = None diff --git a/backend/app/services/crypto.py b/backend/app/services/crypto.py new file mode 100644 index 0000000..3aa5e01 --- /dev/null +++ b/backend/app/services/crypto.py @@ -0,0 +1,183 @@ +""" +Credential encryption/decryption with dual-read (OpenBao Transit + legacy AES-256-GCM). + +This module provides two encryption paths: +1. Legacy (sync): AES-256-GCM with static CREDENTIAL_ENCRYPTION_KEY — used for fallback reads. +2. Transit (async): OpenBao Transit per-tenant keys — used for all new writes. + +The dual-read pattern: +- New writes always use OpenBao Transit (encrypt_credentials_transit). +- Reads prefer Transit ciphertext, falling back to legacy (decrypt_credentials_hybrid). +- Legacy functions are preserved for backward compatibility during migration. + +Security properties: +- AES-256-GCM provides authenticated encryption (confidentiality + integrity) +- A unique 12-byte random nonce is generated per legacy encryption operation +- OpenBao Transit keys are AES-256-GCM96, managed entirely by OpenBao +- Ciphertext format: "vault:v1:..." for Transit, raw bytes for legacy +""" + +import os + + +def encrypt_credentials(plaintext: str, key: bytes) -> bytes: + """ + Encrypt a plaintext string using AES-256-GCM. + + Args: + plaintext: The credential string to encrypt (e.g., JSON with username/password) + key: 32-byte encryption key + + Returns: + bytes: nonce (12 bytes) + ciphertext + GCM tag (16 bytes) + + Raises: + ValueError: If key is not exactly 32 bytes + """ + if len(key) != 32: + raise ValueError(f"Key must be exactly 32 bytes, got {len(key)}") + + from cryptography.hazmat.primitives.ciphers.aead import AESGCM + + aesgcm = AESGCM(key) + nonce = os.urandom(12) # 96-bit nonce, unique per encryption + ciphertext = aesgcm.encrypt(nonce, plaintext.encode("utf-8"), None) + + # Store as: nonce (12 bytes) + ciphertext + GCM tag (included in ciphertext by library) + return nonce + ciphertext + + +def decrypt_credentials(ciphertext: bytes, key: bytes) -> str: + """ + Decrypt AES-256-GCM encrypted credentials. + + Args: + ciphertext: bytes from encrypt_credentials (nonce + encrypted data + GCM tag) + key: 32-byte encryption key (must match the key used for encryption) + + Returns: + str: The original plaintext string + + Raises: + ValueError: If key is not exactly 32 bytes + cryptography.exceptions.InvalidTag: If authentication fails (tampered data or wrong key) + """ + if len(key) != 32: + raise ValueError(f"Key must be exactly 32 bytes, got {len(key)}") + + from cryptography.hazmat.primitives.ciphers.aead import AESGCM + + nonce = ciphertext[:12] + encrypted_data = ciphertext[12:] + + aesgcm = AESGCM(key) + plaintext_bytes = aesgcm.decrypt(nonce, encrypted_data, None) + + return plaintext_bytes.decode("utf-8") + + +# --------------------------------------------------------------------------- +# OpenBao Transit functions (async, per-tenant keys) +# --------------------------------------------------------------------------- + + +async def encrypt_credentials_transit(plaintext: str, tenant_id: str) -> str: + """Encrypt via OpenBao Transit. Returns ciphertext string (vault:v1:...). + + Args: + plaintext: The credential string to encrypt. + tenant_id: Tenant UUID string for key lookup. + + Returns: + Transit ciphertext string (vault:v1:base64...). + """ + from app.services.openbao_service import get_openbao_service + + service = get_openbao_service() + return await service.encrypt(tenant_id, plaintext.encode("utf-8")) + + +async def decrypt_credentials_transit(ciphertext: str, tenant_id: str) -> str: + """Decrypt OpenBao Transit ciphertext. Returns plaintext string. + + Args: + ciphertext: Transit ciphertext (vault:v1:...). + tenant_id: Tenant UUID string for key lookup. + + Returns: + Decrypted plaintext string. + """ + from app.services.openbao_service import get_openbao_service + + service = get_openbao_service() + plaintext_bytes = await service.decrypt(tenant_id, ciphertext) + return plaintext_bytes.decode("utf-8") + + +# --------------------------------------------------------------------------- +# OpenBao Transit data encryption (async, per-tenant _data keys — Phase 30) +# --------------------------------------------------------------------------- + + +async def encrypt_data_transit(plaintext: str, tenant_id: str) -> str: + """Encrypt non-credential data via OpenBao Transit using per-tenant data key. + + Used for audit log details, config backups, and reports. Data keys are + separate from credential keys (tenant_{uuid}_data vs tenant_{uuid}). + + Args: + plaintext: The data string to encrypt. + tenant_id: Tenant UUID string for data key lookup. + + Returns: + Transit ciphertext string (vault:v1:base64...). + """ + from app.services.openbao_service import get_openbao_service + + service = get_openbao_service() + return await service.encrypt_data(tenant_id, plaintext.encode("utf-8")) + + +async def decrypt_data_transit(ciphertext: str, tenant_id: str) -> str: + """Decrypt OpenBao Transit data ciphertext. Returns plaintext string. + + Args: + ciphertext: Transit ciphertext (vault:v1:...). + tenant_id: Tenant UUID string for data key lookup. + + Returns: + Decrypted plaintext string. + """ + from app.services.openbao_service import get_openbao_service + + service = get_openbao_service() + plaintext_bytes = await service.decrypt_data(tenant_id, ciphertext) + return plaintext_bytes.decode("utf-8") + + +async def decrypt_credentials_hybrid( + transit_ciphertext: str | None, + legacy_ciphertext: bytes | None, + tenant_id: str, + legacy_key: bytes, +) -> str: + """Dual-read: prefer Transit ciphertext, fall back to legacy. + + Args: + transit_ciphertext: OpenBao Transit ciphertext (vault:v1:...) or None. + legacy_ciphertext: Legacy AES-256-GCM bytes (nonce+ciphertext+tag) or None. + tenant_id: Tenant UUID string for Transit key lookup. + legacy_key: 32-byte legacy encryption key for fallback. + + Returns: + Decrypted plaintext string. + + Raises: + ValueError: If neither ciphertext is available. + """ + if transit_ciphertext and transit_ciphertext.startswith("vault:v"): + return await decrypt_credentials_transit(transit_ciphertext, tenant_id) + elif legacy_ciphertext: + return decrypt_credentials(legacy_ciphertext, legacy_key) + else: + raise ValueError("No credentials available (both transit and legacy are empty)") diff --git a/backend/app/services/device.py b/backend/app/services/device.py new file mode 100644 index 0000000..627ff49 --- /dev/null +++ b/backend/app/services/device.py @@ -0,0 +1,670 @@ +""" +Device service — business logic for device CRUD, credential encryption, groups, and tags. + +All functions operate via the app_user engine (RLS enforced). +Tenant isolation is handled automatically by PostgreSQL RLS policies +(SET LOCAL app.current_tenant is set by the get_current_user dependency before +this layer is called). + +Credential policy: +- Credentials are always stored as AES-256-GCM encrypted JSON blobs. +- Credentials are NEVER returned in any public-facing response. +- Re-encryption happens only when a new password is explicitly provided in an update. +""" + +import asyncio +import json +import uuid +from typing import Optional + +from sqlalchemy import func, or_, select +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.orm import selectinload + +from app.models.device import ( + Device, + DeviceGroup, + DeviceGroupMembership, + DeviceTag, + DeviceTagAssignment, +) +from app.schemas.device import ( + BulkAddRequest, + BulkAddResult, + DeviceCreate, + DeviceGroupCreate, + DeviceGroupResponse, + DeviceGroupUpdate, + DeviceResponse, + DeviceTagCreate, + DeviceTagResponse, + DeviceTagUpdate, + DeviceUpdate, +) +from app.config import settings +from app.services.crypto import ( + decrypt_credentials, + decrypt_credentials_hybrid, + encrypt_credentials, + encrypt_credentials_transit, +) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _tcp_reachable(ip: str, port: int, timeout: float = 3.0) -> bool: + """Return True if a TCP connection to ip:port succeeds within timeout.""" + try: + _, writer = await asyncio.wait_for( + asyncio.open_connection(ip, port), timeout=timeout + ) + writer.close() + try: + await writer.wait_closed() + except Exception: + pass + return True + except Exception: + return False + + +def _build_device_response(device: Device) -> DeviceResponse: + """ + Build a DeviceResponse from an ORM Device instance. + + Tags and groups are extracted from pre-loaded relationships. + Credentials are explicitly EXCLUDED. + """ + from app.schemas.device import DeviceGroupRef, DeviceTagRef + + tags = [ + DeviceTagRef( + id=a.tag.id, + name=a.tag.name, + color=a.tag.color, + ) + for a in device.tag_assignments + ] + + groups = [ + DeviceGroupRef( + id=m.group.id, + name=m.group.name, + ) + for m in device.group_memberships + ] + + return DeviceResponse( + id=device.id, + hostname=device.hostname, + ip_address=device.ip_address, + api_port=device.api_port, + api_ssl_port=device.api_ssl_port, + model=device.model, + serial_number=device.serial_number, + firmware_version=device.firmware_version, + routeros_version=device.routeros_version, + uptime_seconds=device.uptime_seconds, + last_seen=device.last_seen, + latitude=device.latitude, + longitude=device.longitude, + status=device.status, + tls_mode=device.tls_mode, + tags=tags, + groups=groups, + created_at=device.created_at, + ) + + +def _device_with_relations(): + """Return a select() for Device with tags and groups eagerly loaded.""" + return select(Device).options( + selectinload(Device.tag_assignments).selectinload(DeviceTagAssignment.tag), + selectinload(Device.group_memberships).selectinload(DeviceGroupMembership.group), + ) + + +# --------------------------------------------------------------------------- +# Device CRUD +# --------------------------------------------------------------------------- + + +async def create_device( + db: AsyncSession, + tenant_id: uuid.UUID, + data: DeviceCreate, + encryption_key: bytes, +) -> DeviceResponse: + """ + Create a new device. + + - Validates TCP connectivity (api_port or api_ssl_port must be reachable). + - Encrypts credentials before storage. + - Status set to "unknown" until the Go poller runs a full auth check (Phase 2). + """ + # Test connectivity before accepting the device + api_reachable = await _tcp_reachable(data.ip_address, data.api_port) + ssl_reachable = await _tcp_reachable(data.ip_address, data.api_ssl_port) + + if not api_reachable and not ssl_reachable: + from fastapi import HTTPException, status + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=( + f"Cannot reach {data.ip_address} on port {data.api_port} " + f"(RouterOS API) or {data.api_ssl_port} (RouterOS SSL API). " + "Verify the IP address and that the RouterOS API is enabled." + ), + ) + + # Encrypt credentials via OpenBao Transit (new writes go through Transit) + credentials_json = json.dumps({"username": data.username, "password": data.password}) + transit_ciphertext = await encrypt_credentials_transit( + credentials_json, str(tenant_id) + ) + + device = Device( + tenant_id=tenant_id, + hostname=data.hostname, + ip_address=data.ip_address, + api_port=data.api_port, + api_ssl_port=data.api_ssl_port, + encrypted_credentials_transit=transit_ciphertext, + status="unknown", + ) + db.add(device) + await db.flush() # Get the ID without committing + await db.refresh(device) + + # Re-query with relationships loaded + result = await db.execute( + _device_with_relations().where(Device.id == device.id) + ) + device = result.scalar_one() + return _build_device_response(device) + + +async def get_devices( + db: AsyncSession, + tenant_id: uuid.UUID, + page: int = 1, + page_size: int = 25, + status: Optional[str] = None, + search: Optional[str] = None, + tag_id: Optional[uuid.UUID] = None, + group_id: Optional[uuid.UUID] = None, + sort_by: str = "created_at", + sort_order: str = "desc", +) -> tuple[list[DeviceResponse], int]: + """ + Return a paginated list of devices with optional filtering and sorting. + + Returns (items, total_count). + RLS automatically scopes this to the caller's tenant. + """ + base_q = _device_with_relations() + + # Filtering + if status: + base_q = base_q.where(Device.status == status) + + if search: + pattern = f"%{search}%" + base_q = base_q.where( + or_( + Device.hostname.ilike(pattern), + Device.ip_address.ilike(pattern), + ) + ) + + if tag_id: + base_q = base_q.where( + Device.id.in_( + select(DeviceTagAssignment.device_id).where( + DeviceTagAssignment.tag_id == tag_id + ) + ) + ) + + if group_id: + base_q = base_q.where( + Device.id.in_( + select(DeviceGroupMembership.device_id).where( + DeviceGroupMembership.group_id == group_id + ) + ) + ) + + # Count total before pagination + count_q = select(func.count()).select_from(base_q.subquery()) + total_result = await db.execute(count_q) + total = total_result.scalar_one() + + # Sorting + allowed_sort_cols = { + "created_at": Device.created_at, + "hostname": Device.hostname, + "ip_address": Device.ip_address, + "status": Device.status, + "last_seen": Device.last_seen, + } + sort_col = allowed_sort_cols.get(sort_by, Device.created_at) + if sort_order.lower() == "asc": + base_q = base_q.order_by(sort_col.asc()) + else: + base_q = base_q.order_by(sort_col.desc()) + + # Pagination + offset = (page - 1) * page_size + base_q = base_q.offset(offset).limit(page_size) + + result = await db.execute(base_q) + devices = result.scalars().all() + return [_build_device_response(d) for d in devices], total + + +async def get_device( + db: AsyncSession, + tenant_id: uuid.UUID, + device_id: uuid.UUID, +) -> DeviceResponse: + """Get a single device by ID.""" + from fastapi import HTTPException, status + + result = await db.execute( + _device_with_relations().where(Device.id == device_id) + ) + device = result.scalar_one_or_none() + if not device: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Device not found") + return _build_device_response(device) + + +async def update_device( + db: AsyncSession, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + data: DeviceUpdate, + encryption_key: bytes, +) -> DeviceResponse: + """ + Update device fields. Re-encrypts credentials only if password is provided. + """ + from fastapi import HTTPException, status + + result = await db.execute( + _device_with_relations().where(Device.id == device_id) + ) + device = result.scalar_one_or_none() + if not device: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Device not found") + + # Update scalar fields + if data.hostname is not None: + device.hostname = data.hostname + if data.ip_address is not None: + device.ip_address = data.ip_address + if data.api_port is not None: + device.api_port = data.api_port + if data.api_ssl_port is not None: + device.api_ssl_port = data.api_ssl_port + if data.latitude is not None: + device.latitude = data.latitude + if data.longitude is not None: + device.longitude = data.longitude + if data.tls_mode is not None: + device.tls_mode = data.tls_mode + + # Re-encrypt credentials if new ones are provided + credentials_changed = False + if data.password is not None: + # Decrypt existing to get current username if no new username given + current_username: str = data.username or "" + if not current_username and (device.encrypted_credentials_transit or device.encrypted_credentials): + try: + existing_json = await decrypt_credentials_hybrid( + device.encrypted_credentials_transit, + device.encrypted_credentials, + str(device.tenant_id), + settings.get_encryption_key_bytes(), + ) + existing = json.loads(existing_json) + current_username = existing.get("username", "") + except Exception: + current_username = "" + + credentials_json = json.dumps({ + "username": data.username if data.username is not None else current_username, + "password": data.password, + }) + # New writes go through Transit + device.encrypted_credentials_transit = await encrypt_credentials_transit( + credentials_json, str(device.tenant_id) + ) + device.encrypted_credentials = None # Clear legacy (Transit is canonical) + credentials_changed = True + elif data.username is not None and (device.encrypted_credentials_transit or device.encrypted_credentials): + # Only username changed — update it without changing the password + try: + existing_json = await decrypt_credentials_hybrid( + device.encrypted_credentials_transit, + device.encrypted_credentials, + str(device.tenant_id), + settings.get_encryption_key_bytes(), + ) + existing = json.loads(existing_json) + existing["username"] = data.username + # Re-encrypt via Transit + device.encrypted_credentials_transit = await encrypt_credentials_transit( + json.dumps(existing), str(device.tenant_id) + ) + device.encrypted_credentials = None + credentials_changed = True + except Exception: + pass # Keep existing encrypted blob if decryption fails + + await db.flush() + await db.refresh(device) + + # Notify poller to invalidate cached credentials (fire-and-forget via NATS) + if credentials_changed: + try: + from app.services.event_publisher import publish_event + await publish_event( + f"device.credential_changed.{device_id}", + {"device_id": str(device_id), "tenant_id": str(tenant_id)}, + ) + except Exception: + pass # Never fail the update due to NATS issues + + result2 = await db.execute( + _device_with_relations().where(Device.id == device_id) + ) + device = result2.scalar_one() + return _build_device_response(device) + + +async def delete_device( + db: AsyncSession, + tenant_id: uuid.UUID, + device_id: uuid.UUID, +) -> None: + """Hard-delete a device (v1 — no soft delete for devices).""" + from fastapi import HTTPException, status + + result = await db.execute(select(Device).where(Device.id == device_id)) + device = result.scalar_one_or_none() + if not device: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Device not found") + await db.delete(device) + await db.flush() + + +# --------------------------------------------------------------------------- +# Group / Tag assignment +# --------------------------------------------------------------------------- + + +async def assign_device_to_group( + db: AsyncSession, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + group_id: uuid.UUID, +) -> None: + """Assign a device to a group (idempotent).""" + from fastapi import HTTPException, status + + # Verify device and group exist (RLS scopes both) + dev = await db.get(Device, device_id) + if not dev: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Device not found") + grp = await db.get(DeviceGroup, group_id) + if not grp: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Group not found") + + existing = await db.get(DeviceGroupMembership, (device_id, group_id)) + if not existing: + db.add(DeviceGroupMembership(device_id=device_id, group_id=group_id)) + await db.flush() + + +async def remove_device_from_group( + db: AsyncSession, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + group_id: uuid.UUID, +) -> None: + """Remove a device from a group.""" + from fastapi import HTTPException, status + + membership = await db.get(DeviceGroupMembership, (device_id, group_id)) + if not membership: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Device is not in this group", + ) + await db.delete(membership) + await db.flush() + + +async def assign_tag_to_device( + db: AsyncSession, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + tag_id: uuid.UUID, +) -> None: + """Assign a tag to a device (idempotent).""" + from fastapi import HTTPException, status + + dev = await db.get(Device, device_id) + if not dev: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Device not found") + tag = await db.get(DeviceTag, tag_id) + if not tag: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Tag not found") + + existing = await db.get(DeviceTagAssignment, (device_id, tag_id)) + if not existing: + db.add(DeviceTagAssignment(device_id=device_id, tag_id=tag_id)) + await db.flush() + + +async def remove_tag_from_device( + db: AsyncSession, + tenant_id: uuid.UUID, + device_id: uuid.UUID, + tag_id: uuid.UUID, +) -> None: + """Remove a tag from a device.""" + from fastapi import HTTPException, status + + assignment = await db.get(DeviceTagAssignment, (device_id, tag_id)) + if not assignment: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Tag is not assigned to this device", + ) + await db.delete(assignment) + await db.flush() + + +# --------------------------------------------------------------------------- +# DeviceGroup CRUD +# --------------------------------------------------------------------------- + + +async def create_group( + db: AsyncSession, + tenant_id: uuid.UUID, + data: DeviceGroupCreate, +) -> DeviceGroupResponse: + """Create a new device group.""" + group = DeviceGroup( + tenant_id=tenant_id, + name=data.name, + description=data.description, + ) + db.add(group) + await db.flush() + await db.refresh(group) + + # Count devices in the group (0 for new group) + return DeviceGroupResponse( + id=group.id, + name=group.name, + description=group.description, + device_count=0, + created_at=group.created_at, + ) + + +async def get_groups( + db: AsyncSession, + tenant_id: uuid.UUID, +) -> list[DeviceGroupResponse]: + """Return all device groups for the current tenant with device counts.""" + result = await db.execute( + select(DeviceGroup).options( + selectinload(DeviceGroup.memberships) + ) + ) + groups = result.scalars().all() + return [ + DeviceGroupResponse( + id=g.id, + name=g.name, + description=g.description, + device_count=len(g.memberships), + created_at=g.created_at, + ) + for g in groups + ] + + +async def update_group( + db: AsyncSession, + tenant_id: uuid.UUID, + group_id: uuid.UUID, + data: DeviceGroupUpdate, +) -> DeviceGroupResponse: + """Update a device group.""" + from fastapi import HTTPException, status + + result = await db.execute( + select(DeviceGroup).options( + selectinload(DeviceGroup.memberships) + ).where(DeviceGroup.id == group_id) + ) + group = result.scalar_one_or_none() + if not group: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Group not found") + + if data.name is not None: + group.name = data.name + if data.description is not None: + group.description = data.description + + await db.flush() + await db.refresh(group) + + result2 = await db.execute( + select(DeviceGroup).options( + selectinload(DeviceGroup.memberships) + ).where(DeviceGroup.id == group_id) + ) + group = result2.scalar_one() + return DeviceGroupResponse( + id=group.id, + name=group.name, + description=group.description, + device_count=len(group.memberships), + created_at=group.created_at, + ) + + +async def delete_group( + db: AsyncSession, + tenant_id: uuid.UUID, + group_id: uuid.UUID, +) -> None: + """Delete a device group.""" + from fastapi import HTTPException, status + + group = await db.get(DeviceGroup, group_id) + if not group: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Group not found") + await db.delete(group) + await db.flush() + + +# --------------------------------------------------------------------------- +# DeviceTag CRUD +# --------------------------------------------------------------------------- + + +async def create_tag( + db: AsyncSession, + tenant_id: uuid.UUID, + data: DeviceTagCreate, +) -> DeviceTagResponse: + """Create a new device tag.""" + tag = DeviceTag( + tenant_id=tenant_id, + name=data.name, + color=data.color, + ) + db.add(tag) + await db.flush() + await db.refresh(tag) + return DeviceTagResponse(id=tag.id, name=tag.name, color=tag.color) + + +async def get_tags( + db: AsyncSession, + tenant_id: uuid.UUID, +) -> list[DeviceTagResponse]: + """Return all device tags for the current tenant.""" + result = await db.execute(select(DeviceTag)) + tags = result.scalars().all() + return [DeviceTagResponse(id=t.id, name=t.name, color=t.color) for t in tags] + + +async def update_tag( + db: AsyncSession, + tenant_id: uuid.UUID, + tag_id: uuid.UUID, + data: DeviceTagUpdate, +) -> DeviceTagResponse: + """Update a device tag.""" + from fastapi import HTTPException, status + + tag = await db.get(DeviceTag, tag_id) + if not tag: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Tag not found") + + if data.name is not None: + tag.name = data.name + if data.color is not None: + tag.color = data.color + + await db.flush() + await db.refresh(tag) + return DeviceTagResponse(id=tag.id, name=tag.name, color=tag.color) + + +async def delete_tag( + db: AsyncSession, + tenant_id: uuid.UUID, + tag_id: uuid.UUID, +) -> None: + """Delete a device tag.""" + from fastapi import HTTPException, status + + tag = await db.get(DeviceTag, tag_id) + if not tag: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Tag not found") + await db.delete(tag) + await db.flush() diff --git a/backend/app/services/email_service.py b/backend/app/services/email_service.py new file mode 100644 index 0000000..6e7cff5 --- /dev/null +++ b/backend/app/services/email_service.py @@ -0,0 +1,124 @@ +"""Unified email sending service. + +All email sending (system emails, alert notifications) goes through this module. +Supports TLS, STARTTLS, and plain SMTP. Handles Transit + legacy Fernet password decryption. +""" + +import logging +from email.message import EmailMessage +from typing import Optional + +import aiosmtplib + +logger = logging.getLogger(__name__) + + +class SMTPConfig: + """SMTP connection configuration.""" + + def __init__( + self, + host: str, + port: int = 587, + user: Optional[str] = None, + password: Optional[str] = None, + use_tls: bool = False, + from_address: str = "noreply@example.com", + ): + self.host = host + self.port = port + self.user = user + self.password = password + self.use_tls = use_tls + self.from_address = from_address + + +async def send_email( + to: str, + subject: str, + html: str, + plain_text: str, + smtp_config: SMTPConfig, +) -> None: + """Send an email via SMTP. + + Args: + to: Recipient email address. + subject: Email subject line. + html: HTML body. + plain_text: Plain text fallback body. + smtp_config: SMTP connection settings. + + Raises: + aiosmtplib.SMTPException: On SMTP connection or send failure. + """ + msg = EmailMessage() + msg["Subject"] = subject + msg["From"] = smtp_config.from_address + msg["To"] = to + msg.set_content(plain_text) + msg.add_alternative(html, subtype="html") + + use_tls = smtp_config.use_tls + start_tls = not use_tls if smtp_config.port != 25 else False + + await aiosmtplib.send( + msg, + hostname=smtp_config.host, + port=smtp_config.port, + username=smtp_config.user or None, + password=smtp_config.password or None, + use_tls=use_tls, + start_tls=start_tls, + ) + + +async def test_smtp_connection(smtp_config: SMTPConfig) -> dict: + """Test SMTP connectivity without sending an email. + + Returns: + dict with "success" bool and "message" string. + """ + try: + smtp = aiosmtplib.SMTP( + hostname=smtp_config.host, + port=smtp_config.port, + use_tls=smtp_config.use_tls, + start_tls=not smtp_config.use_tls if smtp_config.port != 25 else False, + ) + await smtp.connect() + if smtp_config.user and smtp_config.password: + await smtp.login(smtp_config.user, smtp_config.password) + await smtp.quit() + return {"success": True, "message": "SMTP connection successful"} + except Exception as e: + return {"success": False, "message": str(e)} + + +async def send_test_email(to: str, smtp_config: SMTPConfig) -> dict: + """Send a test email to verify the full SMTP flow. + + Returns: + dict with "success" bool and "message" string. + """ + html = """ +
+
+

TOD — Email Test

+
+
+

This is a test email from The Other Dude.

+

If you're reading this, your SMTP configuration is working correctly.

+

+ Sent from TOD Fleet Management +

+
+
+ """ + plain = "TOD — Email Test\n\nThis is a test email from The Other Dude.\nIf you're reading this, your SMTP configuration is working correctly." + + try: + await send_email(to, "TOD — Test Email", html, plain, smtp_config) + return {"success": True, "message": f"Test email sent to {to}"} + except Exception as e: + return {"success": False, "message": str(e)} diff --git a/backend/app/services/emergency_kit_service.py b/backend/app/services/emergency_kit_service.py new file mode 100644 index 0000000..41171bf --- /dev/null +++ b/backend/app/services/emergency_kit_service.py @@ -0,0 +1,54 @@ +"""Emergency Kit PDF template generation. + +Generates an Emergency Kit PDF containing the user's email and sign-in URL +but NOT the Secret Key. The Secret Key placeholder is filled client-side +so that the server never sees it. + +Uses Jinja2 + WeasyPrint following the same pattern as the reports service. +""" + +import asyncio +from datetime import UTC, datetime +from pathlib import Path + +from jinja2 import Environment, FileSystemLoader + +from app.config import settings + +TEMPLATE_DIR = Path(__file__).parent.parent.parent / "templates" + + +async def generate_emergency_kit_template( + email: str, +) -> bytes: + """Generate Emergency Kit PDF template WITHOUT the Secret Key. + + The Secret Key placeholder will be filled client-side. + The server never sees the Secret Key. + + Args: + email: The user's email address to display in the PDF. + + Returns: + PDF bytes ready for streaming response. + """ + env = Environment( + loader=FileSystemLoader(str(TEMPLATE_DIR)), + autoescape=True, + ) + template = env.get_template("emergency_kit.html") + + html_content = template.render( + email=email, + signin_url=settings.APP_BASE_URL, + date=datetime.now(UTC).strftime("%Y-%m-%d"), + secret_key_placeholder="[Download complete -- your Secret Key will be inserted by your browser]", + ) + + # Run weasyprint in thread to avoid blocking the event loop + from weasyprint import HTML + + pdf_bytes = await asyncio.to_thread( + lambda: HTML(string=html_content).write_pdf() + ) + return pdf_bytes diff --git a/backend/app/services/event_publisher.py b/backend/app/services/event_publisher.py new file mode 100644 index 0000000..60724dd --- /dev/null +++ b/backend/app/services/event_publisher.py @@ -0,0 +1,52 @@ +"""Fire-and-forget NATS JetStream event publisher for real-time SSE pipeline. + +Provides a shared lazy NATS connection and publish helper used by: +- alert_evaluator.py (alert.fired.{tenant_id}, alert.resolved.{tenant_id}) +- restore_service.py (config.push.{tenant_id}.{device_id}) +- upgrade_service.py (firmware.progress.{tenant_id}.{device_id}) + +All publishes are fire-and-forget: errors are logged but never propagate +to the caller. A NATS outage must never block alert evaluation, config +push, or firmware upgrade operations. +""" + +import json +import logging +from typing import Any + +import nats +import nats.aio.client + +from app.config import settings + +logger = logging.getLogger(__name__) + +# Module-level NATS connection (lazy initialized, reused across publishes) +_nc: nats.aio.client.Client | None = None + + +async def _get_nats() -> nats.aio.client.Client: + """Get or create a NATS connection for event publishing.""" + global _nc + if _nc is None or _nc.is_closed: + _nc = await nats.connect(settings.NATS_URL) + logger.info("Event publisher NATS connection established") + return _nc + + +async def publish_event(subject: str, payload: dict[str, Any]) -> None: + """Publish a JSON event to a NATS JetStream subject (fire-and-forget). + + Args: + subject: NATS subject, e.g. "alert.fired.{tenant_id}". + payload: Dict that will be JSON-serialized as the message body. + + Never raises -- all exceptions are caught and logged as warnings. + """ + try: + nc = await _get_nats() + js = nc.jetstream() + await js.publish(subject, json.dumps(payload).encode()) + logger.debug("Published event to %s", subject) + except Exception as exc: + logger.warning("Failed to publish event to %s: %s", subject, exc) diff --git a/backend/app/services/firmware_service.py b/backend/app/services/firmware_service.py new file mode 100644 index 0000000..58cd7c2 --- /dev/null +++ b/backend/app/services/firmware_service.py @@ -0,0 +1,303 @@ +"""Firmware version cache service and NPK downloader. + +Responsibilities: +- check_latest_versions(): fetch latest RouterOS versions from download.mikrotik.com +- download_firmware(): download NPK packages to local PVC cache +- get_firmware_overview(): return fleet firmware status for a tenant +- schedule_firmware_checks(): register daily firmware check job with APScheduler + +Version discovery comes from two sources: +1. Go poller runs /system/package/update per device (rate-limited to once/day) + and publishes via NATS -> firmware_subscriber processes these events +2. check_latest_versions() fetches LATEST.7 / LATEST.6 from download.mikrotik.com +""" + +import logging +import os +from pathlib import Path + +import httpx +from sqlalchemy import text + +from app.config import settings +from app.database import AdminAsyncSessionLocal + +logger = logging.getLogger(__name__) + +# Architectures supported by RouterOS v7 and v6 +_V7_ARCHITECTURES = ["arm", "arm64", "mipsbe", "mmips", "smips", "tile", "ppc", "x86"] +_V6_ARCHITECTURES = ["mipsbe", "mmips", "smips", "tile", "ppc", "x86"] + +# Version source files on download.mikrotik.com +_VERSION_SOURCES = [ + ("LATEST.7", "stable", 7), + ("LATEST.7long", "long-term", 7), + ("LATEST.6", "stable", 6), + ("LATEST.6long", "long-term", 6), +] + + +async def check_latest_versions() -> list[dict]: + """Fetch latest RouterOS versions from download.mikrotik.com. + + Checks LATEST.7, LATEST.7long, LATEST.6, and LATEST.6long files for + version strings, then upserts into firmware_versions table for each + architecture/channel combination. + + Returns list of discovered version dicts. + """ + results: list[dict] = [] + + async with httpx.AsyncClient(timeout=30.0) as client: + for channel_file, channel, major in _VERSION_SOURCES: + try: + resp = await client.get( + f"https://download.mikrotik.com/routeros/{channel_file}" + ) + if resp.status_code != 200: + logger.warning( + "MikroTik version check returned %d for %s", + resp.status_code, channel_file, + ) + continue + + version = resp.text.strip() + if not version or not version[0].isdigit(): + logger.warning("Invalid version string from %s: %r", channel_file, version) + continue + + architectures = _V7_ARCHITECTURES if major == 7 else _V6_ARCHITECTURES + for arch in architectures: + npk_url = ( + f"https://download.mikrotik.com/routeros/" + f"{version}/routeros-{version}-{arch}.npk" + ) + results.append({ + "architecture": arch, + "channel": channel, + "version": version, + "npk_url": npk_url, + }) + + except Exception as e: + logger.warning("Failed to check %s: %s", channel_file, e) + + # Upsert into firmware_versions table + if results: + async with AdminAsyncSessionLocal() as session: + for r in results: + await session.execute( + text(""" + INSERT INTO firmware_versions (id, architecture, channel, version, npk_url, checked_at) + VALUES (gen_random_uuid(), :arch, :channel, :version, :npk_url, NOW()) + ON CONFLICT (architecture, channel, version) DO UPDATE SET checked_at = NOW() + """), + { + "arch": r["architecture"], + "channel": r["channel"], + "version": r["version"], + "npk_url": r["npk_url"], + }, + ) + await session.commit() + + logger.info("Firmware version check complete — %d versions discovered", len(results)) + return results + + +async def download_firmware(architecture: str, channel: str, version: str) -> str: + """Download an NPK package to the local firmware cache. + + Returns the local file path. Skips download if file already exists + and size matches. + """ + cache_dir = Path(settings.FIRMWARE_CACHE_DIR) / version + cache_dir.mkdir(parents=True, exist_ok=True) + + filename = f"routeros-{version}-{architecture}.npk" + local_path = cache_dir / filename + npk_url = f"https://download.mikrotik.com/routeros/{version}/{filename}" + + # Check if already cached + if local_path.exists() and local_path.stat().st_size > 0: + logger.info("Firmware already cached: %s", local_path) + return str(local_path) + + logger.info("Downloading firmware: %s", npk_url) + + async with httpx.AsyncClient(timeout=300.0) as client: + async with client.stream("GET", npk_url) as response: + response.raise_for_status() + with open(local_path, "wb") as f: + async for chunk in response.aiter_bytes(chunk_size=65536): + f.write(chunk) + + file_size = local_path.stat().st_size + logger.info("Firmware downloaded: %s (%d bytes)", local_path, file_size) + + # Update firmware_versions table with local path and size + async with AdminAsyncSessionLocal() as session: + await session.execute( + text(""" + UPDATE firmware_versions + SET npk_local_path = :path, npk_size_bytes = :size + WHERE architecture = :arch AND channel = :channel AND version = :version + """), + { + "path": str(local_path), + "size": file_size, + "arch": architecture, + "channel": channel, + "version": version, + }, + ) + await session.commit() + + return str(local_path) + + +async def get_firmware_overview(tenant_id: str) -> dict: + """Return fleet firmware status for a tenant. + + Returns devices grouped by firmware version, annotated with up-to-date status + based on the latest known version for each device's architecture and preferred channel. + """ + async with AdminAsyncSessionLocal() as session: + # Get all devices for tenant + devices_result = await session.execute( + text(""" + SELECT id, hostname, ip_address, routeros_version, architecture, + preferred_channel, routeros_major_version, + serial_number, firmware_version, model + FROM devices + WHERE tenant_id = CAST(:tenant_id AS uuid) + ORDER BY hostname + """), + {"tenant_id": tenant_id}, + ) + devices = devices_result.fetchall() + + # Get latest firmware versions per architecture/channel + versions_result = await session.execute( + text(""" + SELECT DISTINCT ON (architecture, channel) + architecture, channel, version, npk_url + FROM firmware_versions + ORDER BY architecture, channel, checked_at DESC + """) + ) + latest_versions = { + (row[0], row[1]): {"version": row[2], "npk_url": row[3]} + for row in versions_result.fetchall() + } + + # Build per-device status + device_list = [] + version_groups: dict[str, list] = {} + summary = {"total": 0, "up_to_date": 0, "outdated": 0, "unknown": 0} + + for dev in devices: + dev_id = str(dev[0]) + hostname = dev[1] + current_version = dev[3] + arch = dev[4] + channel = dev[5] or "stable" + + latest = latest_versions.get((arch, channel)) if arch else None + latest_version = latest["version"] if latest else None + + is_up_to_date = False + if not current_version or not arch: + summary["unknown"] += 1 + elif latest_version and current_version == latest_version: + is_up_to_date = True + summary["up_to_date"] += 1 + else: + summary["outdated"] += 1 + + summary["total"] += 1 + + dev_info = { + "id": dev_id, + "hostname": hostname, + "ip_address": dev[2], + "routeros_version": current_version, + "architecture": arch, + "latest_version": latest_version, + "channel": channel, + "is_up_to_date": is_up_to_date, + "serial_number": dev[7], + "firmware_version": dev[8], + "model": dev[9], + } + device_list.append(dev_info) + + # Group by version + ver_key = current_version or "unknown" + if ver_key not in version_groups: + version_groups[ver_key] = [] + version_groups[ver_key].append(dev_info) + + # Build version groups with is_latest flag + groups = [] + for ver, devs in sorted(version_groups.items()): + # A version is "latest" if it matches the latest for any arch/channel combo + is_latest = any( + v["version"] == ver for v in latest_versions.values() + ) + groups.append({ + "version": ver, + "count": len(devs), + "is_latest": is_latest, + "devices": devs, + }) + + return { + "devices": device_list, + "version_groups": groups, + "summary": summary, + } + + +async def get_cached_firmware() -> list[dict]: + """List all locally cached NPK files with their sizes.""" + cache_dir = Path(settings.FIRMWARE_CACHE_DIR) + cached = [] + + if not cache_dir.exists(): + return cached + + for version_dir in sorted(cache_dir.iterdir()): + if not version_dir.is_dir(): + continue + for npk_file in sorted(version_dir.iterdir()): + if npk_file.suffix == ".npk": + cached.append({ + "path": str(npk_file), + "version": version_dir.name, + "filename": npk_file.name, + "size_bytes": npk_file.stat().st_size, + }) + + return cached + + +def schedule_firmware_checks() -> None: + """Register daily firmware version check with APScheduler. + + Called from FastAPI lifespan startup to schedule check_latest_versions() + at 3am UTC daily. + """ + from apscheduler.triggers.cron import CronTrigger + from app.services.backup_scheduler import backup_scheduler + + backup_scheduler.add_job( + check_latest_versions, + trigger=CronTrigger(hour=3, minute=0, timezone="UTC"), + id="firmware_version_check", + name="Check for new RouterOS firmware versions", + max_instances=1, + replace_existing=True, + ) + + logger.info("Firmware version check scheduled — daily at 3am UTC") diff --git a/backend/app/services/firmware_subscriber.py b/backend/app/services/firmware_subscriber.py new file mode 100644 index 0000000..36ed39c --- /dev/null +++ b/backend/app/services/firmware_subscriber.py @@ -0,0 +1,206 @@ +"""NATS JetStream subscriber for device firmware events from the Go poller. + +Subscribes to device.firmware.> and: +1. Updates devices.routeros_version and devices.architecture from poller data +2. Upserts firmware_versions table with latest version per architecture/channel + +Uses AdminAsyncSessionLocal (superuser bypass RLS) so firmware data from any +tenant can be written without setting app.current_tenant. +""" + +import asyncio +import json +import logging +from typing import Optional + +import nats +from nats.js import JetStreamContext +from nats.aio.client import Client as NATSClient +from sqlalchemy import text + +from app.config import settings +from app.database import AdminAsyncSessionLocal + +logger = logging.getLogger(__name__) + +_firmware_client: Optional[NATSClient] = None + + +async def on_device_firmware(msg) -> None: + """Handle a device.firmware event published by the Go poller. + + Payload (JSON): + device_id (str) -- UUID of the device + tenant_id (str) -- UUID of the owning tenant + installed_version (str) -- currently installed RouterOS version + latest_version (str) -- latest available version (may be empty) + channel (str) -- firmware channel ("stable", "long-term") + status (str) -- "New version is available", etc. + architecture (str) -- CPU architecture (arm, arm64, mipsbe, etc.) + """ + try: + data = json.loads(msg.data) + device_id = data.get("device_id") + tenant_id = data.get("tenant_id") + architecture = data.get("architecture") + installed_version = data.get("installed_version") + latest_version = data.get("latest_version") + channel = data.get("channel", "stable") + + if not device_id: + logger.warning("device.firmware event missing device_id — skipping") + await msg.ack() + return + + async with AdminAsyncSessionLocal() as session: + # Update device routeros_version and architecture from poller data + if architecture or installed_version: + await session.execute( + text(""" + UPDATE devices + SET routeros_version = COALESCE(:installed_ver, routeros_version), + architecture = COALESCE(:architecture, architecture), + updated_at = NOW() + WHERE id = CAST(:device_id AS uuid) + """), + { + "installed_ver": installed_version, + "architecture": architecture, + "device_id": device_id, + }, + ) + + # Upsert firmware_versions if we got latest version info + if latest_version and architecture: + npk_url = ( + f"https://download.mikrotik.com/routeros/" + f"{latest_version}/routeros-{latest_version}-{architecture}.npk" + ) + await session.execute( + text(""" + INSERT INTO firmware_versions (id, architecture, channel, version, npk_url, checked_at) + VALUES (gen_random_uuid(), :arch, :channel, :version, :url, NOW()) + ON CONFLICT (architecture, channel, version) DO UPDATE SET checked_at = NOW() + """), + { + "arch": architecture, + "channel": channel, + "version": latest_version, + "url": npk_url, + }, + ) + + await session.commit() + + logger.debug( + "device.firmware processed", + extra={ + "device_id": device_id, + "architecture": architecture, + "installed": installed_version, + "latest": latest_version, + }, + ) + await msg.ack() + + except Exception as exc: + logger.error( + "Failed to process device.firmware event: %s", + exc, + exc_info=True, + ) + try: + await msg.nak() + except Exception: + pass + + +async def _subscribe_with_retry(js: JetStreamContext) -> None: + """Subscribe to device.firmware.> with durable consumer, retrying if stream not ready.""" + max_attempts = 6 # ~30 seconds at 5s intervals + for attempt in range(1, max_attempts + 1): + try: + await js.subscribe( + "device.firmware.>", + cb=on_device_firmware, + durable="api-firmware-consumer", + stream="DEVICE_EVENTS", + ) + logger.info( + "NATS: subscribed to device.firmware.> (durable: api-firmware-consumer)" + ) + return + except Exception as exc: + if attempt < max_attempts: + logger.warning( + "NATS: stream DEVICE_EVENTS not ready for firmware (attempt %d/%d): %s — retrying in 5s", + attempt, + max_attempts, + exc, + ) + await asyncio.sleep(5) + else: + logger.warning( + "NATS: giving up on device.firmware.> after %d attempts: %s — API will run without firmware updates", + max_attempts, + exc, + ) + return + + +async def start_firmware_subscriber() -> Optional[NATSClient]: + """Connect to NATS and start the device.firmware.> subscription. + + Uses a separate NATS connection from the status and metrics subscribers. + + Returns the NATS connection (must be passed to stop_firmware_subscriber on shutdown). + Raises on fatal connection errors after retry exhaustion. + """ + global _firmware_client + + logger.info("NATS firmware: connecting to %s", settings.NATS_URL) + + nc = await nats.connect( + settings.NATS_URL, + max_reconnect_attempts=-1, + reconnect_time_wait=2, + error_cb=_on_error, + reconnected_cb=_on_reconnected, + disconnected_cb=_on_disconnected, + ) + + logger.info("NATS firmware: connected to %s", settings.NATS_URL) + + js = nc.jetstream() + await _subscribe_with_retry(js) + + _firmware_client = nc + return nc + + +async def stop_firmware_subscriber(nc: Optional[NATSClient]) -> None: + """Drain and close the firmware NATS connection gracefully.""" + if nc is None: + return + try: + logger.info("NATS firmware: draining connection...") + await nc.drain() + logger.info("NATS firmware: connection closed") + except Exception as exc: + logger.warning("NATS firmware: error during drain: %s", exc) + try: + await nc.close() + except Exception: + pass + + +async def _on_error(exc: Exception) -> None: + logger.error("NATS firmware error: %s", exc) + + +async def _on_reconnected() -> None: + logger.info("NATS firmware: reconnected") + + +async def _on_disconnected() -> None: + logger.warning("NATS firmware: disconnected") diff --git a/backend/app/services/git_store.py b/backend/app/services/git_store.py new file mode 100644 index 0000000..cc52e48 --- /dev/null +++ b/backend/app/services/git_store.py @@ -0,0 +1,296 @@ +"""pygit2-based git store for versioned config backup storage. + +All functions in this module are synchronous (pygit2 is C bindings over libgit2). +Callers running in an async context MUST wrap calls in: + loop.run_in_executor(None, func, *args) +or: + asyncio.get_event_loop().run_in_executor(None, func, *args) + +See Pitfall 3 in 04-RESEARCH.md — blocking pygit2 in async context stalls +the event loop and causes timeouts for other concurrent requests. + +Git layout: + {GIT_STORE_PATH}/{tenant_id}.git/ <- bare repo per tenant + objects/ refs/ HEAD <- standard bare git structure + {device_id}/ <- device subtree + export.rsc <- text export (/export compact) + backup.bin <- binary system backup +""" + +import difflib +import threading +from pathlib import Path +from typing import Optional + +import pygit2 + +from app.config import settings + +# ========================================================================= +# Per-tenant mutex to prevent TreeBuilder race condition (Pitfall 5 in RESEARCH.md). +# Two simultaneous backups for different devices in the same tenant repo would +# each read HEAD, build their own device subtrees, and write conflicting root +# trees. The second commit would lose the first's device subtree. +# Lock scope is the entire tenant repo — not just the device. +# ========================================================================= +_tenant_locks: dict[str, threading.Lock] = {} +_tenant_locks_guard = threading.Lock() + + +def _get_tenant_lock(tenant_id: str) -> threading.Lock: + """Return (creating if needed) the per-tenant commit lock.""" + with _tenant_locks_guard: + if tenant_id not in _tenant_locks: + _tenant_locks[tenant_id] = threading.Lock() + return _tenant_locks[tenant_id] + + +# ========================================================================= +# PUBLIC API +# ========================================================================= + + +def get_or_create_repo(tenant_id: str) -> pygit2.Repository: + """Open the tenant's bare git repo, creating it on first use. + + The repo lives at {GIT_STORE_PATH}/{tenant_id}.git. The parent directory + is created if it does not exist. + + Args: + tenant_id: Tenant UUID as string. + + Returns: + An open pygit2.Repository instance (bare). + """ + git_store_root = Path(settings.GIT_STORE_PATH) + git_store_root.mkdir(parents=True, exist_ok=True) + + repo_path = git_store_root / f"{tenant_id}.git" + if repo_path.exists(): + return pygit2.Repository(str(repo_path)) + + return pygit2.init_repository(str(repo_path), bare=True) + + +def commit_backup( + tenant_id: str, + device_id: str, + export_text: str, + binary_backup: bytes, + message: str, +) -> str: + """Write a backup pair (export.rsc + backup.bin) as a git commit. + + Creates or updates the device subdirectory in the tenant's bare repo. + Preserves other devices' subdirectories by merging the device subtree + into the existing root tree. + + Per-tenant locking (threading.Lock) prevents the TreeBuilder race + condition when two devices in the same tenant back up concurrently. + + Args: + tenant_id: Tenant UUID as string. + device_id: Device UUID as string (becomes a subdirectory in the repo). + export_text: Text output of /export compact. + binary_backup: Raw bytes from /system backup save. + message: Commit message (format: "{trigger}: {hostname} ({ip}) at {ts}"). + + Returns: + The hex commit SHA string (40 characters). + """ + lock = _get_tenant_lock(tenant_id) + + with lock: + repo = get_or_create_repo(tenant_id) + + # Create blobs from content + export_oid = repo.create_blob(export_text.encode("utf-8")) + binary_oid = repo.create_blob(binary_backup) + + # Build device subtree: {device_id}/export.rsc and {device_id}/backup.bin + device_builder = repo.TreeBuilder() + device_builder.insert("export.rsc", export_oid, pygit2.GIT_FILEMODE_BLOB) + device_builder.insert("backup.bin", binary_oid, pygit2.GIT_FILEMODE_BLOB) + device_tree_oid = device_builder.write() + + # Merge device subtree into root tree, preserving all other device subtrees. + # If the repo has no commits yet, start with an empty root tree. + root_ref = repo.references.get("refs/heads/main") + parent_commit: Optional[pygit2.Commit] = None + + if root_ref is not None: + try: + parent_commit = repo.get(root_ref.target) + root_builder = repo.TreeBuilder(parent_commit.tree) + except Exception: + root_builder = repo.TreeBuilder() + else: + root_builder = repo.TreeBuilder() + + root_builder.insert(device_id, device_tree_oid, pygit2.GIT_FILEMODE_TREE) + root_tree_oid = root_builder.write() + + # Author signature — no real identity, portal service account + author = pygit2.Signature("The Other Dude", "backup@tod.local") + + parents = [root_ref.target] if root_ref is not None else [] + + commit_oid = repo.create_commit( + "refs/heads/main", + author, + author, + message, + root_tree_oid, + parents, + ) + + return str(commit_oid) + + +def read_file( + tenant_id: str, + commit_sha: str, + device_id: str, + filename: str, +) -> bytes: + """Read a file blob from a specific backup commit. + + Navigates the tree: root -> device_id subtree -> filename. + + Args: + tenant_id: Tenant UUID as string. + commit_sha: Full or abbreviated git commit SHA. + device_id: Device UUID as string (subdirectory name in the repo). + filename: File to read: "export.rsc" or "backup.bin". + + Returns: + Raw bytes of the file content. + + Raises: + KeyError: If device_id subtree or filename does not exist in commit. + pygit2.GitError: If commit_sha is not found. + """ + repo = get_or_create_repo(tenant_id) + + commit_obj = repo.get(commit_sha) + if commit_obj is None: + raise KeyError(f"Commit {commit_sha!r} not found in tenant {tenant_id!r} repo") + + # Navigate: root tree -> device subtree -> file blob + device_entry = commit_obj.tree[device_id] + device_tree = repo.get(device_entry.id) + file_entry = device_tree[filename] + file_blob = repo.get(file_entry.id) + + return file_blob.data + + +def list_device_commits( + tenant_id: str, + device_id: str, +) -> list[dict]: + """Walk commit history and return commits that include the device subtree. + + Walks commits newest-first. Returns only commits where the device_id + subtree is present in the root tree (the device had a backup in that commit). + + Args: + tenant_id: Tenant UUID as string. + device_id: Device UUID as string. + + Returns: + List of dicts (newest first): + [{"sha": str, "message": str, "timestamp": int}, ...] + Empty list if no commits or device has never been backed up. + """ + repo = get_or_create_repo(tenant_id) + + # If there are no commits, return empty list immediately. + # Use refs/heads/main explicitly rather than repo.head (which defaults to + # refs/heads/master — wrong when the repo uses 'main' as the default branch). + main_ref = repo.references.get("refs/heads/main") + if main_ref is None: + return [] + head_target = main_ref.target + + results = [] + walker = repo.walk(head_target, pygit2.GIT_SORT_TIME) + + for commit in walker: + # Check if device_id subtree exists in this commit's root tree. + try: + device_entry = commit.tree[device_id] + except KeyError: + # Device not present in this commit at all — skip. + continue + + # Only include this commit if it actually changed the device's subtree + # vs its parent. This prevents every subsequent backup (for any device + # in the same tenant) from appearing in all devices' histories. + if commit.parents: + parent = commit.parents[0] + try: + parent_device_entry = parent.tree[device_id] + if parent_device_entry.id == device_entry.id: + # Device subtree unchanged in this commit — skip. + continue + except KeyError: + # Device wasn't in parent but is in this commit — it's the first entry. + pass + + results.append({ + "sha": str(commit.id), + "message": commit.message.strip(), + "timestamp": commit.commit_time, + }) + + return results + + +def compute_line_delta(old_text: str, new_text: str) -> tuple[int, int]: + """Compute (lines_added, lines_removed) between two text versions. + + Uses difflib.SequenceMatcher to efficiently compute the line-count delta + without generating a full unified diff. This is faster than + difflib.unified_diff for large config files. + + For the first backup (no prior version), pass old_text="" to get + (total_lines, 0) as the delta. + + Args: + old_text: Previous export.rsc content (empty string for first backup). + new_text: New export.rsc content. + + Returns: + Tuple of (lines_added, lines_removed). + """ + old_lines = old_text.splitlines() if old_text else [] + new_lines = new_text.splitlines() if new_text else [] + + if not old_lines and not new_lines: + return (0, 0) + + # For first backup (empty old), all lines are "added". + if not old_lines: + return (len(new_lines), 0) + + # For deletion of all content, all lines are "removed". + if not new_lines: + return (0, len(old_lines)) + + matcher = difflib.SequenceMatcher(None, old_lines, new_lines, autojunk=False) + + lines_added = 0 + lines_removed = 0 + + for tag, i1, i2, j1, j2 in matcher.get_opcodes(): + if tag == "replace": + lines_removed += i2 - i1 + lines_added += j2 - j1 + elif tag == "delete": + lines_removed += i2 - i1 + elif tag == "insert": + lines_added += j2 - j1 + # "equal" — no change + + return (lines_added, lines_removed) diff --git a/backend/app/services/key_service.py b/backend/app/services/key_service.py new file mode 100644 index 0000000..8a7b278 --- /dev/null +++ b/backend/app/services/key_service.py @@ -0,0 +1,324 @@ +"""Key hierarchy management service for zero-knowledge architecture. + +Provides CRUD operations for encrypted key bundles (UserKeySet), +append-only audit logging (KeyAccessLog), and OpenBao Transit +tenant key provisioning with credential migration. +""" + +import logging +from uuid import UUID + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.models.key_set import KeyAccessLog, UserKeySet + +logger = logging.getLogger(__name__) + + +async def store_user_key_set( + db: AsyncSession, + user_id: UUID, + tenant_id: UUID | None, + encrypted_private_key: bytes, + private_key_nonce: bytes, + encrypted_vault_key: bytes, + vault_key_nonce: bytes, + public_key: bytes, + pbkdf2_salt: bytes, + hkdf_salt: bytes, + pbkdf2_iterations: int = 650000, +) -> UserKeySet: + """Store encrypted key bundle during registration. + + Creates a new UserKeySet for the user. Each user has exactly one + key set (UNIQUE constraint on user_id). + + Args: + db: Async database session. + user_id: The user's UUID. + tenant_id: The user's tenant UUID (None for super_admin). + encrypted_private_key: RSA private key wrapped by AUK (AES-GCM). + private_key_nonce: 12-byte AES-GCM nonce for private key. + encrypted_vault_key: Tenant vault key wrapped by user's public key. + vault_key_nonce: 12-byte AES-GCM nonce for vault key. + public_key: RSA-2048 public key in SPKI format. + pbkdf2_salt: 32-byte salt for PBKDF2 key derivation. + hkdf_salt: 32-byte salt for HKDF Secret Key derivation. + pbkdf2_iterations: PBKDF2 iteration count (default 650000). + + Returns: + The created UserKeySet instance. + """ + # Remove any existing key set (e.g. from a failed prior upgrade attempt) + from sqlalchemy import delete + await db.execute(delete(UserKeySet).where(UserKeySet.user_id == user_id)) + + key_set = UserKeySet( + user_id=user_id, + tenant_id=tenant_id, + encrypted_private_key=encrypted_private_key, + private_key_nonce=private_key_nonce, + encrypted_vault_key=encrypted_vault_key, + vault_key_nonce=vault_key_nonce, + public_key=public_key, + pbkdf2_salt=pbkdf2_salt, + hkdf_salt=hkdf_salt, + pbkdf2_iterations=pbkdf2_iterations, + ) + db.add(key_set) + await db.flush() + return key_set + + +async def get_user_key_set( + db: AsyncSession, user_id: UUID +) -> UserKeySet | None: + """Retrieve encrypted key bundle for login response. + + Args: + db: Async database session. + user_id: The user's UUID. + + Returns: + The UserKeySet if found, None otherwise. + """ + result = await db.execute( + select(UserKeySet).where(UserKeySet.user_id == user_id) + ) + return result.scalar_one_or_none() + + +async def log_key_access( + db: AsyncSession, + tenant_id: UUID, + user_id: UUID | None, + action: str, + resource_type: str | None = None, + resource_id: str | None = None, + key_version: int | None = None, + ip_address: str | None = None, + device_id: UUID | None = None, + justification: str | None = None, + correlation_id: str | None = None, +) -> None: + """Append to immutable key_access_log. + + This table is append-only (INSERT+SELECT only via RLS policy). + No UPDATE or DELETE is permitted. + + Args: + db: Async database session. + tenant_id: The tenant UUID for RLS isolation. + user_id: The user who performed the action (None for system ops). + action: Action description (e.g., 'create_key_set', 'decrypt_vault_key'). + resource_type: Optional resource type being accessed. + resource_id: Optional resource identifier. + key_version: Optional key version involved. + ip_address: Optional client IP address. + device_id: Optional device UUID for credential access tracking. + justification: Optional justification for the access (e.g., 'api_backup'). + correlation_id: Optional correlation ID for request tracing. + """ + log_entry = KeyAccessLog( + tenant_id=tenant_id, + user_id=user_id, + action=action, + resource_type=resource_type, + resource_id=resource_id, + key_version=key_version, + ip_address=ip_address, + device_id=device_id, + justification=justification, + correlation_id=correlation_id, + ) + db.add(log_entry) + await db.flush() + + +# --------------------------------------------------------------------------- +# OpenBao Transit tenant key provisioning and credential migration +# --------------------------------------------------------------------------- + + +async def provision_tenant_key(db: AsyncSession, tenant_id: UUID) -> str: + """Provision an OpenBao Transit key for a tenant and update the tenant record. + + Idempotent: if the key already exists in OpenBao, it's a no-op on the + OpenBao side. The tenant record is always updated with the key name. + + Args: + db: Async database session (admin engine, no RLS). + tenant_id: Tenant UUID. + + Returns: + The key name (tenant_{uuid}). + """ + from app.models.tenant import Tenant + from app.services.openbao_service import get_openbao_service + + openbao = get_openbao_service() + key_name = f"tenant_{tenant_id}" + + await openbao.create_tenant_key(str(tenant_id)) + + # Update tenant record with key name + result = await db.execute( + select(Tenant).where(Tenant.id == tenant_id) + ) + tenant = result.scalar_one_or_none() + if tenant: + tenant.openbao_key_name = key_name + await db.flush() + + logger.info( + "Provisioned OpenBao Transit key for tenant %s (key=%s)", + tenant_id, + key_name, + ) + return key_name + + +async def migrate_tenant_credentials(db: AsyncSession, tenant_id: UUID) -> dict: + """Re-encrypt all legacy credentials for a tenant from AES-256-GCM to Transit. + + Migrates device credentials, CA private keys, device cert private keys, + and notification channel secrets. Already-migrated items are skipped. + + Args: + db: Async database session (admin engine, no RLS). + tenant_id: Tenant UUID. + + Returns: + Dict with counts: {"devices": N, "cas": N, "certs": N, "channels": N, "errors": N} + """ + from app.config import settings + from app.models.alert import NotificationChannel + from app.models.certificate import CertificateAuthority, DeviceCertificate + from app.models.device import Device + from app.services.crypto import decrypt_credentials + from app.services.openbao_service import get_openbao_service + + openbao = get_openbao_service() + legacy_key = settings.get_encryption_key_bytes() + tid = str(tenant_id) + + counts = {"devices": 0, "cas": 0, "certs": 0, "channels": 0, "errors": 0} + + # --- Migrate device credentials --- + result = await db.execute( + select(Device).where( + Device.tenant_id == tenant_id, + Device.encrypted_credentials.isnot(None), + (Device.encrypted_credentials_transit.is_(None) | (Device.encrypted_credentials_transit == "")), + ) + ) + for device in result.scalars().all(): + try: + plaintext = decrypt_credentials(device.encrypted_credentials, legacy_key) + device.encrypted_credentials_transit = await openbao.encrypt(tid, plaintext.encode("utf-8")) + counts["devices"] += 1 + except Exception as e: + logger.error("Failed to migrate device %s credentials: %s", device.id, e) + counts["errors"] += 1 + + # --- Migrate CA private keys --- + result = await db.execute( + select(CertificateAuthority).where( + CertificateAuthority.tenant_id == tenant_id, + CertificateAuthority.encrypted_private_key.isnot(None), + (CertificateAuthority.encrypted_private_key_transit.is_(None) | (CertificateAuthority.encrypted_private_key_transit == "")), + ) + ) + for ca in result.scalars().all(): + try: + plaintext = decrypt_credentials(ca.encrypted_private_key, legacy_key) + ca.encrypted_private_key_transit = await openbao.encrypt(tid, plaintext.encode("utf-8")) + counts["cas"] += 1 + except Exception as e: + logger.error("Failed to migrate CA %s private key: %s", ca.id, e) + counts["errors"] += 1 + + # --- Migrate device cert private keys --- + result = await db.execute( + select(DeviceCertificate).where( + DeviceCertificate.tenant_id == tenant_id, + DeviceCertificate.encrypted_private_key.isnot(None), + (DeviceCertificate.encrypted_private_key_transit.is_(None) | (DeviceCertificate.encrypted_private_key_transit == "")), + ) + ) + for cert in result.scalars().all(): + try: + plaintext = decrypt_credentials(cert.encrypted_private_key, legacy_key) + cert.encrypted_private_key_transit = await openbao.encrypt(tid, plaintext.encode("utf-8")) + counts["certs"] += 1 + except Exception as e: + logger.error("Failed to migrate cert %s private key: %s", cert.id, e) + counts["errors"] += 1 + + # --- Migrate notification channel secrets --- + result = await db.execute( + select(NotificationChannel).where( + NotificationChannel.tenant_id == tenant_id, + ) + ) + for ch in result.scalars().all(): + migrated_any = False + try: + # SMTP password + if ch.smtp_password and not ch.smtp_password_transit: + plaintext = decrypt_credentials(ch.smtp_password, legacy_key) + ch.smtp_password_transit = await openbao.encrypt(tid, plaintext.encode("utf-8")) + migrated_any = True + if migrated_any: + counts["channels"] += 1 + except Exception as e: + logger.error("Failed to migrate channel %s secrets: %s", ch.id, e) + counts["errors"] += 1 + + await db.flush() + + logger.info( + "Tenant %s credential migration complete: %s", + tenant_id, + counts, + ) + return counts + + +async def provision_existing_tenants(db: AsyncSession) -> dict: + """Provision OpenBao Transit keys for all existing tenants and migrate credentials. + + Called on app startup to ensure all tenants have Transit keys. + Idempotent -- running multiple times is safe (already-migrated items are skipped). + + Args: + db: Async database session (admin engine, no RLS). + + Returns: + Summary dict with total counts across all tenants. + """ + from app.models.tenant import Tenant + + result = await db.execute(select(Tenant)) + tenants = result.scalars().all() + + total = {"tenants": len(tenants), "devices": 0, "cas": 0, "certs": 0, "channels": 0, "errors": 0} + + for tenant in tenants: + try: + await provision_tenant_key(db, tenant.id) + counts = await migrate_tenant_credentials(db, tenant.id) + total["devices"] += counts["devices"] + total["cas"] += counts["cas"] + total["certs"] += counts["certs"] + total["channels"] += counts["channels"] + total["errors"] += counts["errors"] + except Exception as e: + logger.error("Failed to provision/migrate tenant %s: %s", tenant.id, e) + total["errors"] += 1 + + await db.commit() + + logger.info("Existing tenant provisioning complete: %s", total) + return total diff --git a/backend/app/services/metrics_subscriber.py b/backend/app/services/metrics_subscriber.py new file mode 100644 index 0000000..f637c31 --- /dev/null +++ b/backend/app/services/metrics_subscriber.py @@ -0,0 +1,346 @@ +"""NATS JetStream subscriber for device metrics events. + +Subscribes to device.metrics.> and inserts into TimescaleDB hypertables: + - interface_metrics — per-interface rx/tx byte counters + - health_metrics — CPU, memory, disk, temperature per device + - wireless_metrics — per-wireless-interface aggregated client stats + +Also maintains denormalized last_cpu_load and last_memory_used_pct columns +on the devices table for efficient fleet table display. + +Uses AdminAsyncSessionLocal (superuser bypass RLS) so metrics from any tenant +can be written without setting app.current_tenant. +""" + +import asyncio +import json +import logging +from datetime import datetime, timezone +from typing import Optional + +import nats +from nats.js import JetStreamContext +from nats.aio.client import Client as NATSClient +from sqlalchemy import text + +from app.config import settings +from app.database import AdminAsyncSessionLocal + +logger = logging.getLogger(__name__) + +_metrics_client: Optional[NATSClient] = None + + +# ============================================================================= +# INSERT HANDLERS +# ============================================================================= + + +def _parse_timestamp(val: str | None) -> datetime: + """Parse an ISO 8601 / RFC 3339 timestamp string into a datetime object.""" + if not val: + return datetime.now(timezone.utc) + try: + return datetime.fromisoformat(val.replace("Z", "+00:00")) + except (ValueError, AttributeError): + return datetime.now(timezone.utc) + + +async def _insert_health_metrics(session, data: dict) -> None: + """Insert a health metrics event into health_metrics and update devices.""" + health = data.get("health") + if not health: + logger.warning("health metrics event missing 'health' field — skipping") + return + + device_id = data.get("device_id") + tenant_id = data.get("tenant_id") + collected_at = _parse_timestamp(data.get("collected_at")) + + # Parse numeric values; treat empty strings as NULL. + def parse_int(val: str | None) -> int | None: + if not val: + return None + try: + return int(val) + except (ValueError, TypeError): + return None + + cpu_load = parse_int(health.get("cpu_load")) + free_memory = parse_int(health.get("free_memory")) + total_memory = parse_int(health.get("total_memory")) + free_disk = parse_int(health.get("free_disk")) + total_disk = parse_int(health.get("total_disk")) + temperature = parse_int(health.get("temperature")) + + await session.execute( + text(""" + INSERT INTO health_metrics + (time, device_id, tenant_id, cpu_load, free_memory, total_memory, + free_disk, total_disk, temperature) + VALUES + (:time, :device_id, :tenant_id, :cpu_load, :free_memory, :total_memory, + :free_disk, :total_disk, :temperature) + """), + { + "time": collected_at, + "device_id": device_id, + "tenant_id": tenant_id, + "cpu_load": cpu_load, + "free_memory": free_memory, + "total_memory": total_memory, + "free_disk": free_disk, + "total_disk": total_disk, + "temperature": temperature, + }, + ) + + # Update denormalized columns on devices for fleet table display. + # Compute memory percentage in Python to avoid asyncpg type ambiguity. + mem_pct = None + if total_memory and total_memory > 0 and free_memory is not None: + mem_pct = round((1.0 - free_memory / total_memory) * 100) + + await session.execute( + text(""" + UPDATE devices SET + last_cpu_load = COALESCE(:cpu_load, last_cpu_load), + last_memory_used_pct = COALESCE(:mem_pct, last_memory_used_pct), + updated_at = NOW() + WHERE id = CAST(:device_id AS uuid) + """), + { + "cpu_load": cpu_load, + "mem_pct": mem_pct, + "device_id": device_id, + }, + ) + + +async def _insert_interface_metrics(session, data: dict) -> None: + """Insert per-interface traffic counters into interface_metrics.""" + interfaces = data.get("interfaces") + if not interfaces: + return # Device may have no interfaces (unlikely but safe to skip) + + device_id = data.get("device_id") + tenant_id = data.get("tenant_id") + collected_at = _parse_timestamp(data.get("collected_at")) + + for iface in interfaces: + await session.execute( + text(""" + INSERT INTO interface_metrics + (time, device_id, tenant_id, interface, rx_bytes, tx_bytes, rx_bps, tx_bps) + VALUES + (:time, :device_id, :tenant_id, :interface, :rx_bytes, :tx_bytes, NULL, NULL) + """), + { + "time": collected_at, + "device_id": device_id, + "tenant_id": tenant_id, + "interface": iface.get("name"), + "rx_bytes": iface.get("rx_bytes"), + "tx_bytes": iface.get("tx_bytes"), + }, + ) + + +async def _insert_wireless_metrics(session, data: dict) -> None: + """Insert per-wireless-interface aggregated client stats into wireless_metrics.""" + wireless = data.get("wireless") + if not wireless: + return # Device may have no wireless interfaces + + device_id = data.get("device_id") + tenant_id = data.get("tenant_id") + collected_at = _parse_timestamp(data.get("collected_at")) + + for wif in wireless: + await session.execute( + text(""" + INSERT INTO wireless_metrics + (time, device_id, tenant_id, interface, client_count, avg_signal, ccq, frequency) + VALUES + (:time, :device_id, :tenant_id, :interface, + :client_count, :avg_signal, :ccq, :frequency) + """), + { + "time": collected_at, + "device_id": device_id, + "tenant_id": tenant_id, + "interface": wif.get("interface"), + "client_count": wif.get("client_count"), + "avg_signal": wif.get("avg_signal"), + "ccq": wif.get("ccq"), + "frequency": wif.get("frequency"), + }, + ) + + +# ============================================================================= +# MAIN MESSAGE HANDLER +# ============================================================================= + + +async def on_device_metrics(msg) -> None: + """Handle a device.metrics event published by the Go poller. + + Dispatches to the appropriate insert handler based on the 'type' field: + - "health" → _insert_health_metrics + update devices + - "interfaces" → _insert_interface_metrics + - "wireless" → _insert_wireless_metrics + + On success, acknowledges the message. On error, NAKs so NATS can redeliver. + """ + try: + data = json.loads(msg.data) + metric_type = data.get("type") + device_id = data.get("device_id") + + if not metric_type or not device_id: + logger.warning( + "device.metrics event missing 'type' or 'device_id' — skipping" + ) + await msg.ack() + return + + async with AdminAsyncSessionLocal() as session: + if metric_type == "health": + await _insert_health_metrics(session, data) + elif metric_type == "interfaces": + await _insert_interface_metrics(session, data) + elif metric_type == "wireless": + await _insert_wireless_metrics(session, data) + else: + logger.warning("Unknown metric type '%s' — skipping", metric_type) + await msg.ack() + return + + await session.commit() + + # Alert evaluation — non-fatal; metric write is the primary operation + try: + from app.services import alert_evaluator + await alert_evaluator.evaluate( + device_id=device_id, + tenant_id=data.get("tenant_id", ""), + metric_type=metric_type, + data=data, + ) + except Exception as eval_err: + logger.warning("Alert evaluation failed for device %s: %s", device_id, eval_err) + + logger.debug( + "device.metrics processed", + extra={"device_id": device_id, "type": metric_type}, + ) + await msg.ack() + + except Exception as exc: + logger.error( + "Failed to process device.metrics event: %s", + exc, + exc_info=True, + ) + try: + await msg.nak() + except Exception: + pass # If NAK also fails, NATS will redeliver after ack_wait + + +# ============================================================================= +# SUBSCRIPTION SETUP +# ============================================================================= + + +async def _subscribe_with_retry(js: JetStreamContext) -> None: + """Subscribe to device.metrics.> with durable consumer, retrying if stream not ready.""" + max_attempts = 6 # ~30 seconds at 5s intervals + for attempt in range(1, max_attempts + 1): + try: + await js.subscribe( + "device.metrics.>", + cb=on_device_metrics, + durable="api-metrics-consumer", + stream="DEVICE_EVENTS", + ) + logger.info( + "NATS: subscribed to device.metrics.> (durable: api-metrics-consumer)" + ) + return + except Exception as exc: + if attempt < max_attempts: + logger.warning( + "NATS: stream DEVICE_EVENTS not ready for metrics (attempt %d/%d): %s — retrying in 5s", + attempt, + max_attempts, + exc, + ) + await asyncio.sleep(5) + else: + logger.warning( + "NATS: giving up on device.metrics.> after %d attempts: %s — API will run without metrics ingestion", + max_attempts, + exc, + ) + return + + +async def start_metrics_subscriber() -> Optional[NATSClient]: + """Connect to NATS and start the device.metrics.> subscription. + + Uses a separate NATS connection from the status subscriber — simpler and + NATS handles multiple connections per client efficiently. + + Returns the NATS connection (must be passed to stop_metrics_subscriber on shutdown). + Raises on fatal connection errors after retry exhaustion. + """ + global _metrics_client + + logger.info("NATS metrics: connecting to %s", settings.NATS_URL) + + nc = await nats.connect( + settings.NATS_URL, + max_reconnect_attempts=-1, + reconnect_time_wait=2, + error_cb=_on_error, + reconnected_cb=_on_reconnected, + disconnected_cb=_on_disconnected, + ) + + logger.info("NATS metrics: connected to %s", settings.NATS_URL) + + js = nc.jetstream() + await _subscribe_with_retry(js) + + _metrics_client = nc + return nc + + +async def stop_metrics_subscriber(nc: Optional[NATSClient]) -> None: + """Drain and close the metrics NATS connection gracefully.""" + if nc is None: + return + try: + logger.info("NATS metrics: draining connection...") + await nc.drain() + logger.info("NATS metrics: connection closed") + except Exception as exc: + logger.warning("NATS metrics: error during drain: %s", exc) + try: + await nc.close() + except Exception: + pass + + +async def _on_error(exc: Exception) -> None: + logger.error("NATS metrics error: %s", exc) + + +async def _on_reconnected() -> None: + logger.info("NATS metrics: reconnected") + + +async def _on_disconnected() -> None: + logger.warning("NATS metrics: disconnected") diff --git a/backend/app/services/nats_subscriber.py b/backend/app/services/nats_subscriber.py new file mode 100644 index 0000000..123127e --- /dev/null +++ b/backend/app/services/nats_subscriber.py @@ -0,0 +1,231 @@ +"""NATS JetStream subscriber for device status events from the Go poller. + +Subscribes to device.status.> and updates device records in PostgreSQL. +This is a system-level process that needs to update devices across all tenants, +so it uses the admin engine (bypasses RLS). +""" + +import asyncio +import json +import logging +import re +from datetime import datetime, timezone +from typing import Optional + +import nats +from nats.js import JetStreamContext +from nats.aio.client import Client as NATSClient +from sqlalchemy import text + +from app.config import settings +from app.database import AdminAsyncSessionLocal + +logger = logging.getLogger(__name__) + +_nats_client: Optional[NATSClient] = None + +# Regex for RouterOS uptime strings like "42d14h23m15s", "14h23m15s", "23m15s", "3w2d" +_UPTIME_RE = re.compile(r"(?:(\d+)w)?(?:(\d+)d)?(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s)?") + + +def _parse_uptime(raw: str) -> int | None: + """Parse a RouterOS uptime string into total seconds.""" + if not raw: + return None + m = _UPTIME_RE.fullmatch(raw) + if not m: + return None + weeks = int(m.group(1) or 0) + days = int(m.group(2) or 0) + hours = int(m.group(3) or 0) + minutes = int(m.group(4) or 0) + seconds = int(m.group(5) or 0) + total = weeks * 604800 + days * 86400 + hours * 3600 + minutes * 60 + seconds + return total if total > 0 else None + + +async def on_device_status(msg) -> None: + """Handle a device.status event published by the Go poller. + + Payload (JSON): + device_id (str) — UUID of the device + tenant_id (str) — UUID of the owning tenant + status (str) — "online" or "offline" + routeros_version (str | None) — e.g. "7.16.2" + major_version (int | None) — e.g. 7 + board_name (str | None) — e.g. "RB4011iGS+5HacQ2HnD" + last_seen (str | None) — ISO-8601 timestamp + """ + try: + data = json.loads(msg.data) + device_id = data.get("device_id") + status = data.get("status") + routeros_version = data.get("routeros_version") + major_version = data.get("major_version") + board_name = data.get("board_name") + last_seen_raw = data.get("last_seen") + serial_number = data.get("serial_number") or None + firmware_version = data.get("firmware_version") or None + uptime_seconds = _parse_uptime(data.get("uptime", "")) + + if not device_id or not status: + logger.warning("Received device.status event with missing device_id or status — skipping") + await msg.ack() + return + + # Parse timestamp in Python — asyncpg needs datetime objects, not strings + last_seen_dt = None + if last_seen_raw: + try: + last_seen_dt = datetime.fromisoformat(last_seen_raw.replace("Z", "+00:00")) + except (ValueError, AttributeError): + last_seen_dt = datetime.now(timezone.utc) + + async with AdminAsyncSessionLocal() as session: + await session.execute( + text( + """ + UPDATE devices SET + status = :status, + routeros_version = COALESCE(:routeros_version, routeros_version), + routeros_major_version = COALESCE(:major_version, routeros_major_version), + model = COALESCE(:board_name, model), + serial_number = COALESCE(:serial_number, serial_number), + firmware_version = COALESCE(:firmware_version, firmware_version), + uptime_seconds = COALESCE(:uptime_seconds, uptime_seconds), + last_seen = COALESCE(:last_seen, last_seen), + updated_at = NOW() + WHERE id = CAST(:device_id AS uuid) + """ + ), + { + "status": status, + "routeros_version": routeros_version, + "major_version": major_version, + "board_name": board_name, + "serial_number": serial_number, + "firmware_version": firmware_version, + "uptime_seconds": uptime_seconds, + "last_seen": last_seen_dt, + "device_id": device_id, + }, + ) + await session.commit() + + # Alert evaluation for offline/online status changes — non-fatal + try: + from app.services import alert_evaluator + if status == "offline": + await alert_evaluator.evaluate_offline(device_id, data.get("tenant_id", "")) + elif status == "online": + await alert_evaluator.evaluate_online(device_id, data.get("tenant_id", "")) + except Exception as e: + logger.warning("Alert evaluation failed for device %s status=%s: %s", device_id, status, e) + + logger.info( + "Device status updated", + extra={ + "device_id": device_id, + "status": status, + "routeros_version": routeros_version, + }, + ) + await msg.ack() + + except Exception as exc: + logger.error( + "Failed to process device.status event: %s", + exc, + exc_info=True, + ) + try: + await msg.nak() + except Exception: + pass # If NAK also fails, NATS will redeliver after ack_wait + + +async def _subscribe_with_retry(js: JetStreamContext) -> None: + """Subscribe to device.status.> with durable consumer, retrying if stream not ready.""" + max_attempts = 6 # ~30 seconds at 5s intervals + for attempt in range(1, max_attempts + 1): + try: + await js.subscribe( + "device.status.>", + cb=on_device_status, + durable="api-status-consumer", + stream="DEVICE_EVENTS", + ) + logger.info("NATS: subscribed to device.status.> (durable: api-status-consumer)") + return + except Exception as exc: + if attempt < max_attempts: + logger.warning( + "NATS: stream DEVICE_EVENTS not ready (attempt %d/%d): %s — retrying in 5s", + attempt, + max_attempts, + exc, + ) + await asyncio.sleep(5) + else: + logger.warning( + "NATS: giving up on device.status.> after %d attempts: %s — API will run without real-time status updates", + max_attempts, + exc, + ) + return + + +async def start_nats_subscriber() -> Optional[NATSClient]: + """Connect to NATS and start the device.status.> subscription. + + Returns the NATS connection (must be passed to stop_nats_subscriber on shutdown). + Raises on fatal connection errors after retry exhaustion. + """ + global _nats_client + + logger.info("NATS: connecting to %s", settings.NATS_URL) + + nc = await nats.connect( + settings.NATS_URL, + max_reconnect_attempts=-1, # reconnect forever (pod-to-pod transient failures) + reconnect_time_wait=2, + error_cb=_on_error, + reconnected_cb=_on_reconnected, + disconnected_cb=_on_disconnected, + ) + + logger.info("NATS: connected to %s", settings.NATS_URL) + + js = nc.jetstream() + await _subscribe_with_retry(js) + + _nats_client = nc + return nc + + +async def stop_nats_subscriber(nc: Optional[NATSClient]) -> None: + """Drain and close the NATS connection gracefully.""" + if nc is None: + return + try: + logger.info("NATS: draining connection...") + await nc.drain() + logger.info("NATS: connection closed") + except Exception as exc: + logger.warning("NATS: error during drain: %s", exc) + try: + await nc.close() + except Exception: + pass + + +async def _on_error(exc: Exception) -> None: + logger.error("NATS error: %s", exc) + + +async def _on_reconnected() -> None: + logger.info("NATS: reconnected") + + +async def _on_disconnected() -> None: + logger.warning("NATS: disconnected") diff --git a/backend/app/services/notification_service.py b/backend/app/services/notification_service.py new file mode 100644 index 0000000..0f1f31b --- /dev/null +++ b/backend/app/services/notification_service.py @@ -0,0 +1,256 @@ +"""Email and webhook notification delivery for alert events. + +Best-effort delivery: failures are logged but never raised. +Each dispatch is wrapped in try/except so one failing channel +doesn't prevent delivery to other channels. +""" + +import logging +from typing import Any + +import httpx + + +logger = logging.getLogger(__name__) + + +async def dispatch_notifications( + alert_event: dict[str, Any], + channels: list[dict[str, Any]], + device_hostname: str, +) -> None: + """Send notifications for an alert event to all provided channels. + + Args: + alert_event: Dict with alert event fields (status, severity, metric, etc.) + channels: List of notification channel dicts + device_hostname: Human-readable device name for messages + """ + for channel in channels: + try: + if channel["channel_type"] == "email": + await _send_email(channel, alert_event, device_hostname) + elif channel["channel_type"] == "webhook": + await _send_webhook(channel, alert_event, device_hostname) + elif channel["channel_type"] == "slack": + await _send_slack(channel, alert_event, device_hostname) + else: + logger.warning("Unknown channel type: %s", channel["channel_type"]) + except Exception as e: + logger.warning( + "Notification delivery failed for channel %s (%s): %s", + channel.get("name"), channel.get("channel_type"), e, + ) + + +async def _send_email(channel: dict, alert_event: dict, device_hostname: str) -> None: + """Send alert notification email using per-channel SMTP config.""" + from app.services.email_service import SMTPConfig, send_email + + severity = alert_event.get("severity", "warning") + status = alert_event.get("status", "firing") + rule_name = alert_event.get("rule_name") or alert_event.get("message", "Unknown Rule") + metric = alert_event.get("metric_name") or alert_event.get("metric", "") + value = alert_event.get("current_value") or alert_event.get("value", "") + threshold = alert_event.get("threshold", "") + + severity_colors = { + "critical": "#ef4444", + "warning": "#f59e0b", + "info": "#38bdf8", + } + color = severity_colors.get(severity, "#38bdf8") + status_label = "RESOLVED" if status == "resolved" else "FIRING" + + html = f""" +
+
+

[{status_label}] {rule_name}

+
+
+ + + + + + +
Device{device_hostname}
Severity{severity.upper()}
Metric{metric}
Value{value}
Threshold{threshold}
+

+ TOD — Fleet Management for MikroTik RouterOS +

+
+
+ """ + + plain = ( + f"[{status_label}] {rule_name}\n\n" + f"Device: {device_hostname}\n" + f"Severity: {severity}\n" + f"Metric: {metric}\n" + f"Value: {value}\n" + f"Threshold: {threshold}\n" + ) + + # Decrypt SMTP password (Transit first, then legacy Fernet) + smtp_password = None + transit_cipher = channel.get("smtp_password_transit") + legacy_cipher = channel.get("smtp_password") + tenant_id = channel.get("tenant_id") + + if transit_cipher and tenant_id: + try: + from app.services.kms_service import decrypt_transit + smtp_password = await decrypt_transit(transit_cipher, tenant_id) + except Exception: + logger.warning("Transit decryption failed for channel %s, trying legacy", channel.get("id")) + + if not smtp_password and legacy_cipher: + try: + from app.config import settings as app_settings + from cryptography.fernet import Fernet + raw = bytes(legacy_cipher) if isinstance(legacy_cipher, memoryview) else legacy_cipher + f = Fernet(app_settings.CREDENTIAL_ENCRYPTION_KEY.encode()) + smtp_password = f.decrypt(raw).decode() + except Exception: + logger.warning("Legacy decryption failed for channel %s", channel.get("id")) + + config = SMTPConfig( + host=channel.get("smtp_host", "localhost"), + port=channel.get("smtp_port", 587), + user=channel.get("smtp_user"), + password=smtp_password, + use_tls=channel.get("smtp_use_tls", False), + from_address=channel.get("from_address") or "alerts@mikrotik-portal.local", + ) + + to = channel.get("to_address") + subject = f"[TOD {status_label}] {rule_name} — {device_hostname}" + await send_email(to, subject, html, plain, config) + + +async def _send_webhook( + channel: dict[str, Any], + alert_event: dict[str, Any], + device_hostname: str, +) -> None: + """Send alert notification to a webhook URL (Slack-compatible JSON).""" + severity = alert_event.get("severity", "info") + status = alert_event.get("status", "firing") + metric = alert_event.get("metric") + value = alert_event.get("value") + threshold = alert_event.get("threshold") + message_text = alert_event.get("message", "") + + payload = { + "alert_name": message_text, + "severity": severity, + "status": status, + "device": device_hostname, + "device_id": alert_event.get("device_id"), + "metric": metric, + "value": value, + "threshold": threshold, + "timestamp": str(alert_event.get("fired_at", "")), + "text": f"[{severity.upper()}] {device_hostname}: {message_text}", + } + + webhook_url = channel.get("webhook_url", "") + if not webhook_url: + logger.warning("Webhook channel %s has no URL configured", channel.get("name")) + return + + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.post(webhook_url, json=payload) + logger.info( + "Webhook notification sent to %s — status %d", + webhook_url, response.status_code, + ) + + +async def _send_slack( + channel: dict[str, Any], + alert_event: dict[str, Any], + device_hostname: str, +) -> None: + """Send alert notification to Slack via incoming webhook with Block Kit formatting.""" + severity = alert_event.get("severity", "info").upper() + status = alert_event.get("status", "firing") + metric = alert_event.get("metric", "unknown") + message_text = alert_event.get("message", "") + value = alert_event.get("value") + threshold = alert_event.get("threshold") + + color = {"CRITICAL": "#dc2626", "WARNING": "#f59e0b", "INFO": "#3b82f6"}.get(severity, "#6b7280") + status_label = "RESOLVED" if status == "resolved" else status + + blocks = [ + { + "type": "header", + "text": {"type": "plain_text", "text": f"{'✅' if status == 'resolved' else '🚨'} [{severity}] {status_label.upper()}"}, + }, + { + "type": "section", + "fields": [ + {"type": "mrkdwn", "text": f"*Device:*\n{device_hostname}"}, + {"type": "mrkdwn", "text": f"*Metric:*\n{metric}"}, + ], + }, + ] + if value is not None or threshold is not None: + fields = [] + if value is not None: + fields.append({"type": "mrkdwn", "text": f"*Value:*\n{value}"}) + if threshold is not None: + fields.append({"type": "mrkdwn", "text": f"*Threshold:*\n{threshold}"}) + blocks.append({"type": "section", "fields": fields}) + + if message_text: + blocks.append({"type": "section", "text": {"type": "mrkdwn", "text": f"*Message:*\n{message_text}"}}) + + blocks.append({"type": "context", "elements": [{"type": "mrkdwn", "text": "TOD Alert System"}]}) + + slack_url = channel.get("slack_webhook_url", "") + if not slack_url: + logger.warning("Slack channel %s has no webhook URL configured", channel.get("name")) + return + + payload = {"attachments": [{"color": color, "blocks": blocks}]} + + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.post(slack_url, json=payload) + logger.info("Slack notification sent — status %d", response.status_code) + + +async def send_test_notification(channel: dict[str, Any]) -> bool: + """Send a test notification through a channel to verify configuration. + + Args: + channel: Notification channel dict with all config fields + + Returns: + True on success + + Raises: + Exception on delivery failure (caller handles) + """ + test_event = { + "status": "test", + "severity": "info", + "metric": "test", + "value": None, + "threshold": None, + "message": "Test notification from TOD", + "device_id": "00000000-0000-0000-0000-000000000000", + "fired_at": "", + } + + if channel["channel_type"] == "email": + await _send_email(channel, test_event, "Test Device") + elif channel["channel_type"] == "webhook": + await _send_webhook(channel, test_event, "Test Device") + elif channel["channel_type"] == "slack": + await _send_slack(channel, test_event, "Test Device") + else: + raise ValueError(f"Unknown channel type: {channel['channel_type']}") + + return True diff --git a/backend/app/services/openbao_service.py b/backend/app/services/openbao_service.py new file mode 100644 index 0000000..a7d6f83 --- /dev/null +++ b/backend/app/services/openbao_service.py @@ -0,0 +1,174 @@ +""" +OpenBao Transit secrets engine client for per-tenant envelope encryption. + +Provides encrypt/decrypt operations via OpenBao's HTTP API. Each tenant gets +a dedicated Transit key (tenant_{uuid}) for AES-256-GCM encryption. The key +material never leaves OpenBao -- the application only sees ciphertext. + +Ciphertext format: "vault:v1:base64..." (compatible with Vault Transit format) +""" +import base64 +import logging +from typing import Optional + +import httpx + +from app.config import settings + +logger = logging.getLogger(__name__) + + +class OpenBaoTransitService: + """Async client for OpenBao Transit secrets engine.""" + + def __init__(self, addr: str | None = None, token: str | None = None): + self.addr = addr or settings.OPENBAO_ADDR + self.token = token or settings.OPENBAO_TOKEN + self._client: httpx.AsyncClient | None = None + + async def _get_client(self) -> httpx.AsyncClient: + if self._client is None or self._client.is_closed: + self._client = httpx.AsyncClient( + base_url=self.addr, + headers={"X-Vault-Token": self.token}, + timeout=5.0, + ) + return self._client + + async def close(self) -> None: + if self._client and not self._client.is_closed: + await self._client.aclose() + self._client = None + + async def create_tenant_key(self, tenant_id: str) -> None: + """Create Transit encryption keys for a tenant (credential + data). Idempotent.""" + client = await self._get_client() + + # Credential key: tenant_{uuid} + key_name = f"tenant_{tenant_id}" + resp = await client.post( + f"/v1/transit/keys/{key_name}", + json={"type": "aes256-gcm96"}, + ) + if resp.status_code not in (200, 204): + resp.raise_for_status() + logger.info("OpenBao Transit key ensured", extra={"key_name": key_name}) + + # Data key: tenant_{uuid}_data (Phase 30) + await self.create_tenant_data_key(tenant_id) + + async def encrypt(self, tenant_id: str, plaintext: bytes) -> str: + """Encrypt plaintext via Transit engine. Returns ciphertext string.""" + client = await self._get_client() + key_name = f"tenant_{tenant_id}" + resp = await client.post( + f"/v1/transit/encrypt/{key_name}", + json={"plaintext": base64.b64encode(plaintext).decode()}, + ) + resp.raise_for_status() + ciphertext = resp.json()["data"]["ciphertext"] + return ciphertext # "vault:v1:..." + + async def decrypt(self, tenant_id: str, ciphertext: str) -> bytes: + """Decrypt Transit ciphertext. Returns plaintext bytes.""" + client = await self._get_client() + key_name = f"tenant_{tenant_id}" + resp = await client.post( + f"/v1/transit/decrypt/{key_name}", + json={"ciphertext": ciphertext}, + ) + resp.raise_for_status() + plaintext_b64 = resp.json()["data"]["plaintext"] + return base64.b64decode(plaintext_b64) + + async def key_exists(self, tenant_id: str) -> bool: + """Check if a Transit key exists for a tenant.""" + client = await self._get_client() + key_name = f"tenant_{tenant_id}" + resp = await client.get(f"/v1/transit/keys/{key_name}") + return resp.status_code == 200 + + # ------------------------------------------------------------------ + # Data encryption keys (tenant_{uuid}_data) — Phase 30 + # ------------------------------------------------------------------ + + async def create_tenant_data_key(self, tenant_id: str) -> None: + """Create a Transit data encryption key for a tenant. Idempotent. + + Data keys use the suffix '_data' to separate them from credential keys. + Key naming: tenant_{uuid}_data (vs tenant_{uuid} for credentials). + """ + client = await self._get_client() + key_name = f"tenant_{tenant_id}_data" + resp = await client.post( + f"/v1/transit/keys/{key_name}", + json={"type": "aes256-gcm96"}, + ) + if resp.status_code not in (200, 204): + resp.raise_for_status() + logger.info("OpenBao Transit data key ensured", extra={"key_name": key_name}) + + async def ensure_tenant_data_key(self, tenant_id: str) -> None: + """Ensure a data encryption key exists for a tenant. Idempotent. + + Checks existence first and creates if missing. Safe to call on every + encrypt operation (fast path: single GET to check existence). + """ + client = await self._get_client() + key_name = f"tenant_{tenant_id}_data" + resp = await client.get(f"/v1/transit/keys/{key_name}") + if resp.status_code != 200: + await self.create_tenant_data_key(tenant_id) + + async def encrypt_data(self, tenant_id: str, plaintext: bytes) -> str: + """Encrypt data via Transit using per-tenant data key. + + Uses the tenant_{uuid}_data key (separate from credential key). + + Args: + tenant_id: Tenant UUID string. + plaintext: Raw bytes to encrypt. + + Returns: + Transit ciphertext string (vault:v1:...). + """ + client = await self._get_client() + key_name = f"tenant_{tenant_id}_data" + resp = await client.post( + f"/v1/transit/encrypt/{key_name}", + json={"plaintext": base64.b64encode(plaintext).decode()}, + ) + resp.raise_for_status() + return resp.json()["data"]["ciphertext"] + + async def decrypt_data(self, tenant_id: str, ciphertext: str) -> bytes: + """Decrypt Transit data ciphertext using per-tenant data key. + + Args: + tenant_id: Tenant UUID string. + ciphertext: Transit ciphertext (vault:v1:...). + + Returns: + Decrypted plaintext bytes. + """ + client = await self._get_client() + key_name = f"tenant_{tenant_id}_data" + resp = await client.post( + f"/v1/transit/decrypt/{key_name}", + json={"ciphertext": ciphertext}, + ) + resp.raise_for_status() + plaintext_b64 = resp.json()["data"]["plaintext"] + return base64.b64decode(plaintext_b64) + + +# Module-level singleton +_openbao_service: Optional[OpenBaoTransitService] = None + + +def get_openbao_service() -> OpenBaoTransitService: + """Return module-level OpenBao Transit service singleton.""" + global _openbao_service + if _openbao_service is None: + _openbao_service = OpenBaoTransitService() + return _openbao_service diff --git a/backend/app/services/push_rollback_subscriber.py b/backend/app/services/push_rollback_subscriber.py new file mode 100644 index 0000000..51766b9 --- /dev/null +++ b/backend/app/services/push_rollback_subscriber.py @@ -0,0 +1,141 @@ +"""NATS subscribers for push rollback (auto) and push alert (manual). + +- config.push.rollback.> -> auto-restore for template pushes +- config.push.alert.> -> create alert for editor pushes +""" + +import json +import logging +from typing import Any, Optional + +from app.config import settings +from app.database import AdminAsyncSessionLocal +from app.models.alert import AlertEvent +from app.services import restore_service + +logger = logging.getLogger(__name__) + +_nc: Optional[Any] = None + + +async def _create_push_alert(device_id: str, tenant_id: str, push_type: str) -> None: + """Create a high-priority alert for device offline after config push.""" + async with AdminAsyncSessionLocal() as session: + alert = AlertEvent( + device_id=device_id, + tenant_id=tenant_id, + status="firing", + severity="critical", + message=f"Device went offline after config {push_type} — rollback available", + ) + session.add(alert) + await session.commit() + logger.info("Created push alert for device %s (type=%s)", device_id, push_type) + + +async def handle_push_rollback(event: dict) -> None: + """Auto-rollback: restore device to pre-push config.""" + device_id = event.get("device_id") + tenant_id = event.get("tenant_id") + commit_sha = event.get("pre_push_commit_sha") + + if not all([device_id, tenant_id, commit_sha]): + logger.warning("Push rollback event missing fields: %s", event) + return + + logger.warning( + "AUTO-ROLLBACK: Device %s offline after template push, restoring to %s", + device_id, + commit_sha, + ) + + try: + async with AdminAsyncSessionLocal() as session: + result = await restore_service.restore_config( + device_id=device_id, + tenant_id=tenant_id, + commit_sha=commit_sha, + db_session=session, + ) + await session.commit() + logger.info( + "Auto-rollback result for device %s: %s", + device_id, + result.get("status"), + ) + except Exception as e: + logger.error("Auto-rollback failed for device %s: %s", device_id, e) + await _create_push_alert(device_id, tenant_id, "template (auto-rollback failed)") + + +async def handle_push_alert(event: dict) -> None: + """Alert: create notification for device offline after editor push.""" + device_id = event.get("device_id") + tenant_id = event.get("tenant_id") + push_type = event.get("push_type", "editor") + + if not device_id or not tenant_id: + logger.warning("Push alert event missing fields: %s", event) + return + + await _create_push_alert(device_id, tenant_id, push_type) + + +async def _on_rollback_message(msg) -> None: + """NATS message handler for config.push.rollback.> subjects.""" + try: + event = json.loads(msg.data.decode()) + await handle_push_rollback(event) + await msg.ack() + except Exception as e: + logger.error("Error handling rollback message: %s", e) + await msg.nak() + + +async def _on_alert_message(msg) -> None: + """NATS message handler for config.push.alert.> subjects.""" + try: + event = json.loads(msg.data.decode()) + await handle_push_alert(event) + await msg.ack() + except Exception as e: + logger.error("Error handling push alert message: %s", e) + await msg.nak() + + +async def start_push_rollback_subscriber() -> Optional[Any]: + """Connect to NATS and subscribe to push rollback/alert events.""" + import nats + + global _nc + try: + logger.info("NATS push-rollback: connecting to %s", settings.NATS_URL) + _nc = await nats.connect(settings.NATS_URL) + js = _nc.jetstream() + await js.subscribe( + "config.push.rollback.>", + cb=_on_rollback_message, + durable="api-push-rollback-consumer", + stream="DEVICE_EVENTS", + manual_ack=True, + ) + await js.subscribe( + "config.push.alert.>", + cb=_on_alert_message, + durable="api-push-alert-consumer", + stream="DEVICE_EVENTS", + manual_ack=True, + ) + logger.info("Push rollback/alert subscriber started") + return _nc + except Exception as e: + logger.error("Failed to start push rollback subscriber: %s", e) + return None + + +async def stop_push_rollback_subscriber() -> None: + """Gracefully close the NATS connection.""" + global _nc + if _nc: + await _nc.drain() + _nc = None diff --git a/backend/app/services/push_tracker.py b/backend/app/services/push_tracker.py new file mode 100644 index 0000000..41d209d --- /dev/null +++ b/backend/app/services/push_tracker.py @@ -0,0 +1,70 @@ +"""Track recent config pushes in Redis for poller-aware rollback. + +When a device goes offline shortly after a push, the poller checks these +keys and triggers rollback (template/restore) or alert (editor). + +Redis key format: push:recent:{device_id} +TTL: 300 seconds (5 minutes) +""" + +import json +import logging +from typing import Optional + +import redis.asyncio as redis + +from app.config import settings + +logger = logging.getLogger(__name__) + +PUSH_TTL_SECONDS = 300 # 5 minutes + +_redis: Optional[redis.Redis] = None + + +async def _get_redis() -> redis.Redis: + global _redis + if _redis is None: + _redis = redis.from_url(settings.REDIS_URL) + return _redis + + +async def record_push( + device_id: str, + tenant_id: str, + push_type: str, + push_operation_id: str = "", + pre_push_commit_sha: str = "", +) -> None: + """Record a recent config push in Redis. + + Args: + device_id: UUID of the device. + tenant_id: UUID of the tenant. + push_type: 'template' (auto-rollback) or 'editor' (alert only) or 'restore'. + push_operation_id: ID of the ConfigPushOperation row. + pre_push_commit_sha: Git SHA of the pre-push backup (for rollback). + """ + r = await _get_redis() + key = f"push:recent:{device_id}" + value = json.dumps({ + "device_id": device_id, + "tenant_id": tenant_id, + "push_type": push_type, + "push_operation_id": push_operation_id, + "pre_push_commit_sha": pre_push_commit_sha, + }) + await r.set(key, value, ex=PUSH_TTL_SECONDS) + logger.debug( + "Recorded push for device %s (type=%s, TTL=%ds)", + device_id, + push_type, + PUSH_TTL_SECONDS, + ) + + +async def clear_push(device_id: str) -> None: + """Clear the push tracking key (e.g., after successful commit).""" + r = await _get_redis() + await r.delete(f"push:recent:{device_id}") + logger.debug("Cleared push tracking for device %s", device_id) diff --git a/backend/app/services/report_service.py b/backend/app/services/report_service.py new file mode 100644 index 0000000..db9177a --- /dev/null +++ b/backend/app/services/report_service.py @@ -0,0 +1,572 @@ +"""Report generation service. + +Generates PDF (via Jinja2 + weasyprint) and CSV reports for: +- Device inventory +- Metrics summary +- Alert history +- Change log (audit_logs if available, else config_backups fallback) + +Phase 30 NOTE: Reports are currently ephemeral (generated on-demand per request, +never stored at rest). DATAENC-03 requires "report content is encrypted before +storage." Since no report storage exists yet, encryption will be applied when +report caching/storage is added. The generation pipeline is Transit-ready -- +wrap the file_bytes with encrypt_data_transit() before any future INSERT. +""" + +import csv +import io +import os +import time +from datetime import datetime +from typing import Any, Optional +from uuid import UUID + +import structlog +from jinja2 import Environment, FileSystemLoader +from sqlalchemy import text +from sqlalchemy.ext.asyncio import AsyncSession + +logger = structlog.get_logger(__name__) + +# Jinja2 environment pointing at the templates directory +_TEMPLATE_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "templates") +_jinja_env = Environment( + loader=FileSystemLoader(_TEMPLATE_DIR), + autoescape=True, +) + + +async def generate_report( + db: AsyncSession, + tenant_id: UUID, + report_type: str, + date_from: Optional[datetime], + date_to: Optional[datetime], + fmt: str = "pdf", +) -> tuple[bytes, str, str]: + """Generate a report and return (file_bytes, content_type, filename). + + Args: + db: RLS-enforced async session (tenant context already set). + tenant_id: Tenant UUID for scoping. + report_type: One of device_inventory, metrics_summary, alert_history, change_log. + date_from: Start date for time-ranged reports. + date_to: End date for time-ranged reports. + fmt: Output format -- "pdf" or "csv". + + Returns: + Tuple of (file_bytes, content_type, filename). + """ + start = time.monotonic() + + # Fetch tenant name for the header + tenant_name = await _get_tenant_name(db, tenant_id) + + # Dispatch to the appropriate handler + handlers = { + "device_inventory": _device_inventory, + "metrics_summary": _metrics_summary, + "alert_history": _alert_history, + "change_log": _change_log, + } + handler = handlers[report_type] + template_data = await handler(db, tenant_id, date_from, date_to) + + # Common template context + generated_at = datetime.utcnow().strftime("%Y-%m-%d %H:%M UTC") + base_context = { + "tenant_name": tenant_name, + "generated_at": generated_at, + } + + timestamp_str = datetime.utcnow().strftime("%Y%m%d_%H%M%S") + + if fmt == "csv": + file_bytes = _render_csv(report_type, template_data) + content_type = "text/csv; charset=utf-8" + filename = f"{report_type}_{timestamp_str}.csv" + else: + file_bytes = _render_pdf(report_type, {**base_context, **template_data}) + content_type = "application/pdf" + filename = f"{report_type}_{timestamp_str}.pdf" + + elapsed = time.monotonic() - start + logger.info( + "report_generated", + report_type=report_type, + format=fmt, + tenant_id=str(tenant_id), + size_bytes=len(file_bytes), + elapsed_seconds=round(elapsed, 2), + ) + + return file_bytes, content_type, filename + + +# --------------------------------------------------------------------------- +# Tenant name helper +# --------------------------------------------------------------------------- + + +async def _get_tenant_name(db: AsyncSession, tenant_id: UUID) -> str: + """Fetch the tenant name by ID.""" + result = await db.execute( + text("SELECT name FROM tenants WHERE id = CAST(:tid AS uuid)"), + {"tid": str(tenant_id)}, + ) + row = result.fetchone() + return row[0] if row else "Unknown Tenant" + + +# --------------------------------------------------------------------------- +# Report type handlers +# --------------------------------------------------------------------------- + + +async def _device_inventory( + db: AsyncSession, + tenant_id: UUID, + date_from: Optional[datetime], + date_to: Optional[datetime], +) -> dict[str, Any]: + """Gather device inventory data.""" + result = await db.execute( + text(""" + SELECT d.hostname, d.ip_address, d.model, d.routeros_version, + d.status, d.last_seen, d.uptime_seconds, + COALESCE( + (SELECT string_agg(dg.name, ', ') + FROM device_group_memberships dgm + JOIN device_groups dg ON dg.id = dgm.group_id + WHERE dgm.device_id = d.id), + '' + ) AS groups + FROM devices d + ORDER BY d.hostname ASC + """) + ) + rows = result.fetchall() + + devices = [] + online_count = 0 + offline_count = 0 + unknown_count = 0 + + for row in rows: + status = row[4] + if status == "online": + online_count += 1 + elif status == "offline": + offline_count += 1 + else: + unknown_count += 1 + + uptime_str = _format_uptime(row[6]) if row[6] else None + last_seen_str = row[5].strftime("%Y-%m-%d %H:%M") if row[5] else None + + devices.append({ + "hostname": row[0], + "ip_address": row[1], + "model": row[2], + "routeros_version": row[3], + "status": status, + "last_seen": last_seen_str, + "uptime": uptime_str, + "groups": row[7] if row[7] else None, + }) + + return { + "report_title": "Device Inventory", + "devices": devices, + "total_devices": len(devices), + "online_count": online_count, + "offline_count": offline_count, + "unknown_count": unknown_count, + } + + +async def _metrics_summary( + db: AsyncSession, + tenant_id: UUID, + date_from: Optional[datetime], + date_to: Optional[datetime], +) -> dict[str, Any]: + """Gather metrics summary data grouped by device.""" + result = await db.execute( + text(""" + SELECT d.hostname, + AVG(hm.cpu_load) AS avg_cpu, + MAX(hm.cpu_load) AS peak_cpu, + AVG(CASE WHEN hm.total_memory > 0 + THEN 100.0 * (hm.total_memory - hm.free_memory) / hm.total_memory + END) AS avg_mem, + MAX(CASE WHEN hm.total_memory > 0 + THEN 100.0 * (hm.total_memory - hm.free_memory) / hm.total_memory + END) AS peak_mem, + AVG(CASE WHEN hm.total_disk > 0 + THEN 100.0 * (hm.total_disk - hm.free_disk) / hm.total_disk + END) AS avg_disk, + AVG(hm.temperature) AS avg_temp, + COUNT(*) AS data_points + FROM health_metrics hm + JOIN devices d ON d.id = hm.device_id + WHERE hm.time >= :date_from + AND hm.time <= :date_to + GROUP BY d.id, d.hostname + ORDER BY avg_cpu DESC NULLS LAST + """), + { + "date_from": date_from, + "date_to": date_to, + }, + ) + rows = result.fetchall() + + devices = [] + for row in rows: + devices.append({ + "hostname": row[0], + "avg_cpu": float(row[1]) if row[1] is not None else None, + "peak_cpu": float(row[2]) if row[2] is not None else None, + "avg_mem": float(row[3]) if row[3] is not None else None, + "peak_mem": float(row[4]) if row[4] is not None else None, + "avg_disk": float(row[5]) if row[5] is not None else None, + "avg_temp": float(row[6]) if row[6] is not None else None, + "data_points": row[7], + }) + + return { + "report_title": "Metrics Summary", + "devices": devices, + "date_from": date_from.strftime("%Y-%m-%d") if date_from else "", + "date_to": date_to.strftime("%Y-%m-%d") if date_to else "", + } + + +async def _alert_history( + db: AsyncSession, + tenant_id: UUID, + date_from: Optional[datetime], + date_to: Optional[datetime], +) -> dict[str, Any]: + """Gather alert history data.""" + result = await db.execute( + text(""" + SELECT ae.fired_at, ae.resolved_at, ae.severity, ae.status, + ae.message, d.hostname, + EXTRACT(EPOCH FROM (ae.resolved_at - ae.fired_at)) AS duration_secs + FROM alert_events ae + LEFT JOIN devices d ON d.id = ae.device_id + WHERE ae.fired_at >= :date_from + AND ae.fired_at <= :date_to + ORDER BY ae.fired_at DESC + """), + { + "date_from": date_from, + "date_to": date_to, + }, + ) + rows = result.fetchall() + + alerts = [] + critical_count = 0 + warning_count = 0 + info_count = 0 + resolved_durations: list[float] = [] + + for row in rows: + severity = row[2] + if severity == "critical": + critical_count += 1 + elif severity == "warning": + warning_count += 1 + else: + info_count += 1 + + duration_secs = float(row[6]) if row[6] is not None else None + if duration_secs is not None: + resolved_durations.append(duration_secs) + + alerts.append({ + "fired_at": row[0].strftime("%Y-%m-%d %H:%M") if row[0] else "-", + "hostname": row[5], + "severity": severity, + "status": row[3], + "message": row[4], + "duration": _format_duration(duration_secs) if duration_secs is not None else None, + }) + + mttr_minutes = None + mttr_display = None + if resolved_durations: + avg_secs = sum(resolved_durations) / len(resolved_durations) + mttr_minutes = round(avg_secs / 60, 1) + mttr_display = _format_duration(avg_secs) + + return { + "report_title": "Alert History", + "alerts": alerts, + "total_alerts": len(alerts), + "critical_count": critical_count, + "warning_count": warning_count, + "info_count": info_count, + "mttr_minutes": mttr_minutes, + "mttr_display": mttr_display, + "date_from": date_from.strftime("%Y-%m-%d") if date_from else "", + "date_to": date_to.strftime("%Y-%m-%d") if date_to else "", + } + + +async def _change_log( + db: AsyncSession, + tenant_id: UUID, + date_from: Optional[datetime], + date_to: Optional[datetime], +) -> dict[str, Any]: + """Gather change log data -- try audit_logs table first, fall back to config_backups.""" + # Check if audit_logs table exists (17-01 may not have run yet) + has_audit_logs = await _table_exists(db, "audit_logs") + + if has_audit_logs: + return await _change_log_from_audit(db, date_from, date_to) + else: + return await _change_log_from_backups(db, date_from, date_to) + + +async def _table_exists(db: AsyncSession, table_name: str) -> bool: + """Check if a table exists in the database.""" + result = await db.execute( + text(""" + SELECT EXISTS ( + SELECT 1 FROM information_schema.tables + WHERE table_schema = 'public' AND table_name = :table_name + ) + """), + {"table_name": table_name}, + ) + return bool(result.scalar()) + + +async def _change_log_from_audit( + db: AsyncSession, + date_from: Optional[datetime], + date_to: Optional[datetime], +) -> dict[str, Any]: + """Build change log from audit_logs table.""" + result = await db.execute( + text(""" + SELECT al.created_at, u.name AS user_name, al.action, + d.hostname, al.resource_type, + al.details + FROM audit_logs al + LEFT JOIN users u ON u.id = al.user_id + LEFT JOIN devices d ON d.id = al.device_id + WHERE al.created_at >= :date_from + AND al.created_at <= :date_to + ORDER BY al.created_at DESC + """), + { + "date_from": date_from, + "date_to": date_to, + }, + ) + rows = result.fetchall() + + entries = [] + for row in rows: + entries.append({ + "timestamp": row[0].strftime("%Y-%m-%d %H:%M") if row[0] else "-", + "user": row[1], + "action": row[2], + "device": row[3], + "details": row[4] or row[5] or "", + }) + + return { + "report_title": "Change Log", + "entries": entries, + "total_entries": len(entries), + "data_source": "Audit Logs", + "date_from": date_from.strftime("%Y-%m-%d") if date_from else "", + "date_to": date_to.strftime("%Y-%m-%d") if date_to else "", + } + + +async def _change_log_from_backups( + db: AsyncSession, + date_from: Optional[datetime], + date_to: Optional[datetime], +) -> dict[str, Any]: + """Build change log from config_backups + alert_events as fallback.""" + # Config backups as change events + backup_result = await db.execute( + text(""" + SELECT cb.created_at, 'system' AS user_name, 'config_backup' AS action, + d.hostname, cb.trigger_type AS details + FROM config_backups cb + JOIN devices d ON d.id = cb.device_id + WHERE cb.created_at >= :date_from + AND cb.created_at <= :date_to + """), + { + "date_from": date_from, + "date_to": date_to, + }, + ) + backup_rows = backup_result.fetchall() + + # Alert events as change events + alert_result = await db.execute( + text(""" + SELECT ae.fired_at, 'system' AS user_name, + ae.severity || '_alert' AS action, + d.hostname, ae.message AS details + FROM alert_events ae + LEFT JOIN devices d ON d.id = ae.device_id + WHERE ae.fired_at >= :date_from + AND ae.fired_at <= :date_to + """), + { + "date_from": date_from, + "date_to": date_to, + }, + ) + alert_rows = alert_result.fetchall() + + # Merge and sort by timestamp descending + entries = [] + for row in backup_rows: + entries.append({ + "timestamp": row[0].strftime("%Y-%m-%d %H:%M") if row[0] else "-", + "user": row[1], + "action": row[2], + "device": row[3], + "details": row[4] or "", + }) + for row in alert_rows: + entries.append({ + "timestamp": row[0].strftime("%Y-%m-%d %H:%M") if row[0] else "-", + "user": row[1], + "action": row[2], + "device": row[3], + "details": row[4] or "", + }) + + # Sort by timestamp string descending + entries.sort(key=lambda e: e["timestamp"], reverse=True) + + return { + "report_title": "Change Log", + "entries": entries, + "total_entries": len(entries), + "data_source": "Backups + Alerts", + "date_from": date_from.strftime("%Y-%m-%d") if date_from else "", + "date_to": date_to.strftime("%Y-%m-%d") if date_to else "", + } + + +# --------------------------------------------------------------------------- +# Rendering helpers +# --------------------------------------------------------------------------- + + +def _render_pdf(report_type: str, context: dict[str, Any]) -> bytes: + """Render HTML template and convert to PDF via weasyprint.""" + import weasyprint + + template = _jinja_env.get_template(f"reports/{report_type}.html") + html_str = template.render(**context) + pdf_bytes = weasyprint.HTML(string=html_str).write_pdf() + return pdf_bytes + + +def _render_csv(report_type: str, data: dict[str, Any]) -> bytes: + """Render report data as CSV bytes.""" + output = io.StringIO() + writer = csv.writer(output) + + if report_type == "device_inventory": + writer.writerow([ + "Hostname", "IP Address", "Model", "RouterOS Version", + "Status", "Last Seen", "Uptime", "Groups", + ]) + for d in data.get("devices", []): + writer.writerow([ + d["hostname"], d["ip_address"], d["model"] or "", + d["routeros_version"] or "", d["status"], + d["last_seen"] or "", d["uptime"] or "", + d["groups"] or "", + ]) + + elif report_type == "metrics_summary": + writer.writerow([ + "Hostname", "Avg CPU %", "Peak CPU %", "Avg Memory %", + "Peak Memory %", "Avg Disk %", "Avg Temp", "Data Points", + ]) + for d in data.get("devices", []): + writer.writerow([ + d["hostname"], + f"{d['avg_cpu']:.1f}" if d["avg_cpu"] is not None else "", + f"{d['peak_cpu']:.1f}" if d["peak_cpu"] is not None else "", + f"{d['avg_mem']:.1f}" if d["avg_mem"] is not None else "", + f"{d['peak_mem']:.1f}" if d["peak_mem"] is not None else "", + f"{d['avg_disk']:.1f}" if d["avg_disk"] is not None else "", + f"{d['avg_temp']:.1f}" if d["avg_temp"] is not None else "", + d["data_points"], + ]) + + elif report_type == "alert_history": + writer.writerow([ + "Timestamp", "Device", "Severity", "Message", "Status", "Duration", + ]) + for a in data.get("alerts", []): + writer.writerow([ + a["fired_at"], a["hostname"] or "", a["severity"], + a["message"] or "", a["status"], a["duration"] or "", + ]) + + elif report_type == "change_log": + writer.writerow([ + "Timestamp", "User", "Action", "Device", "Details", + ]) + for e in data.get("entries", []): + writer.writerow([ + e["timestamp"], e["user"] or "", e["action"], + e["device"] or "", e["details"] or "", + ]) + + return output.getvalue().encode("utf-8") + + +# --------------------------------------------------------------------------- +# Formatting utilities +# --------------------------------------------------------------------------- + + +def _format_uptime(seconds: int) -> str: + """Format uptime seconds as human-readable string.""" + days = seconds // 86400 + hours = (seconds % 86400) // 3600 + minutes = (seconds % 3600) // 60 + if days > 0: + return f"{days}d {hours}h {minutes}m" + elif hours > 0: + return f"{hours}h {minutes}m" + else: + return f"{minutes}m" + + +def _format_duration(seconds: float) -> str: + """Format a duration in seconds as a human-readable string.""" + if seconds < 60: + return f"{int(seconds)}s" + elif seconds < 3600: + return f"{int(seconds // 60)}m {int(seconds % 60)}s" + elif seconds < 86400: + hours = int(seconds // 3600) + mins = int((seconds % 3600) // 60) + return f"{hours}h {mins}m" + else: + days = int(seconds // 86400) + hours = int((seconds % 86400) // 3600) + return f"{days}d {hours}h" diff --git a/backend/app/services/restore_service.py b/backend/app/services/restore_service.py new file mode 100644 index 0000000..f21b934 --- /dev/null +++ b/backend/app/services/restore_service.py @@ -0,0 +1,599 @@ +"""Two-phase config push with panic-revert safety for RouterOS devices. + +This module implements the critical safety mechanism for config restoration: + +Phase 1 — Push: + 1. Pre-backup (mandatory) — snapshot current config before any changes + 2. Install panic-revert RouterOS scheduler — auto-reverts if device becomes + unreachable (the scheduler fires after 90s and loads the pre-push backup) + 3. Push the target config via SSH /import + +Phase 2 — Verification (60s settle window): + 4. Wait 60s for config to settle (scheduled processes restart, etc.) + 5. Reachability check via asyncssh + 6a. Reachable — remove panic-revert scheduler; mark operation committed + 6b. Unreachable — RouterOS is auto-reverting; mark operation reverted + +Pitfall 6 handling: + If the API pod restarts during the 60s window, the config_push_operations + row with status='pending_verification' serves as the recovery signal. + On startup, recover_stale_push_operations() resolves any stale rows. + +Security policy: + known_hosts=None — RouterOS self-signed host keys; mirrors InsecureSkipVerify + used in the poller's TLS connection. See Pitfall 2 in 04-RESEARCH.md. +""" + +import asyncio +import json +import logging +from datetime import datetime, timedelta, timezone + +import asyncssh +from sqlalchemy.ext.asyncio import AsyncSession + +from app.config import settings +from app.database import set_tenant_context, AdminAsyncSessionLocal +from app.models.config_backup import ConfigPushOperation +from app.models.device import Device +from app.services import backup_service, git_store +from app.services.event_publisher import publish_event +from app.services.push_tracker import record_push, clear_push + +logger = logging.getLogger(__name__) + +# Name of the panic-revert scheduler installed on the RouterOS device +_PANIC_REVERT_SCHEDULER = "mikrotik-portal-panic-revert" +# Name of the pre-push binary backup saved on device flash +_PRE_PUSH_BACKUP = "portal-pre-push" +# Name of the RSC file used for /import on device +_RESTORE_RSC = "portal-restore.rsc" + + +async def _publish_push_progress( + tenant_id: str, + device_id: str, + stage: str, + message: str, + push_op_id: str | None = None, + error: str | None = None, +) -> None: + """Publish config push progress event to NATS (fire-and-forget).""" + payload = { + "event_type": "config_push", + "tenant_id": tenant_id, + "device_id": device_id, + "stage": stage, + "message": message, + "timestamp": datetime.now(timezone.utc).isoformat(), + "push_operation_id": push_op_id, + } + if error: + payload["error"] = error + await publish_event(f"config.push.{tenant_id}.{device_id}", payload) + + +async def restore_config( + device_id: str, + tenant_id: str, + commit_sha: str, + db_session: AsyncSession, +) -> dict: + """Restore a device config to a specific backup version via two-phase push. + + Args: + device_id: Device UUID as string. + tenant_id: Tenant UUID as string. + commit_sha: Git commit SHA of the backup version to restore. + db_session: AsyncSession with RLS context already set (from API endpoint). + + Returns: + { + "status": "committed" | "reverted" | "failed", + "message": str, + "pre_backup_sha": str, + } + + Raises: + ValueError: If device not found or missing credentials. + Exception: On SSH failure during push phase (reverted status logged). + """ + loop = asyncio.get_event_loop() + + # ------------------------------------------------------------------ + # Step 1: Load device from DB and decrypt credentials + # ------------------------------------------------------------------ + from sqlalchemy import select + + result = await db_session.execute( + select(Device).where(Device.id == device_id) # type: ignore[arg-type] + ) + device = result.scalar_one_or_none() + if device is None: + raise ValueError(f"Device {device_id!r} not found") + + if not device.encrypted_credentials_transit and not device.encrypted_credentials: + raise ValueError( + f"Device {device_id!r} has no stored credentials — cannot perform restore" + ) + + key = settings.get_encryption_key_bytes() + from app.services.crypto import decrypt_credentials_hybrid + creds_json = await decrypt_credentials_hybrid( + device.encrypted_credentials_transit, + device.encrypted_credentials, + str(device.tenant_id), + key, + ) + creds = json.loads(creds_json) + ssh_username = creds.get("username", "") + ssh_password = creds.get("password", "") + ip = device.ip_address + + hostname = device.hostname or ip + + # Publish "started" progress event + await _publish_push_progress(tenant_id, device_id, "started", f"Config restore started for {hostname}") + + # ------------------------------------------------------------------ + # Step 2: Read the target export.rsc from the backup commit + # ------------------------------------------------------------------ + try: + export_bytes = await loop.run_in_executor( + None, + git_store.read_file, + tenant_id, + commit_sha, + device_id, + "export.rsc", + ) + except (KeyError, Exception) as exc: + raise ValueError( + f"Backup version {commit_sha!r} not found for device {device_id!r}: {exc}" + ) from exc + + export_text = export_bytes.decode("utf-8", errors="replace") + + # ------------------------------------------------------------------ + # Step 3: Mandatory pre-backup before push + # ------------------------------------------------------------------ + await _publish_push_progress(tenant_id, device_id, "backing_up", f"Creating pre-restore backup for {hostname}") + + logger.info( + "Starting pre-restore backup for device %s (%s) before pushing commit %s", + hostname, + ip, + commit_sha[:8], + ) + pre_backup_result = await backup_service.run_backup( + device_id=device_id, + tenant_id=tenant_id, + trigger_type="pre-restore", + db_session=db_session, + ) + pre_backup_sha = pre_backup_result["commit_sha"] + logger.info("Pre-restore backup complete: %s", pre_backup_sha[:8]) + + # ------------------------------------------------------------------ + # Step 4: Record push operation (pending_verification for recovery) + # ------------------------------------------------------------------ + push_op = ConfigPushOperation( + device_id=device.id, + tenant_id=device.tenant_id, + pre_push_commit_sha=pre_backup_sha, + scheduler_name=_PANIC_REVERT_SCHEDULER, + status="pending_verification", + ) + db_session.add(push_op) + await db_session.flush() + push_op_id = push_op.id + + logger.info( + "Push op %s in pending_verification — if API restarts, " + "recover_stale_push_operations() will resolve on next startup", + push_op.id, + ) + + # ------------------------------------------------------------------ + # Step 5: SSH to device — install panic-revert, push config + # ------------------------------------------------------------------ + push_op_id_str = str(push_op_id) + await _publish_push_progress(tenant_id, device_id, "pushing", f"Pushing config to {hostname}", push_op_id=push_op_id_str) + + logger.info( + "Pushing config to device %s (%s): installing panic-revert scheduler and uploading config", + hostname, + ip, + ) + + try: + async with asyncssh.connect( + ip, + port=22, + username=ssh_username, + password=ssh_password, + known_hosts=None, # RouterOS self-signed host keys — see module docstring + connect_timeout=30, + ) as conn: + # 5a: Create binary backup on device as revert point + await conn.run( + f"/system backup save name={_PRE_PUSH_BACKUP} dont-encrypt=yes", + check=True, + ) + logger.debug("Pre-push binary backup saved on device as %s.backup", _PRE_PUSH_BACKUP) + + # 5b: Install panic-revert RouterOS scheduler + # The scheduler fires after 90s on startup and loads the pre-push backup. + # This is the safety net: if the device becomes unreachable after push, + # RouterOS will auto-revert to the known-good config on the next reboot + # or after 90s of uptime. + await conn.run( + f"/system scheduler add " + f'name="{_PANIC_REVERT_SCHEDULER}" ' + f"interval=90s " + f'on-event=":delay 0; /system backup load name={_PRE_PUSH_BACKUP}" ' + f"start-time=startup", + check=True, + ) + logger.debug("Panic-revert scheduler installed on device") + + # 5c: Upload export.rsc and /import it + # Write the RSC content to the device filesystem via SSH exec, + # then use /import to apply it. The file is cleaned up after import. + # We use a here-doc approach: write content line-by-line via /file set. + # RouterOS supports writing files via /tool fetch or direct file commands. + # Simplest approach for large configs: use asyncssh's write_into to + # write file content, then /import. + # + # RouterOS doesn't support direct SFTP uploads via SSH open_sftp() easily + # for config files. Use the script approach instead: + # /system script add + run + remove (avoids flash write concerns). + # + # Actually the simplest method: write the export.rsc line by line via + # /file print / set commands is RouterOS 6 only and unreliable. + # Best approach for RouterOS 7: use SFTP to upload the file. + async with conn.start_sftp_client() as sftp: + async with sftp.open(_RESTORE_RSC, "wb") as f: + await f.write(export_text.encode("utf-8")) + logger.debug("Uploaded %s to device flash", _RESTORE_RSC) + + # /import the config file + import_result = await conn.run( + f"/import file={_RESTORE_RSC}", + check=False, # Don't raise on non-zero exit — import may succeed with warnings + ) + logger.info( + "Config import result for device %s: exit_status=%s stdout=%r", + hostname, + import_result.exit_status, + (import_result.stdout or "")[:200], + ) + + # Clean up the uploaded RSC file (best-effort) + try: + await conn.run(f"/file remove {_RESTORE_RSC}", check=True) + except Exception as cleanup_err: + logger.warning( + "Failed to clean up %s from device %s: %s", + _RESTORE_RSC, + ip, + cleanup_err, + ) + + except Exception as push_err: + logger.error( + "SSH push phase failed for device %s (%s): %s", + hostname, + ip, + push_err, + ) + # Update push operation to failed + await _update_push_op_status(push_op_id, "failed", db_session) + await _publish_push_progress( + tenant_id, device_id, "failed", + f"Config push failed for {hostname}: {push_err}", + push_op_id=push_op_id_str, error=str(push_err), + ) + return { + "status": "failed", + "message": f"Config push failed during SSH phase: {push_err}", + "pre_backup_sha": pre_backup_sha, + } + + # Record push in Redis so the poller can detect post-push offline events + await record_push( + device_id=device_id, + tenant_id=tenant_id, + push_type="restore", + push_operation_id=push_op_id_str, + pre_push_commit_sha=pre_backup_sha, + ) + + # ------------------------------------------------------------------ + # Step 6: Wait 60s for config to settle + # ------------------------------------------------------------------ + await _publish_push_progress(tenant_id, device_id, "settling", f"Config pushed to {hostname} — waiting 60s for settle", push_op_id=push_op_id_str) + + logger.info( + "Config pushed to device %s — waiting 60s for config to settle", + hostname, + ) + await asyncio.sleep(60) + + # ------------------------------------------------------------------ + # Step 7: Reachability check + # ------------------------------------------------------------------ + await _publish_push_progress(tenant_id, device_id, "verifying", f"Verifying device {hostname} reachability", push_op_id=push_op_id_str) + + reachable = await _check_reachability(ip, ssh_username, ssh_password) + + if reachable: + # ------------------------------------------------------------------ + # Step 8a: Device is reachable — remove panic-revert scheduler + cleanup + # ------------------------------------------------------------------ + logger.info("Device %s (%s) is reachable after push — committing", hostname, ip) + try: + async with asyncssh.connect( + ip, + port=22, + username=ssh_username, + password=ssh_password, + known_hosts=None, + connect_timeout=30, + ) as conn: + # Remove the panic-revert scheduler + await conn.run( + f'/system scheduler remove "{_PANIC_REVERT_SCHEDULER}"', + check=False, # Non-fatal if already removed + ) + # Clean up the pre-push binary backup from device flash + await conn.run( + f"/file remove {_PRE_PUSH_BACKUP}.backup", + check=False, # Non-fatal if already removed + ) + except Exception as cleanup_err: + # Cleanup failure is non-fatal — scheduler will eventually fire but + # the backup is now the correct config, so it's acceptable. + logger.warning( + "Failed to clean up panic-revert scheduler/backup on device %s: %s", + hostname, + cleanup_err, + ) + + await _update_push_op_status(push_op_id, "committed", db_session) + await clear_push(device_id) + await _publish_push_progress(tenant_id, device_id, "committed", f"Config restored successfully on {hostname}", push_op_id=push_op_id_str) + + return { + "status": "committed", + "message": "Config restored successfully", + "pre_backup_sha": pre_backup_sha, + } + + else: + # ------------------------------------------------------------------ + # Step 8b: Device unreachable — RouterOS is auto-reverting via scheduler + # ------------------------------------------------------------------ + logger.warning( + "Device %s (%s) is unreachable after push — RouterOS panic-revert scheduler " + "will auto-revert to %s.backup", + hostname, + ip, + _PRE_PUSH_BACKUP, + ) + + await _update_push_op_status(push_op_id, "reverted", db_session) + await _publish_push_progress( + tenant_id, device_id, "reverted", + f"Device {hostname} unreachable — auto-reverting via panic-revert scheduler", + push_op_id=push_op_id_str, + ) + + return { + "status": "reverted", + "message": ( + "Device unreachable after push; RouterOS is auto-reverting " + "via panic-revert scheduler" + ), + "pre_backup_sha": pre_backup_sha, + } + + +async def _check_reachability(ip: str, username: str, password: str) -> bool: + """Check if a RouterOS device is reachable via SSH. + + Attempts to connect and run a simple command (/system identity print). + Returns True if successful, False if the connection fails or times out. + + Uses asyncssh (not the poller's binary API) to avoid a circular import. + A 30-second timeout is used — if the device doesn't respond within that + window, it's considered unreachable (panic-revert will handle it). + + Args: + ip: Device IP address. + username: SSH username. + password: SSH password. + + Returns: + True if reachable, False if unreachable. + """ + try: + async with asyncssh.connect( + ip, + port=22, + username=username, + password=password, + known_hosts=None, + connect_timeout=30, + ) as conn: + result = await conn.run("/system identity print", check=True) + logger.debug("Reachability check OK for %s: %r", ip, result.stdout[:50]) + return True + except Exception as exc: + logger.info("Device %s unreachable after push: %s", ip, exc) + return False + + +async def _update_push_op_status( + push_op_id, + new_status: str, + db_session: AsyncSession, +) -> None: + """Update the status and completed_at of a ConfigPushOperation row. + + Args: + push_op_id: UUID of the ConfigPushOperation row. + new_status: New status value ('committed' | 'reverted' | 'failed'). + db_session: Database session (must already have tenant context set). + """ + from sqlalchemy import select, update + + await db_session.execute( + update(ConfigPushOperation) + .where(ConfigPushOperation.id == push_op_id) # type: ignore[arg-type] + .values( + status=new_status, + completed_at=datetime.now(timezone.utc), + ) + ) + # Don't commit here — the caller (endpoint) owns the transaction + + +async def _remove_panic_scheduler( + ip: str, username: str, password: str, scheduler_name: str +) -> bool: + """SSH to device and remove the panic-revert scheduler. Returns True if removed.""" + try: + async with asyncssh.connect( + ip, + username=username, + password=password, + known_hosts=None, + connect_timeout=30, + ) as conn: + # Check if scheduler exists + result = await conn.run( + f'/system scheduler print where name="{scheduler_name}"', + check=False, + ) + if scheduler_name in result.stdout: + await conn.run( + f'/system scheduler remove [find name="{scheduler_name}"]', + check=False, + ) + # Also clean up pre-push backup file + await conn.run( + f'/file remove [find name="{_PRE_PUSH_BACKUP}.backup"]', + check=False, + ) + return True + return False # Scheduler already gone (device reverted itself) + except Exception as e: + logger.error("Failed to remove panic scheduler from %s: %s", ip, e) + return False + + +async def recover_stale_push_operations(db_session: AsyncSession) -> None: + """Recover stale pending_verification push operations on API startup. + + Scans for operations older than 5 minutes that are still pending. + For each, checks device reachability and resolves the operation. + """ + from sqlalchemy import select + + from app.models.config_backup import ConfigPushOperation + from app.models.device import Device + from app.services.crypto import decrypt_credentials_hybrid + + cutoff = datetime.now(timezone.utc) - timedelta(minutes=5) + + result = await db_session.execute( + select(ConfigPushOperation).where( + ConfigPushOperation.status == "pending_verification", + ConfigPushOperation.started_at < cutoff, + ) + ) + stale_ops = result.scalars().all() + + if not stale_ops: + logger.info("No stale push operations to recover") + return + + logger.warning("Found %d stale push operations to recover", len(stale_ops)) + + key = settings.get_encryption_key_bytes() + + for op in stale_ops: + try: + # Load device + dev_result = await db_session.execute( + select(Device).where(Device.id == op.device_id) + ) + device = dev_result.scalar_one_or_none() + if not device: + logger.error("Device %s not found for stale op %s", op.device_id, op.id) + await _update_push_op_status(op.id, "failed", db_session) + continue + + # Decrypt credentials + creds_json = await decrypt_credentials_hybrid( + device.encrypted_credentials_transit, + device.encrypted_credentials, + str(op.tenant_id), + key, + ) + creds = json.loads(creds_json) + ssh_username = creds.get("username", "admin") + ssh_password = creds.get("password", "") + + # Check reachability + reachable = await _check_reachability( + device.ip_address, ssh_username, ssh_password + ) + + if reachable: + # Try to remove scheduler (if still there, push was good) + removed = await _remove_panic_scheduler( + device.ip_address, + ssh_username, + ssh_password, + op.scheduler_name, + ) + if removed: + logger.info("Recovery: committed op %s (scheduler removed)", op.id) + else: + # Scheduler already gone — device may have reverted + logger.warning( + "Recovery: op %s — scheduler gone, device may have reverted. " + "Marking committed (device is reachable).", + op.id, + ) + await _update_push_op_status(op.id, "committed", db_session) + + await _publish_push_progress( + str(op.tenant_id), + str(op.device_id), + "committed", + "Recovered after API restart", + push_op_id=str(op.id), + ) + else: + logger.warning( + "Recovery: device %s unreachable, marking op %s failed", + op.device_id, + op.id, + ) + await _update_push_op_status(op.id, "failed", db_session) + await _publish_push_progress( + str(op.tenant_id), + str(op.device_id), + "failed", + "Device unreachable during recovery after API restart", + push_op_id=str(op.id), + ) + + except Exception as e: + logger.error("Recovery failed for op %s: %s", op.id, e) + await _update_push_op_status(op.id, "failed", db_session) + + await db_session.commit() diff --git a/backend/app/services/routeros_proxy.py b/backend/app/services/routeros_proxy.py new file mode 100644 index 0000000..5b92066 --- /dev/null +++ b/backend/app/services/routeros_proxy.py @@ -0,0 +1,165 @@ +"""RouterOS command proxy via NATS request-reply. + +Sends command requests to the Go poller's CmdResponder subscription +(device.cmd.{device_id}) and returns structured RouterOS API response data. + +Used by: +- Config editor API (browse menu paths, add/edit/delete entries) +- Template push service (execute rendered template commands) +""" + +import json +import logging +from typing import Any + +import nats +import nats.aio.client + +from app.config import settings + +logger = logging.getLogger(__name__) + +# Module-level NATS connection (lazy initialized) +_nc: nats.aio.client.Client | None = None + + +async def _get_nats() -> nats.aio.client.Client: + """Get or create a NATS connection for command proxy requests.""" + global _nc + if _nc is None or _nc.is_closed: + _nc = await nats.connect(settings.NATS_URL) + logger.info("RouterOS proxy NATS connection established") + return _nc + + +async def execute_command( + device_id: str, + command: str, + args: list[str] | None = None, + timeout: float = 15.0, +) -> dict[str, Any]: + """Execute a RouterOS API command on a device via the Go poller. + + Args: + device_id: UUID string of the target device. + command: Full RouterOS API path, e.g. "/ip/address/print". + args: Optional list of RouterOS API args, e.g. ["=.proplist=.id,address"]. + timeout: NATS request timeout in seconds (default 15s). + + Returns: + {"success": bool, "data": list[dict], "error": str|None} + """ + nc = await _get_nats() + request = { + "device_id": device_id, + "command": command, + "args": args or [], + } + + try: + reply = await nc.request( + f"device.cmd.{device_id}", + json.dumps(request).encode(), + timeout=timeout, + ) + return json.loads(reply.data) + except nats.errors.TimeoutError: + return { + "success": False, + "data": [], + "error": "Device command timed out — device may be offline or unreachable", + } + except Exception as exc: + logger.error("NATS request failed for device %s: %s", device_id, exc) + return {"success": False, "data": [], "error": str(exc)} + + +async def browse_menu(device_id: str, path: str) -> dict[str, Any]: + """Browse a RouterOS menu path and return all entries. + + Args: + device_id: Device UUID string. + path: RouterOS menu path, e.g. "/ip/address" or "/interface". + + Returns: + {"success": bool, "data": list[dict], "error": str|None} + """ + command = f"{path}/print" + return await execute_command(device_id, command) + + +async def add_entry( + device_id: str, path: str, properties: dict[str, str] +) -> dict[str, Any]: + """Add a new entry to a RouterOS menu path. + + Args: + device_id: Device UUID. + path: Menu path, e.g. "/ip/address". + properties: Key-value pairs for the new entry. + + Returns: + Command response dict. + """ + args = [f"={k}={v}" for k, v in properties.items()] + return await execute_command(device_id, f"{path}/add", args) + + +async def update_entry( + device_id: str, path: str, entry_id: str | None, properties: dict[str, str] +) -> dict[str, Any]: + """Update an existing entry in a RouterOS menu path. + + Args: + device_id: Device UUID. + path: Menu path. + entry_id: RouterOS .id value (e.g. "*1"). None for singleton paths. + properties: Key-value pairs to update. + + Returns: + Command response dict. + """ + id_args = [f"=.id={entry_id}"] if entry_id else [] + args = id_args + [f"={k}={v}" for k, v in properties.items()] + return await execute_command(device_id, f"{path}/set", args) + + +async def remove_entry( + device_id: str, path: str, entry_id: str +) -> dict[str, Any]: + """Remove an entry from a RouterOS menu path. + + Args: + device_id: Device UUID. + path: Menu path. + entry_id: RouterOS .id value. + + Returns: + Command response dict. + """ + return await execute_command(device_id, f"{path}/remove", [f"=.id={entry_id}"]) + + +async def execute_cli(device_id: str, cli_command: str) -> dict[str, Any]: + """Execute an arbitrary RouterOS CLI command. + + For commands that don't follow the standard /path/action pattern. + The command is sent as-is to the RouterOS API. + + Args: + device_id: Device UUID. + cli_command: Full CLI command string. + + Returns: + Command response dict. + """ + return await execute_command(device_id, cli_command) + + +async def close() -> None: + """Close the NATS connection. Called on application shutdown.""" + global _nc + if _nc and not _nc.is_closed: + await _nc.drain() + _nc = None + logger.info("RouterOS proxy NATS connection closed") diff --git a/backend/app/services/rsc_parser.py b/backend/app/services/rsc_parser.py new file mode 100644 index 0000000..1448b65 --- /dev/null +++ b/backend/app/services/rsc_parser.py @@ -0,0 +1,220 @@ +"""RouterOS RSC export parser — extracts categories, validates syntax, computes impact.""" + +import re +import logging +from typing import Any + +logger = logging.getLogger(__name__) + +HIGH_RISK_PATHS = { + "/ip address", "/ip route", "/ip firewall filter", "/ip firewall nat", + "/interface", "/interface bridge", "/interface vlan", + "/system identity", "/ip service", "/ip ssh", "/user", +} + +MANAGEMENT_PATTERNS = [ + (re.compile(r"chain=input.*dst-port=(22|8291|8728|8729|443|80)", re.I), + "Modifies firewall rules for management ports (SSH/WinBox/API/Web)"), + (re.compile(r"chain=input.*action=drop", re.I), + "Adds drop rule on input chain — may block management access"), + (re.compile(r"/ip service", re.I), + "Modifies IP services — may disable API/SSH/WinBox access"), + (re.compile(r"/user.*set.*password", re.I), + "Changes user password — may affect automated access"), +] + + +def _join_continuation_lines(text: str) -> list[str]: + """Join lines ending with \\ into single logical lines.""" + lines = text.split("\n") + joined: list[str] = [] + buf = "" + for line in lines: + stripped = line.rstrip() + if stripped.endswith("\\"): + buf += stripped[:-1].rstrip() + " " + else: + if buf: + buf += stripped + joined.append(buf) + buf = "" + else: + joined.append(stripped) + if buf: + joined.append(buf + " <>") + return joined + + +def parse_rsc(text: str) -> dict[str, Any]: + """Parse a RouterOS /export compact output. + + Returns a dict with a "categories" list, each containing: + - path: the RouterOS command path (e.g. "/ip address") + - adds: count of "add" commands + - sets: count of "set" commands + - removes: count of "remove" commands + - commands: list of command strings under this path + """ + lines = _join_continuation_lines(text) + categories: dict[str, dict] = {} + current_path: str | None = None + + for line in lines: + line = line.strip() + if not line or line.startswith("#"): + continue + + if line.startswith("/"): + # Could be just a path header, or a path followed by a command + parts = line.split(None, 1) + if len(parts) == 1: + # Pure path header like "/interface bridge" + current_path = parts[0] + else: + # Check if second part starts with a known command verb + cmd_check = parts[1].strip().split(None, 1) + if cmd_check and cmd_check[0] in ("add", "set", "remove", "print", "enable", "disable"): + current_path = parts[0] + line = parts[1].strip() + else: + # The whole line is a path (e.g. "/ip firewall filter") + current_path = line + continue + + if current_path and current_path not in categories: + categories[current_path] = { + "path": current_path, + "adds": 0, + "sets": 0, + "removes": 0, + "commands": [], + } + + if len(parts) == 1: + continue + + if current_path is None: + continue + + if current_path not in categories: + categories[current_path] = { + "path": current_path, + "adds": 0, + "sets": 0, + "removes": 0, + "commands": [], + } + + cat = categories[current_path] + cat["commands"].append(line) + + if line.startswith("add ") or line.startswith("add\t"): + cat["adds"] += 1 + elif line.startswith("set "): + cat["sets"] += 1 + elif line.startswith("remove "): + cat["removes"] += 1 + + return {"categories": list(categories.values())} + + +def validate_rsc(text: str) -> dict[str, Any]: + """Validate RSC export syntax. + + Checks for: + - Unbalanced quotes (indicates truncation or corruption) + - Trailing continuation lines (indicates truncated export) + + Returns dict with "valid" (bool) and "errors" (list of strings). + """ + errors: list[str] = [] + + # Check for unbalanced quotes across the entire file + in_quote = False + for line in text.split("\n"): + stripped = line.rstrip() + if stripped.endswith("\\"): + stripped = stripped[:-1] + # Count unescaped quotes + count = stripped.count('"') - stripped.count('\\"') + if count % 2 != 0: + in_quote = not in_quote + + if in_quote: + errors.append("Unbalanced quote detected — file may be truncated") + + # Check if file ends with a continuation backslash + lines = text.rstrip().split("\n") + if lines and lines[-1].rstrip().endswith("\\"): + errors.append("File ends with continuation line (\\) — truncated export") + + return {"valid": len(errors) == 0, "errors": errors} + + +def compute_impact( + current_parsed: dict[str, Any], + target_parsed: dict[str, Any], +) -> dict[str, Any]: + """Compare current vs target parsed RSC and compute impact analysis. + + Returns dict with: + - categories: list of per-path diffs with risk levels + - warnings: list of human-readable warning strings + - diff: summary counts (added, removed, modified) + """ + current_map = {c["path"]: c for c in current_parsed["categories"]} + target_map = {c["path"]: c for c in target_parsed["categories"]} + all_paths = sorted(set(list(current_map.keys()) + list(target_map.keys()))) + + result_categories = [] + warnings: list[str] = [] + total_added = total_removed = total_modified = 0 + + for path in all_paths: + curr = current_map.get(path, {"adds": 0, "sets": 0, "removes": 0, "commands": []}) + tgt = target_map.get(path, {"adds": 0, "sets": 0, "removes": 0, "commands": []}) + curr_cmds = set(curr.get("commands", [])) + tgt_cmds = set(tgt.get("commands", [])) + added = len(tgt_cmds - curr_cmds) + removed = len(curr_cmds - tgt_cmds) + total_added += added + total_removed += removed + + has_changes = added > 0 or removed > 0 + risk = "none" + if has_changes: + risk = "high" if path in HIGH_RISK_PATHS else "low" + result_categories.append({ + "path": path, + "adds": added, + "removes": removed, + "risk": risk, + }) + + # Check target commands against management patterns + target_text = "\n".join( + cmd for cat in target_parsed["categories"] for cmd in cat.get("commands", []) + ) + for pattern, message in MANAGEMENT_PATTERNS: + if pattern.search(target_text): + warnings.append(message) + + # Warn about removed IP addresses + if "/ip address" in current_map and "/ip address" in target_map: + curr_addrs = current_map["/ip address"].get("commands", []) + tgt_addrs = target_map["/ip address"].get("commands", []) + removed_addrs = set(curr_addrs) - set(tgt_addrs) + if removed_addrs: + warnings.append( + f"Removes {len(removed_addrs)} IP address(es) — verify none are management interfaces" + ) + + return { + "categories": result_categories, + "warnings": warnings, + "diff": { + "added": total_added, + "removed": total_removed, + "modified": total_modified, + }, + } diff --git a/backend/app/services/scanner.py b/backend/app/services/scanner.py new file mode 100644 index 0000000..ad0be3a --- /dev/null +++ b/backend/app/services/scanner.py @@ -0,0 +1,124 @@ +""" +Subnet scanner for MikroTik device discovery. + +Scans a CIDR range by attempting TCP connections to RouterOS API ports +(8728 and 8729) with configurable concurrency limits and timeouts. + +Security constraints: +- CIDR range limited to /20 or smaller (4096 IPs maximum) +- Maximum 50 concurrent connections to prevent network flooding +- 2-second timeout per connection attempt +""" + +import asyncio +import ipaddress +import socket +from typing import Optional + +from app.schemas.device import SubnetScanResult + +# Maximum concurrency for TCP probes +_MAX_CONCURRENT = 50 +# Timeout (seconds) per TCP connection attempt +_TCP_TIMEOUT = 2.0 +# RouterOS API port +_API_PORT = 8728 +# RouterOS SSL API port +_SSL_PORT = 8729 + + +async def _probe_host( + semaphore: asyncio.Semaphore, + ip_str: str, +) -> Optional[SubnetScanResult]: + """ + Probe a single IP for RouterOS API ports. + + Returns a SubnetScanResult if either port is open, None otherwise. + """ + async with semaphore: + api_open, ssl_open = await asyncio.gather( + _tcp_connect(ip_str, _API_PORT), + _tcp_connect(ip_str, _SSL_PORT), + return_exceptions=False, + ) + + if not api_open and not ssl_open: + return None + + # Attempt reverse DNS (best-effort; won't fail the scan) + hostname = await _reverse_dns(ip_str) + + return SubnetScanResult( + ip_address=ip_str, + hostname=hostname, + api_port_open=api_open, + api_ssl_port_open=ssl_open, + ) + + +async def _tcp_connect(ip: str, port: int) -> bool: + """Return True if a TCP connection to ip:port succeeds within _TCP_TIMEOUT.""" + try: + _, writer = await asyncio.wait_for( + asyncio.open_connection(ip, port), + timeout=_TCP_TIMEOUT, + ) + writer.close() + try: + await writer.wait_closed() + except Exception: + pass + return True + except Exception: + return False + + +async def _reverse_dns(ip: str) -> Optional[str]: + """Attempt a reverse DNS lookup. Returns None on failure.""" + try: + loop = asyncio.get_running_loop() + hostname, _, _ = await asyncio.wait_for( + loop.run_in_executor(None, socket.gethostbyaddr, ip), + timeout=1.5, + ) + return hostname + except Exception: + return None + + +async def scan_subnet(cidr: str) -> list[SubnetScanResult]: + """ + Scan a CIDR range for hosts with open RouterOS API ports. + + Args: + cidr: CIDR notation string, e.g. "192.168.1.0/24". + Must be /20 or smaller (validated by SubnetScanRequest). + + Returns: + List of SubnetScanResult for each host with at least one open API port. + + Raises: + ValueError: If CIDR is malformed or too large. + """ + try: + network = ipaddress.ip_network(cidr, strict=False) + except ValueError as e: + raise ValueError(f"Invalid CIDR: {e}") from e + + if network.num_addresses > 4096: + raise ValueError( + f"CIDR range too large ({network.num_addresses} addresses). " + "Maximum allowed is /20 (4096 addresses)." + ) + + # Skip network address and broadcast address for IPv4 + hosts = list(network.hosts()) if network.num_addresses > 2 else list(network) + + semaphore = asyncio.Semaphore(_MAX_CONCURRENT) + tasks = [_probe_host(semaphore, str(ip)) for ip in hosts] + + results = await asyncio.gather(*tasks, return_exceptions=False) + + # Filter out None (hosts with no open ports) + return [r for r in results if r is not None] diff --git a/backend/app/services/srp_service.py b/backend/app/services/srp_service.py new file mode 100644 index 0000000..b2efa53 --- /dev/null +++ b/backend/app/services/srp_service.py @@ -0,0 +1,113 @@ +"""SRP-6a server-side authentication service. + +Wraps the srptools library for the two-step SRP handshake. +All functions are async, using asyncio.to_thread() because +srptools operations are CPU-bound and synchronous. +""" + +import asyncio +import hashlib + +from srptools import SRPContext, SRPServerSession +from srptools.constants import PRIME_2048, PRIME_2048_GEN + +# Client uses Web Crypto SHA-256 — server must match. +# srptools defaults to SHA-1 which would cause proof mismatch. +_SRP_HASH = hashlib.sha256 + + +async def create_srp_verifier( + salt_hex: str, verifier_hex: str +) -> tuple[bytes, bytes]: + """Convert client-provided hex salt and verifier to bytes for storage. + + The client computes v = g^x mod N using 2SKD-derived SRP-x. + The server stores the verifier directly and never computes x + from the password. + + Returns: + Tuple of (salt_bytes, verifier_bytes) ready for database storage. + """ + return bytes.fromhex(salt_hex), bytes.fromhex(verifier_hex) + + +async def srp_init( + email: str, srp_verifier_hex: str +) -> tuple[str, str]: + """SRP Step 1: Generate server ephemeral (B) and private key (b). + + Args: + email: User email (SRP identity I). + srp_verifier_hex: Hex-encoded SRP verifier from database. + + Returns: + Tuple of (server_public_hex, server_private_hex). + Caller stores server_private in Redis with 60s TTL. + + Raises: + ValueError: If SRP initialization fails for any reason. + """ + def _init() -> tuple[str, str]: + context = SRPContext( + email, prime=PRIME_2048, generator=PRIME_2048_GEN, + hash_func=_SRP_HASH, + ) + server_session = SRPServerSession( + context, srp_verifier_hex + ) + return server_session.public, server_session.private + + try: + return await asyncio.to_thread(_init) + except Exception as e: + raise ValueError(f"SRP initialization failed: {e}") from e + + +async def srp_verify( + email: str, + srp_verifier_hex: str, + server_private: str, + client_public: str, + client_proof: str, + srp_salt_hex: str, +) -> tuple[bool, str | None]: + """SRP Step 2: Verify client proof M1, return server proof M2. + + Args: + email: User email (SRP identity I). + srp_verifier_hex: Hex-encoded SRP verifier from database. + server_private: Server private ephemeral from Redis session. + client_public: Hex-encoded client public ephemeral A. + client_proof: Hex-encoded client proof M1. + srp_salt_hex: Hex-encoded SRP salt. + + Returns: + Tuple of (is_valid, server_proof_hex_or_none). + If valid, server_proof is M2 for the client to verify. + """ + def _verify() -> tuple[bool, str | None]: + import logging + log = logging.getLogger("srp_debug") + context = SRPContext( + email, prime=PRIME_2048, generator=PRIME_2048_GEN, + hash_func=_SRP_HASH, + ) + server_session = SRPServerSession( + context, srp_verifier_hex, private=server_private + ) + _key, _key_proof, _key_proof_hash = server_session.process(client_public, srp_salt_hex) + # srptools verify_proof has a Python 3 bug: hexlify() returns bytes + # but client_proof is str, so bytes == str is always False. + # Compare manually with consistent types. + server_m1 = _key_proof if isinstance(_key_proof, str) else _key_proof.decode('ascii') + is_valid = client_proof.lower() == server_m1.lower() + if not is_valid: + return False, None + # Return M2 (key_proof_hash), also fixing the bytes/str issue + m2 = _key_proof_hash if isinstance(_key_proof_hash, str) else _key_proof_hash.decode('ascii') + return True, m2 + + try: + return await asyncio.to_thread(_verify) + except Exception as e: + raise ValueError(f"SRP verification failed: {e}") from e diff --git a/backend/app/services/sse_manager.py b/backend/app/services/sse_manager.py new file mode 100644 index 0000000..db241b5 --- /dev/null +++ b/backend/app/services/sse_manager.py @@ -0,0 +1,311 @@ +"""SSE Connection Manager -- bridges NATS JetStream to per-client asyncio queues. + +Each SSE client gets its own NATS connection with ephemeral consumers. +Events are tenant-filtered and placed onto an asyncio.Queue that the +SSE router drains via EventSourceResponse. +""" + +import asyncio +import json +from typing import Optional + +import nats +import structlog +from nats.js.api import ConsumerConfig, DeliverPolicy, StreamConfig + +from app.config import settings + +logger = structlog.get_logger(__name__) + +# Subjects per stream for SSE subscriptions +# Note: config.push.* subjects live in DEVICE_EVENTS (created by Go poller) +_DEVICE_EVENT_SUBJECTS = [ + "device.status.>", + "device.metrics.>", + "config.push.rollback.>", + "config.push.alert.>", +] +_ALERT_EVENT_SUBJECTS = ["alert.fired.>", "alert.resolved.>"] +_OPERATION_EVENT_SUBJECTS = ["firmware.progress.>"] + + +def _map_subject_to_event_type(subject: str) -> str: + """Map a NATS subject prefix to an SSE event type string.""" + if subject.startswith("device.status."): + return "device_status" + if subject.startswith("device.metrics."): + return "metric_update" + if subject.startswith("alert.fired."): + return "alert_fired" + if subject.startswith("alert.resolved."): + return "alert_resolved" + if subject.startswith("config.push."): + return "config_push" + if subject.startswith("firmware.progress."): + return "firmware_progress" + return "unknown" + + +async def ensure_sse_streams() -> None: + """Create ALERT_EVENTS and OPERATION_EVENTS NATS streams if they don't exist. + + Called once during app startup so the streams are ready before any + SSE connection or event publisher needs them. Idempotent -- uses + add_stream which acts as create-or-update. + """ + nc = None + try: + nc = await nats.connect(settings.NATS_URL) + js = nc.jetstream() + + await js.add_stream( + StreamConfig( + name="ALERT_EVENTS", + subjects=["alert.fired.>", "alert.resolved.>"], + max_age=3600, # 1 hour retention + ) + ) + logger.info("nats.stream.ensured", stream="ALERT_EVENTS") + + await js.add_stream( + StreamConfig( + name="OPERATION_EVENTS", + subjects=["firmware.progress.>"], + max_age=3600, # 1 hour retention + ) + ) + logger.info("nats.stream.ensured", stream="OPERATION_EVENTS") + + except Exception as exc: + logger.warning("sse.streams.ensure_failed", error=str(exc)) + raise + finally: + if nc: + try: + await nc.close() + except Exception: + pass + + +class SSEConnectionManager: + """Manages a single SSE client's lifecycle: NATS connection, subscriptions, and event queue.""" + + def __init__(self) -> None: + self._nc: Optional[nats.aio.client.Client] = None + self._subscriptions: list = [] + self._queue: Optional[asyncio.Queue] = None + self._tenant_id: Optional[str] = None + self._connection_id: Optional[str] = None + + async def connect( + self, + connection_id: str, + tenant_id: Optional[str], + last_event_id: Optional[str] = None, + ) -> asyncio.Queue: + """Set up NATS subscriptions and return an asyncio.Queue for SSE events. + + Args: + connection_id: Unique identifier for this SSE connection. + tenant_id: Tenant UUID string to filter events. None for super_admin + (receives events from all tenants). + last_event_id: NATS stream sequence number from the Last-Event-ID header. + If provided, replay starts from sequence + 1. + + Returns: + asyncio.Queue that the SSE generator should drain. + """ + self._connection_id = connection_id + self._tenant_id = tenant_id + self._queue = asyncio.Queue(maxsize=256) + + self._nc = await nats.connect( + settings.NATS_URL, + max_reconnect_attempts=5, + reconnect_time_wait=2, + ) + js = self._nc.jetstream() + + logger.info( + "sse.connecting", + connection_id=connection_id, + tenant_id=tenant_id, + last_event_id=last_event_id, + ) + + # Build consumer config for replay support + if last_event_id is not None: + try: + start_seq = int(last_event_id) + 1 + consumer_cfg = ConsumerConfig(deliver_policy=DeliverPolicy.BY_START_SEQUENCE, opt_start_seq=start_seq) + except (ValueError, TypeError): + consumer_cfg = ConsumerConfig(deliver_policy=DeliverPolicy.NEW) + else: + consumer_cfg = ConsumerConfig(deliver_policy=DeliverPolicy.NEW) + + # Subscribe to device events (DEVICE_EVENTS stream -- created by Go poller) + for subject in _DEVICE_EVENT_SUBJECTS: + try: + sub = await js.subscribe( + subject, + stream="DEVICE_EVENTS", + config=consumer_cfg, + ) + self._subscriptions.append(sub) + except Exception as exc: + logger.warning( + "sse.subscribe_failed", + subject=subject, + stream="DEVICE_EVENTS", + error=str(exc), + ) + + # Subscribe to alert events (ALERT_EVENTS stream) + # Lazily create the stream if it doesn't exist yet (startup race) + for subject in _ALERT_EVENT_SUBJECTS: + try: + sub = await js.subscribe( + subject, + stream="ALERT_EVENTS", + config=consumer_cfg, + ) + self._subscriptions.append(sub) + except Exception as exc: + if "stream not found" in str(exc): + try: + await js.add_stream(StreamConfig( + name="ALERT_EVENTS", + subjects=_ALERT_EVENT_SUBJECTS, + max_age=3600, + )) + sub = await js.subscribe(subject, stream="ALERT_EVENTS", config=consumer_cfg) + self._subscriptions.append(sub) + logger.info("sse.stream_created_lazily", stream="ALERT_EVENTS") + except Exception as retry_exc: + logger.warning("sse.subscribe_failed", subject=subject, stream="ALERT_EVENTS", error=str(retry_exc)) + else: + logger.warning("sse.subscribe_failed", subject=subject, stream="ALERT_EVENTS", error=str(exc)) + + # Subscribe to operation events (OPERATION_EVENTS stream) + for subject in _OPERATION_EVENT_SUBJECTS: + try: + sub = await js.subscribe( + subject, + stream="OPERATION_EVENTS", + config=consumer_cfg, + ) + self._subscriptions.append(sub) + except Exception as exc: + if "stream not found" in str(exc): + try: + await js.add_stream(StreamConfig( + name="OPERATION_EVENTS", + subjects=_OPERATION_EVENT_SUBJECTS, + max_age=3600, + )) + sub = await js.subscribe(subject, stream="OPERATION_EVENTS", config=consumer_cfg) + self._subscriptions.append(sub) + logger.info("sse.stream_created_lazily", stream="OPERATION_EVENTS") + except Exception as retry_exc: + logger.warning("sse.subscribe_failed", subject=subject, stream="OPERATION_EVENTS", error=str(retry_exc)) + else: + logger.warning("sse.subscribe_failed", subject=subject, stream="OPERATION_EVENTS", error=str(exc)) + + # Start background task to pull messages from subscriptions into the queue + asyncio.create_task(self._pump_messages()) + + logger.info( + "sse.connected", + connection_id=connection_id, + subscription_count=len(self._subscriptions), + ) + + return self._queue + + async def _pump_messages(self) -> None: + """Read messages from all NATS push subscriptions and push them onto the asyncio queue. + + Uses next_msg with a short timeout so we can interleave across + subscriptions without blocking. Runs until the NATS connection is closed + or drained. + """ + while self._nc and self._nc.is_connected: + for sub in self._subscriptions: + try: + msg = await sub.next_msg(timeout=0.5) + await self._handle_message(msg) + except nats.errors.TimeoutError: + # No messages available on this subscription -- move on + continue + except Exception as exc: + if self._nc and self._nc.is_connected: + logger.warning( + "sse.pump_error", + connection_id=self._connection_id, + error=str(exc), + ) + break + # Brief yield to avoid tight-looping + await asyncio.sleep(0.1) + + async def _handle_message(self, msg) -> None: + """Parse a NATS message, apply tenant filter, and enqueue as SSE event.""" + try: + data = json.loads(msg.data) + except (json.JSONDecodeError, UnicodeDecodeError): + await msg.ack() + return + + # Tenant filtering: skip messages not matching this connection's tenant + if self._tenant_id is not None: + msg_tenant = data.get("tenant_id", "") + if str(msg_tenant) != self._tenant_id: + await msg.ack() + return + + event_type = _map_subject_to_event_type(msg.subject) + + # Extract NATS stream sequence for Last-Event-ID support + seq_id = "0" + if msg.metadata and msg.metadata.sequence: + seq_id = str(msg.metadata.sequence.stream) + + sse_event = { + "event": event_type, + "data": json.dumps(data), + "id": seq_id, + } + + try: + self._queue.put_nowait(sse_event) + except asyncio.QueueFull: + logger.warning( + "sse.queue_full", + connection_id=self._connection_id, + dropped_event=event_type, + ) + + await msg.ack() + + async def disconnect(self) -> None: + """Unsubscribe from all NATS subscriptions and close the connection.""" + logger.info("sse.disconnecting", connection_id=self._connection_id) + + for sub in self._subscriptions: + try: + await sub.unsubscribe() + except Exception: + pass + self._subscriptions.clear() + + if self._nc: + try: + await self._nc.drain() + except Exception: + try: + await self._nc.close() + except Exception: + pass + self._nc = None + + logger.info("sse.disconnected", connection_id=self._connection_id) diff --git a/backend/app/services/template_service.py b/backend/app/services/template_service.py new file mode 100644 index 0000000..8032f69 --- /dev/null +++ b/backend/app/services/template_service.py @@ -0,0 +1,480 @@ +"""Config template service: Jinja2 rendering, variable extraction, and multi-device push. + +Provides: +- extract_variables: Parse template content to find all undeclared Jinja2 variables +- render_template: Render a template with device context and custom variables +- validate_variable: Type-check a variable value against its declared type +- push_to_devices: Sequential multi-device push with pause-on-failure +- push_single_device: Two-phase panic-revert push for a single device + +The push logic follows the same two-phase pattern as restore_service but uses +separate scheduler and file names to avoid conflicts with restore operations. +""" + +import asyncio +import io +import ipaddress +import json +import logging +import uuid +from datetime import datetime, timezone + +import asyncssh +from jinja2 import meta +from jinja2.sandbox import SandboxedEnvironment +from sqlalchemy import select, text + +from app.config import settings +from app.database import AdminAsyncSessionLocal +from app.models.config_template import TemplatePushJob +from app.models.device import Device + +logger = logging.getLogger(__name__) + +# Sandboxed Jinja2 environment prevents template injection +_env = SandboxedEnvironment() + +# Names used on the RouterOS device during template push +_PANIC_REVERT_SCHEDULER = "mikrotik-portal-template-revert" +_PRE_PUSH_BACKUP = "portal-template-pre-push" +_TEMPLATE_RSC = "portal-template.rsc" + + +# --------------------------------------------------------------------------- +# Variable extraction & rendering +# --------------------------------------------------------------------------- + + +def extract_variables(template_content: str) -> list[str]: + """Extract all undeclared variables from a Jinja2 template. + + Returns a sorted list of variable names, excluding the built-in 'device' + variable which is auto-populated at render time. + """ + ast = _env.parse(template_content) + all_vars = meta.find_undeclared_variables(ast) + # 'device' is a built-in variable, not user-provided + return sorted(v for v in all_vars if v != "device") + + +def render_template( + template_content: str, + device: dict, + custom_variables: dict[str, str], +) -> str: + """Render a Jinja2 template with device context and custom variables. + + The 'device' variable is auto-populated from the device dict. + Custom variables are user-provided at push time. + + Uses SandboxedEnvironment to prevent template injection. + + Args: + template_content: Jinja2 template string. + device: Device info dict with keys: hostname, ip_address, model. + custom_variables: User-supplied variable values. + + Returns: + Rendered template string. + + Raises: + jinja2.TemplateSyntaxError: If template has syntax errors. + jinja2.UndefinedError: If required variables are missing. + """ + context = { + "device": { + "hostname": device.get("hostname", ""), + "ip": device.get("ip_address", ""), + "model": device.get("model", ""), + }, + **custom_variables, + } + tpl = _env.from_string(template_content) + return tpl.render(context) + + +def validate_variable(name: str, value: str, var_type: str) -> str | None: + """Validate a variable value against its declared type. + + Returns None on success, or an error message string on failure. + """ + if var_type == "string": + return None # any string is valid + elif var_type == "ip": + try: + ipaddress.ip_address(value) + return None + except ValueError: + return f"'{name}' must be a valid IP address" + elif var_type == "subnet": + try: + ipaddress.ip_network(value, strict=False) + return None + except ValueError: + return f"'{name}' must be a valid subnet (e.g., 192.168.1.0/24)" + elif var_type == "integer": + try: + int(value) + return None + except ValueError: + return f"'{name}' must be an integer" + elif var_type == "boolean": + if value.lower() in ("true", "false", "yes", "no", "1", "0"): + return None + return f"'{name}' must be a boolean (true/false)" + return None # unknown type, allow + + +# --------------------------------------------------------------------------- +# Multi-device push orchestration +# --------------------------------------------------------------------------- + + +async def push_to_devices(rollout_id: str) -> dict: + """Execute sequential template push for all jobs in a rollout. + + Processes devices one at a time. If any device fails or reverts, + remaining jobs stay pending (paused). Follows the same pattern as + firmware upgrade_service.start_mass_upgrade. + + This runs as a background task (asyncio.create_task) after the + API creates the push jobs and returns the rollout_id. + """ + try: + return await _run_push_rollout(rollout_id) + except Exception as exc: + logger.error( + "Uncaught exception in template push rollout %s: %s", + rollout_id, exc, exc_info=True, + ) + return {"completed": 0, "failed": 1, "pending": 0} + + +async def _run_push_rollout(rollout_id: str) -> dict: + """Internal rollout implementation.""" + # Load all jobs for this rollout + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text(""" + SELECT j.id::text, j.status, d.hostname + FROM template_push_jobs j + JOIN devices d ON d.id = j.device_id + WHERE j.rollout_id = CAST(:rollout_id AS uuid) + ORDER BY j.created_at ASC + """), + {"rollout_id": rollout_id}, + ) + jobs = result.fetchall() + + if not jobs: + logger.warning("No jobs found for template push rollout %s", rollout_id) + return {"completed": 0, "failed": 0, "pending": 0} + + completed = 0 + failed = False + + for job_id, current_status, hostname in jobs: + if current_status != "pending": + if current_status == "committed": + completed += 1 + continue + + logger.info( + "Template push rollout %s: pushing to device %s (job %s)", + rollout_id, hostname, job_id, + ) + + await push_single_device(job_id) + + # Check resulting status + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text("SELECT status FROM template_push_jobs WHERE id = CAST(:id AS uuid)"), + {"id": job_id}, + ) + row = result.fetchone() + + if row and row[0] == "committed": + completed += 1 + elif row and row[0] in ("failed", "reverted"): + failed = True + logger.error( + "Template push rollout %s paused: device %s %s", + rollout_id, hostname, row[0], + ) + break + + # Count remaining pending jobs + remaining = sum(1 for _, s, _ in jobs if s == "pending") - completed - (1 if failed else 0) + + return { + "completed": completed, + "failed": 1 if failed else 0, + "pending": max(0, remaining), + } + + +async def push_single_device(job_id: str) -> None: + """Push rendered template content to a single device. + + Implements the two-phase panic-revert pattern: + 1. Pre-backup (mandatory) + 2. Install panic-revert scheduler on device + 3. Write template content as RSC file via SFTP + 4. /import the RSC file + 5. Wait 60s for config to settle + 6. Reachability check -> committed or reverted + + All errors are caught and recorded in the job row. + """ + try: + await _run_single_push(job_id) + except Exception as exc: + logger.error( + "Uncaught exception in template push job %s: %s", + job_id, exc, exc_info=True, + ) + await _update_job(job_id, status="failed", error_message=f"Unexpected error: {exc}") + + +async def _run_single_push(job_id: str) -> None: + """Internal single-device push implementation.""" + + # Step 1: Load job and device info + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text(""" + SELECT j.id, j.device_id, j.tenant_id, j.rendered_content, + d.ip_address, d.hostname, d.encrypted_credentials, + d.encrypted_credentials_transit + FROM template_push_jobs j + JOIN devices d ON d.id = j.device_id + WHERE j.id = CAST(:job_id AS uuid) + """), + {"job_id": job_id}, + ) + row = result.fetchone() + + if not row: + logger.error("Template push job %s not found", job_id) + return + + ( + _, device_id, tenant_id, rendered_content, + ip_address, hostname, encrypted_credentials, + encrypted_credentials_transit, + ) = row + + device_id = str(device_id) + tenant_id = str(tenant_id) + hostname = hostname or ip_address + + # Step 2: Update status to pushing + await _update_job(job_id, status="pushing", started_at=datetime.now(timezone.utc)) + + # Step 3: Decrypt credentials (dual-read: Transit preferred, legacy fallback) + if not encrypted_credentials_transit and not encrypted_credentials: + await _update_job(job_id, status="failed", error_message="Device has no stored credentials") + return + + try: + from app.services.crypto import decrypt_credentials_hybrid + key = settings.get_encryption_key_bytes() + creds_json = await decrypt_credentials_hybrid( + encrypted_credentials_transit, encrypted_credentials, tenant_id, key, + ) + creds = json.loads(creds_json) + ssh_username = creds.get("username", "") + ssh_password = creds.get("password", "") + except Exception as cred_err: + await _update_job( + job_id, status="failed", + error_message=f"Failed to decrypt credentials: {cred_err}", + ) + return + + # Step 4: Mandatory pre-push backup + logger.info("Running mandatory pre-push backup for device %s (%s)", hostname, ip_address) + try: + from app.services import backup_service + backup_result = await backup_service.run_backup( + device_id=device_id, + tenant_id=tenant_id, + trigger_type="pre-template-push", + ) + backup_sha = backup_result["commit_sha"] + await _update_job(job_id, pre_push_backup_sha=backup_sha) + logger.info("Pre-push backup complete: %s", backup_sha[:8]) + except Exception as backup_err: + logger.error("Pre-push backup failed for %s: %s", hostname, backup_err) + await _update_job( + job_id, status="failed", + error_message=f"Pre-push backup failed: {backup_err}", + ) + return + + # Step 5: SSH to device - install panic-revert, push config + logger.info( + "Pushing template to device %s (%s): installing panic-revert and uploading config", + hostname, ip_address, + ) + + try: + async with asyncssh.connect( + ip_address, + port=22, + username=ssh_username, + password=ssh_password, + known_hosts=None, + connect_timeout=30, + ) as conn: + # 5a: Create binary backup on device as revert point + await conn.run( + f"/system backup save name={_PRE_PUSH_BACKUP} dont-encrypt=yes", + check=True, + ) + logger.debug("Pre-push binary backup saved on device as %s.backup", _PRE_PUSH_BACKUP) + + # 5b: Install panic-revert RouterOS scheduler + await conn.run( + f"/system scheduler add " + f'name="{_PANIC_REVERT_SCHEDULER}" ' + f"interval=90s " + f'on-event=":delay 0; /system backup load name={_PRE_PUSH_BACKUP}" ' + f"start-time=startup", + check=True, + ) + logger.debug("Panic-revert scheduler installed on device") + + # 5c: Upload rendered template as RSC file via SFTP + async with conn.start_sftp_client() as sftp: + async with sftp.open(_TEMPLATE_RSC, "wb") as f: + await f.write(rendered_content.encode("utf-8")) + logger.debug("Uploaded %s to device flash", _TEMPLATE_RSC) + + # 5d: /import the config file + import_result = await conn.run( + f"/import file={_TEMPLATE_RSC}", + check=False, + ) + logger.info( + "Template import result for device %s: exit_status=%s stdout=%r", + hostname, import_result.exit_status, + (import_result.stdout or "")[:200], + ) + + # 5e: Clean up the uploaded RSC file (best-effort) + try: + await conn.run(f"/file remove {_TEMPLATE_RSC}", check=True) + except Exception as cleanup_err: + logger.warning( + "Failed to clean up %s from device %s: %s", + _TEMPLATE_RSC, ip_address, cleanup_err, + ) + + except Exception as push_err: + logger.error( + "SSH push phase failed for device %s (%s): %s", + hostname, ip_address, push_err, + ) + await _update_job( + job_id, status="failed", + error_message=f"Config push failed during SSH phase: {push_err}", + ) + return + + # Step 6: Wait 60s for config to settle + logger.info("Template pushed to device %s - waiting 60s for config to settle", hostname) + await asyncio.sleep(60) + + # Step 7: Reachability check + reachable = await _check_reachability(ip_address, ssh_username, ssh_password) + + if reachable: + # Step 8a: Device is reachable - remove panic-revert scheduler + cleanup + logger.info("Device %s (%s) is reachable after push - committing", hostname, ip_address) + try: + async with asyncssh.connect( + ip_address, port=22, + username=ssh_username, password=ssh_password, + known_hosts=None, connect_timeout=30, + ) as conn: + await conn.run( + f'/system scheduler remove "{_PANIC_REVERT_SCHEDULER}"', + check=False, + ) + await conn.run( + f"/file remove {_PRE_PUSH_BACKUP}.backup", + check=False, + ) + except Exception as cleanup_err: + logger.warning( + "Failed to clean up panic-revert scheduler/backup on device %s: %s", + hostname, cleanup_err, + ) + + await _update_job( + job_id, status="committed", + completed_at=datetime.now(timezone.utc), + ) + else: + # Step 8b: Device unreachable - RouterOS is auto-reverting + logger.warning( + "Device %s (%s) is unreachable after push - panic-revert scheduler " + "will auto-revert to %s.backup", + hostname, ip_address, _PRE_PUSH_BACKUP, + ) + await _update_job( + job_id, status="reverted", + error_message="Device unreachable after push; auto-reverted via panic-revert scheduler", + completed_at=datetime.now(timezone.utc), + ) + + +# --------------------------------------------------------------------------- +# Internal helpers +# --------------------------------------------------------------------------- + + +async def _check_reachability(ip: str, username: str, password: str) -> bool: + """Check if a RouterOS device is reachable via SSH.""" + try: + async with asyncssh.connect( + ip, port=22, + username=username, password=password, + known_hosts=None, connect_timeout=30, + ) as conn: + result = await conn.run("/system identity print", check=True) + logger.debug("Reachability check OK for %s: %r", ip, result.stdout[:50]) + return True + except Exception as exc: + logger.info("Device %s unreachable after push: %s", ip, exc) + return False + + +async def _update_job(job_id: str, **kwargs) -> None: + """Update TemplatePushJob fields via raw SQL (background task, no RLS).""" + sets = [] + params: dict = {"job_id": job_id} + + for key, value in kwargs.items(): + param_name = f"v_{key}" + if value is None and key in ("error_message", "started_at", "completed_at", "pre_push_backup_sha"): + sets.append(f"{key} = NULL") + else: + sets.append(f"{key} = :{param_name}") + params[param_name] = value + + if not sets: + return + + async with AdminAsyncSessionLocal() as session: + await session.execute( + text(f""" + UPDATE template_push_jobs + SET {', '.join(sets)} + WHERE id = CAST(:job_id AS uuid) + """), + params, + ) + await session.commit() diff --git a/backend/app/services/upgrade_service.py b/backend/app/services/upgrade_service.py new file mode 100644 index 0000000..ead083b --- /dev/null +++ b/backend/app/services/upgrade_service.py @@ -0,0 +1,564 @@ +"""Firmware upgrade orchestration service. + +Handles single-device and mass firmware upgrades with: +- Mandatory pre-upgrade config backup +- NPK download and SFTP upload to device +- Reboot trigger and reconnect polling +- Post-upgrade version verification +- Sequential mass rollout with pause-on-failure +- Scheduled upgrades via APScheduler DateTrigger + +All DB operations use AdminAsyncSessionLocal to bypass RLS since upgrade +jobs may span multiple tenants and run in background asyncio tasks. +""" + +import asyncio +import io +import json +import logging +from datetime import datetime, timezone +from pathlib import Path + +import asyncssh +from sqlalchemy import text + +from app.config import settings +from app.database import AdminAsyncSessionLocal +from app.services.event_publisher import publish_event + +logger = logging.getLogger(__name__) + +# Maximum time to wait for a device to reconnect after reboot (seconds) +_RECONNECT_TIMEOUT = 300 # 5 minutes +_RECONNECT_POLL_INTERVAL = 15 # seconds +_INITIAL_WAIT = 60 # Wait before first reconnect attempt (boot cycle) + + +async def start_upgrade(job_id: str) -> None: + """Execute a single device firmware upgrade. + + Lifecycle: pending -> downloading -> uploading -> rebooting -> verifying -> completed/failed + + This function is designed to run as a background asyncio.create_task or + APScheduler job. It never raises — all errors are caught and recorded + in the FirmwareUpgradeJob row. + """ + try: + await _run_upgrade(job_id) + except Exception as exc: + logger.error("Uncaught exception in firmware upgrade %s: %s", job_id, exc, exc_info=True) + await _update_job(job_id, status="failed", error_message=f"Unexpected error: {exc}") + + +async def _publish_upgrade_progress( + tenant_id: str, + device_id: str, + job_id: str, + stage: str, + target_version: str, + message: str, + error: str | None = None, +) -> None: + """Publish firmware upgrade progress event to NATS (fire-and-forget).""" + payload = { + "event_type": "firmware_progress", + "tenant_id": tenant_id, + "device_id": device_id, + "job_id": job_id, + "stage": stage, + "target_version": target_version, + "message": message, + "timestamp": datetime.now(timezone.utc).isoformat(), + } + if error: + payload["error"] = error + await publish_event(f"firmware.progress.{tenant_id}.{device_id}", payload) + + +async def _run_upgrade(job_id: str) -> None: + """Internal upgrade implementation.""" + + # Step 1: Load job + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text(""" + SELECT j.id, j.device_id, j.tenant_id, j.target_version, + j.architecture, j.channel, j.status, j.confirmed_major_upgrade, + d.ip_address, d.hostname, d.encrypted_credentials, + d.routeros_version, d.encrypted_credentials_transit + FROM firmware_upgrade_jobs j + JOIN devices d ON d.id = j.device_id + WHERE j.id = CAST(:job_id AS uuid) + """), + {"job_id": job_id}, + ) + row = result.fetchone() + + if not row: + logger.error("Upgrade job %s not found", job_id) + return + + ( + _, device_id, tenant_id, target_version, + architecture, channel, status, confirmed_major, + ip_address, hostname, encrypted_credentials, + current_version, encrypted_credentials_transit, + ) = row + + device_id = str(device_id) + tenant_id = str(tenant_id) + hostname = hostname or ip_address + + # Skip if already running or completed + if status not in ("pending", "scheduled"): + logger.info("Upgrade job %s already in status %s — skipping", job_id, status) + return + + logger.info( + "Starting firmware upgrade for %s (%s): %s -> %s", + hostname, ip_address, current_version, target_version, + ) + + # Step 2: Update status to downloading + await _update_job(job_id, status="downloading", started_at=datetime.now(timezone.utc)) + await _publish_upgrade_progress(tenant_id, device_id, job_id, "downloading", target_version, f"Downloading firmware {target_version} for {hostname}") + + # Step 3: Check major version upgrade confirmation + if current_version and target_version: + current_major = current_version.split(".")[0] if current_version else "" + target_major = target_version.split(".")[0] + if current_major != target_major and not confirmed_major: + await _update_job( + job_id, + status="failed", + error_message="Major version upgrade requires explicit confirmation", + ) + await _publish_upgrade_progress(tenant_id, device_id, job_id, "failed", target_version, f"Major version upgrade requires explicit confirmation for {hostname}", error="Major version upgrade requires explicit confirmation") + return + + # Step 4: Mandatory config backup + logger.info("Running mandatory pre-upgrade backup for %s", hostname) + try: + from app.services import backup_service + backup_result = await backup_service.run_backup( + device_id=device_id, + tenant_id=tenant_id, + trigger_type="pre-upgrade", + ) + backup_sha = backup_result["commit_sha"] + await _update_job(job_id, pre_upgrade_backup_sha=backup_sha) + logger.info("Pre-upgrade backup complete: %s", backup_sha[:8]) + except Exception as backup_err: + logger.error("Pre-upgrade backup failed for %s: %s", hostname, backup_err) + await _update_job( + job_id, + status="failed", + error_message=f"Pre-upgrade backup failed: {backup_err}", + ) + await _publish_upgrade_progress(tenant_id, device_id, job_id, "failed", target_version, f"Pre-upgrade backup failed for {hostname}", error=str(backup_err)) + return + + # Step 5: Download NPK + logger.info("Downloading firmware %s for %s/%s", target_version, architecture, channel) + try: + from app.services.firmware_service import download_firmware + npk_path = await download_firmware(architecture, channel, target_version) + logger.info("Firmware cached at %s", npk_path) + except Exception as dl_err: + logger.error("Firmware download failed: %s", dl_err) + await _update_job( + job_id, + status="failed", + error_message=f"Firmware download failed: {dl_err}", + ) + await _publish_upgrade_progress(tenant_id, device_id, job_id, "failed", target_version, f"Firmware download failed for {hostname}", error=str(dl_err)) + return + + # Step 6: Upload NPK to device via SFTP + await _update_job(job_id, status="uploading") + await _publish_upgrade_progress(tenant_id, device_id, job_id, "uploading", target_version, f"Uploading firmware to {hostname}") + + # Decrypt device credentials (dual-read: Transit preferred, legacy fallback) + if not encrypted_credentials_transit and not encrypted_credentials: + await _update_job(job_id, status="failed", error_message="Device has no stored credentials") + await _publish_upgrade_progress(tenant_id, device_id, job_id, "failed", target_version, f"No stored credentials for {hostname}", error="Device has no stored credentials") + return + + try: + from app.services.crypto import decrypt_credentials_hybrid + key = settings.get_encryption_key_bytes() + creds_json = await decrypt_credentials_hybrid( + encrypted_credentials_transit, encrypted_credentials, tenant_id, key, + ) + creds = json.loads(creds_json) + ssh_username = creds.get("username", "") + ssh_password = creds.get("password", "") + except Exception as cred_err: + await _update_job( + job_id, + status="failed", + error_message=f"Failed to decrypt credentials: {cred_err}", + ) + await _publish_upgrade_progress(tenant_id, device_id, job_id, "failed", target_version, f"Failed to decrypt credentials for {hostname}", error=str(cred_err)) + return + + try: + npk_data = Path(npk_path).read_bytes() + npk_filename = Path(npk_path).name + + async with asyncssh.connect( + ip_address, + port=22, + username=ssh_username, + password=ssh_password, + known_hosts=None, + connect_timeout=30, + ) as conn: + async with conn.start_sftp_client() as sftp: + async with sftp.open(f"/{npk_filename}", "wb") as f: + await f.write(npk_data) + logger.info("Uploaded %s to %s", npk_filename, hostname) + except Exception as upload_err: + logger.error("NPK upload failed for %s: %s", hostname, upload_err) + await _update_job( + job_id, + status="failed", + error_message=f"NPK upload failed: {upload_err}", + ) + await _publish_upgrade_progress(tenant_id, device_id, job_id, "failed", target_version, f"NPK upload failed for {hostname}", error=str(upload_err)) + return + + # Step 7: Trigger reboot + await _update_job(job_id, status="rebooting") + await _publish_upgrade_progress(tenant_id, device_id, job_id, "rebooting", target_version, f"Rebooting {hostname} for firmware install") + try: + async with asyncssh.connect( + ip_address, + port=22, + username=ssh_username, + password=ssh_password, + known_hosts=None, + connect_timeout=30, + ) as conn: + # RouterOS will install NPK on boot + await conn.run("/system reboot", check=False) + logger.info("Reboot command sent to %s", hostname) + except Exception as reboot_err: + # Device may drop connection during reboot — this is expected + logger.info("Device %s dropped connection after reboot command (expected): %s", hostname, reboot_err) + + # Step 8: Wait for reconnect + logger.info("Waiting %ds before polling %s for reconnect", _INITIAL_WAIT, hostname) + await asyncio.sleep(_INITIAL_WAIT) + + reconnected = False + elapsed = 0 + while elapsed < _RECONNECT_TIMEOUT: + if await _check_ssh_reachable(ip_address, ssh_username, ssh_password): + reconnected = True + break + await asyncio.sleep(_RECONNECT_POLL_INTERVAL) + elapsed += _RECONNECT_POLL_INTERVAL + + if not reconnected: + logger.error("Device %s did not reconnect within %ds", hostname, _RECONNECT_TIMEOUT) + await _update_job( + job_id, + status="failed", + error_message=f"Device did not reconnect within {_RECONNECT_TIMEOUT // 60} minutes after reboot", + ) + await _publish_upgrade_progress(tenant_id, device_id, job_id, "failed", target_version, f"Device {hostname} did not reconnect within {_RECONNECT_TIMEOUT // 60} minutes", error="Reconnect timeout") + return + + # Step 9: Verify upgrade + await _update_job(job_id, status="verifying") + await _publish_upgrade_progress(tenant_id, device_id, job_id, "verifying", target_version, f"Verifying firmware version on {hostname}") + try: + actual_version = await _get_device_version(ip_address, ssh_username, ssh_password) + if actual_version and target_version in actual_version: + logger.info( + "Firmware upgrade verified for %s: %s", + hostname, actual_version, + ) + await _update_job( + job_id, + status="completed", + completed_at=datetime.now(timezone.utc), + ) + await _publish_upgrade_progress(tenant_id, device_id, job_id, "completed", target_version, f"Firmware upgrade to {target_version} completed on {hostname}") + else: + logger.error( + "Version mismatch for %s: expected %s, got %s", + hostname, target_version, actual_version, + ) + await _update_job( + job_id, + status="failed", + error_message=f"Expected {target_version} but got {actual_version}", + ) + await _publish_upgrade_progress(tenant_id, device_id, job_id, "failed", target_version, f"Version mismatch on {hostname}: expected {target_version}, got {actual_version}", error=f"Expected {target_version} but got {actual_version}") + except Exception as verify_err: + logger.error("Post-upgrade verification failed for %s: %s", hostname, verify_err) + await _update_job( + job_id, + status="failed", + error_message=f"Post-upgrade verification failed: {verify_err}", + ) + await _publish_upgrade_progress(tenant_id, device_id, job_id, "failed", target_version, f"Post-upgrade verification failed for {hostname}", error=str(verify_err)) + + +async def start_mass_upgrade(rollout_group_id: str) -> dict: + """Execute a sequential mass firmware upgrade. + + Processes upgrade jobs one at a time. If any device fails, + all remaining jobs in the group are paused. + + Returns summary dict with completed/failed/paused counts. + """ + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text(""" + SELECT j.id, j.status, d.hostname + FROM firmware_upgrade_jobs j + JOIN devices d ON d.id = j.device_id + WHERE j.rollout_group_id = CAST(:group_id AS uuid) + ORDER BY j.created_at ASC + """), + {"group_id": rollout_group_id}, + ) + jobs = result.fetchall() + + if not jobs: + logger.warning("No jobs found for rollout group %s", rollout_group_id) + return {"completed": 0, "failed": 0, "paused": 0} + + completed = 0 + failed_device = None + + for job_id, current_status, hostname in jobs: + job_id_str = str(job_id) + + # Only process pending/scheduled jobs + if current_status not in ("pending", "scheduled"): + if current_status == "completed": + completed += 1 + continue + + logger.info("Mass rollout: upgrading device %s (job %s)", hostname, job_id_str) + await start_upgrade(job_id_str) + + # Check if it completed or failed + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text("SELECT status FROM firmware_upgrade_jobs WHERE id = CAST(:id AS uuid)"), + {"id": job_id_str}, + ) + row = result.fetchone() + + if row and row[0] == "completed": + completed += 1 + elif row and row[0] == "failed": + failed_device = hostname + logger.error("Mass rollout paused: %s failed", hostname) + break + + # Pause remaining jobs if one failed + paused = 0 + if failed_device: + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text(""" + UPDATE firmware_upgrade_jobs + SET status = 'paused' + WHERE rollout_group_id = CAST(:group_id AS uuid) + AND status IN ('pending', 'scheduled') + RETURNING id + """), + {"group_id": rollout_group_id}, + ) + paused = len(result.fetchall()) + await session.commit() + + return { + "completed": completed, + "failed": 1 if failed_device else 0, + "failed_device": failed_device, + "paused": paused, + } + + +def schedule_upgrade(job_id: str, scheduled_at: datetime) -> None: + """Schedule a firmware upgrade for future execution via APScheduler.""" + from app.services.backup_scheduler import backup_scheduler + + backup_scheduler.add_job( + start_upgrade, + trigger="date", + run_date=scheduled_at, + args=[job_id], + id=f"fw_upgrade_{job_id}", + name=f"Firmware upgrade {job_id}", + max_instances=1, + replace_existing=True, + ) + logger.info("Scheduled firmware upgrade %s for %s", job_id, scheduled_at) + + +def schedule_mass_upgrade(rollout_group_id: str, scheduled_at: datetime) -> None: + """Schedule a mass firmware upgrade for future execution.""" + from app.services.backup_scheduler import backup_scheduler + + backup_scheduler.add_job( + start_mass_upgrade, + trigger="date", + run_date=scheduled_at, + args=[rollout_group_id], + id=f"fw_mass_upgrade_{rollout_group_id}", + name=f"Mass firmware upgrade {rollout_group_id}", + max_instances=1, + replace_existing=True, + ) + logger.info("Scheduled mass firmware upgrade %s for %s", rollout_group_id, scheduled_at) + + +async def cancel_upgrade(job_id: str) -> None: + """Cancel a scheduled or pending upgrade.""" + from app.services.backup_scheduler import backup_scheduler + + # Remove APScheduler job if it exists + try: + backup_scheduler.remove_job(f"fw_upgrade_{job_id}") + except Exception: + pass # Job might not be scheduled + + await _update_job( + job_id, + status="failed", + error_message="Cancelled by operator", + completed_at=datetime.now(timezone.utc), + ) + logger.info("Upgrade job %s cancelled", job_id) + + +async def retry_failed_upgrade(job_id: str) -> None: + """Reset a failed upgrade job to pending and re-execute.""" + await _update_job( + job_id, + status="pending", + error_message=None, + started_at=None, + completed_at=None, + ) + asyncio.create_task(start_upgrade(job_id)) + logger.info("Retrying upgrade job %s", job_id) + + +async def resume_mass_upgrade(rollout_group_id: str) -> None: + """Resume a paused mass rollout from where it left off.""" + # Reset first paused job to pending, then restart sequential processing + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text(""" + UPDATE firmware_upgrade_jobs + SET status = 'pending' + WHERE rollout_group_id = CAST(:group_id AS uuid) + AND status = 'paused' + """), + {"group_id": rollout_group_id}, + ) + await session.commit() + + asyncio.create_task(start_mass_upgrade(rollout_group_id)) + logger.info("Resuming mass rollout %s", rollout_group_id) + + +async def abort_mass_upgrade(rollout_group_id: str) -> int: + """Abort all remaining jobs in a paused mass rollout.""" + async with AdminAsyncSessionLocal() as session: + result = await session.execute( + text(""" + UPDATE firmware_upgrade_jobs + SET status = 'failed', + error_message = 'Aborted by operator', + completed_at = NOW() + WHERE rollout_group_id = CAST(:group_id AS uuid) + AND status IN ('pending', 'scheduled', 'paused') + RETURNING id + """), + {"group_id": rollout_group_id}, + ) + aborted = len(result.fetchall()) + await session.commit() + + logger.info("Aborted %d remaining jobs in rollout %s", aborted, rollout_group_id) + return aborted + + +# --------------------------------------------------------------------------- +# Internal helpers +# --------------------------------------------------------------------------- + + +async def _update_job(job_id: str, **kwargs) -> None: + """Update FirmwareUpgradeJob fields.""" + sets = [] + params: dict = {"job_id": job_id} + + for key, value in kwargs.items(): + param_name = f"v_{key}" + if value is None and key in ("error_message", "started_at", "completed_at"): + sets.append(f"{key} = NULL") + else: + sets.append(f"{key} = :{param_name}") + params[param_name] = value + + if not sets: + return + + async with AdminAsyncSessionLocal() as session: + await session.execute( + text(f""" + UPDATE firmware_upgrade_jobs + SET {', '.join(sets)} + WHERE id = CAST(:job_id AS uuid) + """), + params, + ) + await session.commit() + + +async def _check_ssh_reachable(ip: str, username: str, password: str) -> bool: + """Check if a device is reachable via SSH.""" + try: + async with asyncssh.connect( + ip, + port=22, + username=username, + password=password, + known_hosts=None, + connect_timeout=15, + ) as conn: + await conn.run("/system identity print", check=True) + return True + except Exception: + return False + + +async def _get_device_version(ip: str, username: str, password: str) -> str: + """Get the current RouterOS version from a device via SSH.""" + async with asyncssh.connect( + ip, + port=22, + username=username, + password=password, + known_hosts=None, + connect_timeout=30, + ) as conn: + result = await conn.run("/system resource print", check=True) + # Parse version from output: "version: 7.17 (stable)" + for line in result.stdout.splitlines(): + if "version" in line.lower(): + parts = line.split(":", 1) + if len(parts) == 2: + return parts[1].strip() + return "" diff --git a/backend/app/services/vpn_service.py b/backend/app/services/vpn_service.py new file mode 100644 index 0000000..947715c --- /dev/null +++ b/backend/app/services/vpn_service.py @@ -0,0 +1,392 @@ +"""WireGuard VPN management service. + +Handles key generation, peer management, config file sync, and RouterOS command generation. +""" + +import base64 +import ipaddress +import json +import os +import uuid +from datetime import datetime, timezone +from pathlib import Path +from typing import Optional + +import structlog +from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey +from cryptography.hazmat.primitives.serialization import ( + Encoding, + NoEncryption, + PrivateFormat, + PublicFormat, +) +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.config import settings +from app.models.device import Device +from app.models.vpn import VpnConfig, VpnPeer +from app.services.crypto import decrypt_credentials, encrypt_credentials, encrypt_credentials_transit + +logger = structlog.get_logger(__name__) + + +# ── Key Generation ── + + +def generate_wireguard_keypair() -> tuple[str, str]: + """Generate a WireGuard X25519 keypair. Returns (private_key_b64, public_key_b64).""" + private_key = X25519PrivateKey.generate() + priv_bytes = private_key.private_bytes(Encoding.Raw, PrivateFormat.Raw, NoEncryption()) + pub_bytes = private_key.public_key().public_bytes(Encoding.Raw, PublicFormat.Raw) + return base64.b64encode(priv_bytes).decode(), base64.b64encode(pub_bytes).decode() + + +def generate_preshared_key() -> str: + """Generate a WireGuard preshared key (32 random bytes, base64).""" + return base64.b64encode(os.urandom(32)).decode() + + +# ── Config File Management ── + + +def _get_wg_config_path() -> Path: + """Return the path to the shared WireGuard config directory.""" + return Path(os.getenv("WIREGUARD_CONFIG_PATH", "/data/wireguard")) + + +async def sync_wireguard_config(db: AsyncSession, tenant_id: uuid.UUID) -> None: + """Regenerate wg0.conf from database state and write to shared volume.""" + config = await get_vpn_config(db, tenant_id) + if not config or not config.is_enabled: + return + + key_bytes = settings.get_encryption_key_bytes() + server_private_key = decrypt_credentials(config.server_private_key, key_bytes) + + result = await db.execute( + select(VpnPeer).where(VpnPeer.tenant_id == tenant_id, VpnPeer.is_enabled.is_(True)) + ) + peers = result.scalars().all() + + # Build wg0.conf + lines = [ + "[Interface]", + f"Address = {config.server_address}", + f"ListenPort = {config.server_port}", + f"PrivateKey = {server_private_key}", + "", + ] + + for peer in peers: + peer_ip = peer.assigned_ip.split("/")[0] # strip CIDR for AllowedIPs + allowed_ips = [f"{peer_ip}/32"] + if peer.additional_allowed_ips: + # Comma-separated additional subnets (e.g. site-to-site routing) + extra = [s.strip() for s in peer.additional_allowed_ips.split(",") if s.strip()] + allowed_ips.extend(extra) + lines.append("[Peer]") + lines.append(f"PublicKey = {peer.peer_public_key}") + if peer.preshared_key: + psk = decrypt_credentials(peer.preshared_key, key_bytes) + lines.append(f"PresharedKey = {psk}") + lines.append(f"AllowedIPs = {', '.join(allowed_ips)}") + lines.append("") + + config_dir = _get_wg_config_path() + wg_confs_dir = config_dir / "wg_confs" + wg_confs_dir.mkdir(parents=True, exist_ok=True) + + conf_path = wg_confs_dir / "wg0.conf" + conf_path.write_text("\n".join(lines)) + + # Signal WireGuard container to reload config + reload_flag = wg_confs_dir / ".reload" + reload_flag.write_text("1") + + logger.info("wireguard config synced", tenant_id=str(tenant_id), peers=len(peers)) + + +# ── Live Status ── + + +def read_wg_status() -> dict[str, dict]: + """Read live WireGuard peer status from the shared volume. + + The WireGuard container writes wg_status.json every 15 seconds + with output from `wg show wg0 dump`. Returns a dict keyed by + peer public key with handshake timestamp and transfer stats. + """ + status_path = _get_wg_config_path() / "wg_status.json" + if not status_path.exists(): + return {} + try: + data = json.loads(status_path.read_text()) + return {entry["public_key"]: entry for entry in data} + except (json.JSONDecodeError, KeyError, OSError): + return {} + + +def get_peer_handshake(wg_status: dict[str, dict], public_key: str) -> Optional[datetime]: + """Get last_handshake datetime for a peer from live WireGuard status.""" + entry = wg_status.get(public_key) + if not entry: + return None + ts = entry.get("last_handshake", 0) + if ts and ts > 0: + return datetime.fromtimestamp(ts, tz=timezone.utc) + return None + + +# ── CRUD Operations ── + + +async def get_vpn_config(db: AsyncSession, tenant_id: uuid.UUID) -> Optional[VpnConfig]: + """Get the VPN config for a tenant.""" + result = await db.execute(select(VpnConfig).where(VpnConfig.tenant_id == tenant_id)) + return result.scalar_one_or_none() + + +async def setup_vpn( + db: AsyncSession, tenant_id: uuid.UUID, endpoint: Optional[str] = None +) -> VpnConfig: + """Initialize VPN for a tenant — generates server keys and creates config.""" + existing = await get_vpn_config(db, tenant_id) + if existing: + raise ValueError("VPN already configured for this tenant") + + private_key_b64, public_key_b64 = generate_wireguard_keypair() + + key_bytes = settings.get_encryption_key_bytes() + encrypted_private = encrypt_credentials(private_key_b64, key_bytes) + + config = VpnConfig( + tenant_id=tenant_id, + server_private_key=encrypted_private, + server_public_key=public_key_b64, + endpoint=endpoint, + is_enabled=True, + ) + db.add(config) + await db.flush() + + await sync_wireguard_config(db, tenant_id) + return config + + +async def update_vpn_config( + db: AsyncSession, tenant_id: uuid.UUID, endpoint: Optional[str] = None, is_enabled: Optional[bool] = None +) -> VpnConfig: + """Update VPN config settings.""" + config = await get_vpn_config(db, tenant_id) + if not config: + raise ValueError("VPN not configured for this tenant") + + if endpoint is not None: + config.endpoint = endpoint + if is_enabled is not None: + config.is_enabled = is_enabled + + await db.flush() + await sync_wireguard_config(db, tenant_id) + return config + + +async def get_peers(db: AsyncSession, tenant_id: uuid.UUID) -> list[VpnPeer]: + """List all VPN peers for a tenant.""" + result = await db.execute( + select(VpnPeer).where(VpnPeer.tenant_id == tenant_id).order_by(VpnPeer.created_at) + ) + return list(result.scalars().all()) + + +async def _next_available_ip(db: AsyncSession, tenant_id: uuid.UUID, config: VpnConfig) -> str: + """Allocate the next available IP in the VPN subnet.""" + # Parse subnet: e.g. "10.10.0.0/24" → start from .2 (server is .1) + network = ipaddress.ip_network(config.subnet, strict=False) + hosts = list(network.hosts()) + + # Get already assigned IPs + result = await db.execute(select(VpnPeer.assigned_ip).where(VpnPeer.tenant_id == tenant_id)) + used_ips = {row[0].split("/")[0] for row in result.all()} + used_ips.add(config.server_address.split("/")[0]) # exclude server IP + + for host in hosts[1:]: # skip .1 (server) + if str(host) not in used_ips: + return f"{host}/24" + + raise ValueError("No available IPs in VPN subnet") + + +async def add_peer(db: AsyncSession, tenant_id: uuid.UUID, device_id: uuid.UUID, additional_allowed_ips: Optional[str] = None) -> VpnPeer: + """Add a device as a VPN peer.""" + config = await get_vpn_config(db, tenant_id) + if not config: + raise ValueError("VPN not configured — enable VPN first") + + # Check device exists + device = await db.execute(select(Device).where(Device.id == device_id, Device.tenant_id == tenant_id)) + if not device.scalar_one_or_none(): + raise ValueError("Device not found") + + # Check if already a peer + existing = await db.execute(select(VpnPeer).where(VpnPeer.device_id == device_id)) + if existing.scalar_one_or_none(): + raise ValueError("Device is already a VPN peer") + + private_key_b64, public_key_b64 = generate_wireguard_keypair() + psk = generate_preshared_key() + + key_bytes = settings.get_encryption_key_bytes() + encrypted_private = encrypt_credentials(private_key_b64, key_bytes) + encrypted_psk = encrypt_credentials(psk, key_bytes) + + assigned_ip = await _next_available_ip(db, tenant_id, config) + + peer = VpnPeer( + tenant_id=tenant_id, + device_id=device_id, + peer_private_key=encrypted_private, + peer_public_key=public_key_b64, + preshared_key=encrypted_psk, + assigned_ip=assigned_ip, + additional_allowed_ips=additional_allowed_ips, + ) + db.add(peer) + await db.flush() + + await sync_wireguard_config(db, tenant_id) + return peer + + +async def remove_peer(db: AsyncSession, tenant_id: uuid.UUID, peer_id: uuid.UUID) -> None: + """Remove a VPN peer.""" + result = await db.execute( + select(VpnPeer).where(VpnPeer.id == peer_id, VpnPeer.tenant_id == tenant_id) + ) + peer = result.scalar_one_or_none() + if not peer: + raise ValueError("Peer not found") + + await db.delete(peer) + await db.flush() + await sync_wireguard_config(db, tenant_id) + + +async def get_peer_config(db: AsyncSession, tenant_id: uuid.UUID, peer_id: uuid.UUID) -> dict: + """Get the full config for a peer — includes private key for device setup.""" + config = await get_vpn_config(db, tenant_id) + if not config: + raise ValueError("VPN not configured") + + result = await db.execute( + select(VpnPeer).where(VpnPeer.id == peer_id, VpnPeer.tenant_id == tenant_id) + ) + peer = result.scalar_one_or_none() + if not peer: + raise ValueError("Peer not found") + + key_bytes = settings.get_encryption_key_bytes() + private_key = decrypt_credentials(peer.peer_private_key, key_bytes) + psk = decrypt_credentials(peer.preshared_key, key_bytes) if peer.preshared_key else None + + endpoint = config.endpoint or "YOUR_SERVER_IP:51820" + peer_ip_no_cidr = peer.assigned_ip.split("/")[0] + + routeros_commands = [ + f'/interface wireguard add name=wg-portal listen-port=13231 private-key="{private_key}"', + f'/interface wireguard peers add interface=wg-portal public-key="{config.server_public_key}" ' + f'endpoint-address={endpoint.split(":")[0]} endpoint-port={endpoint.split(":")[-1]} ' + f'allowed-address={config.subnet} persistent-keepalive=25' + + (f' preshared-key="{psk}"' if psk else ""), + f"/ip address add address={peer.assigned_ip} interface=wg-portal", + ] + + return { + "peer_private_key": private_key, + "peer_public_key": peer.peer_public_key, + "assigned_ip": peer.assigned_ip, + "server_public_key": config.server_public_key, + "server_endpoint": endpoint, + "allowed_ips": config.subnet, + "routeros_commands": routeros_commands, + } + + +async def onboard_device( + db: AsyncSession, + tenant_id: uuid.UUID, + hostname: str, + username: str, + password: str, +) -> dict: + """Create device + VPN peer in one transaction. Returns device, peer, and RouterOS commands. + + Unlike regular device creation, this skips TCP connectivity checks because + the VPN tunnel isn't up yet. The device IP is set to the VPN-assigned address. + """ + config = await get_vpn_config(db, tenant_id) + if not config: + raise ValueError("VPN not configured — enable VPN first") + + # Allocate VPN IP before creating device + assigned_ip = await _next_available_ip(db, tenant_id, config) + vpn_ip_no_cidr = assigned_ip.split("/")[0] + + # Create device with VPN IP (skip TCP check — tunnel not up yet) + credentials_json = json.dumps({"username": username, "password": password}) + transit_ciphertext = await encrypt_credentials_transit(credentials_json, str(tenant_id)) + + device = Device( + tenant_id=tenant_id, + hostname=hostname, + ip_address=vpn_ip_no_cidr, + api_port=8728, + api_ssl_port=8729, + encrypted_credentials_transit=transit_ciphertext, + status="unknown", + ) + db.add(device) + await db.flush() + + # Create VPN peer linked to this device + private_key_b64, public_key_b64 = generate_wireguard_keypair() + psk = generate_preshared_key() + + key_bytes = settings.get_encryption_key_bytes() + encrypted_private = encrypt_credentials(private_key_b64, key_bytes) + encrypted_psk = encrypt_credentials(psk, key_bytes) + + peer = VpnPeer( + tenant_id=tenant_id, + device_id=device.id, + peer_private_key=encrypted_private, + peer_public_key=public_key_b64, + preshared_key=encrypted_psk, + assigned_ip=assigned_ip, + ) + db.add(peer) + await db.flush() + + await sync_wireguard_config(db, tenant_id) + + # Generate RouterOS commands + endpoint = config.endpoint or "YOUR_SERVER_IP:51820" + psk_decrypted = decrypt_credentials(encrypted_psk, key_bytes) + + routeros_commands = [ + f'/interface wireguard add name=wg-portal listen-port=13231 private-key="{private_key_b64}"', + f'/interface wireguard peers add interface=wg-portal public-key="{config.server_public_key}" ' + f'endpoint-address={endpoint.split(":")[0]} endpoint-port={endpoint.split(":")[-1]} ' + f'allowed-address={config.subnet} persistent-keepalive=25' + f' preshared-key="{psk_decrypted}"', + f"/ip address add address={assigned_ip} interface=wg-portal", + ] + + return { + "device_id": device.id, + "peer_id": peer.id, + "hostname": hostname, + "assigned_ip": assigned_ip, + "routeros_commands": routeros_commands, + } diff --git a/backend/app/templates/reports/alert_history.html b/backend/app/templates/reports/alert_history.html new file mode 100644 index 0000000..3382240 --- /dev/null +++ b/backend/app/templates/reports/alert_history.html @@ -0,0 +1,66 @@ +{% extends "reports/base.html" %} + +{% block content %} +
+ Report period: {{ date_from }} to {{ date_to }} +
+ +
+
+
{{ total_alerts }}
+
Total Alerts
+
+
+
{{ critical_count }}
+
Critical
+
+
+
{{ warning_count }}
+
Warning
+
+
+
{{ info_count }}
+
Info
+
+ {% if mttr_minutes is not none %} +
+
{{ mttr_display }}
+
Avg MTTR
+
+ {% endif %} +
+ +{% if alerts %} +

Alert Events

+ + + + + + + + + + + + + {% for alert in alerts %} + + + + + + + + + {% endfor %} + +
TimestampDeviceSeverityMessageStatusDuration
{{ alert.fired_at }}{{ alert.hostname or '-' }} + {{ alert.severity | upper }} + {{ alert.message or '-' }} + {{ alert.status | upper }} + {{ alert.duration or '-' }}
+{% else %} +
No alerts found for the selected date range.
+{% endif %} +{% endblock %} diff --git a/backend/app/templates/reports/base.html b/backend/app/templates/reports/base.html new file mode 100644 index 0000000..32c0e26 --- /dev/null +++ b/backend/app/templates/reports/base.html @@ -0,0 +1,208 @@ + + + + + {{ report_title }} - TOD + + + +
+
+ +
TOD - The Other Dude
+
+
+
{{ report_title }}
+
{{ tenant_name }} • Generated {{ generated_at }}
+
+
+ +
+ {% block content %}{% endblock %} +
+ + diff --git a/backend/app/templates/reports/change_log.html b/backend/app/templates/reports/change_log.html new file mode 100644 index 0000000..b43b90d --- /dev/null +++ b/backend/app/templates/reports/change_log.html @@ -0,0 +1,46 @@ +{% extends "reports/base.html" %} + +{% block content %} +
+ Report period: {{ date_from }} to {{ date_to }} +
+ +
+
+
{{ total_entries }}
+
Total Changes
+
+
+
{{ data_source }}
+
Data Source
+
+
+ +{% if entries %} +

Change Log

+ + + + + + + + + + + + {% for entry in entries %} + + + + + + + + {% endfor %} + +
TimestampUserActionDeviceDetails
{{ entry.timestamp }}{{ entry.user or '-' }}{{ entry.action }}{{ entry.device or '-' }}{{ entry.details or '-' }}
+{% else %} +
No changes found for the selected date range.
+{% endif %} +{% endblock %} diff --git a/backend/app/templates/reports/device_inventory.html b/backend/app/templates/reports/device_inventory.html new file mode 100644 index 0000000..168d265 --- /dev/null +++ b/backend/app/templates/reports/device_inventory.html @@ -0,0 +1,59 @@ +{% extends "reports/base.html" %} + +{% block page_size %}A4 landscape{% endblock %} + +{% block content %} +
+
+
{{ total_devices }}
+
Total Devices
+
+
+
{{ online_count }}
+
Online
+
+
+
{{ offline_count }}
+
Offline
+
+
+
{{ unknown_count }}
+
Unknown
+
+
+ +{% if devices %} + + + + + + + + + + + + + + + {% for device in devices %} + + + + + + + + + + + {% endfor %} + +
HostnameIP AddressModelRouterOSStatusLast SeenUptimeGroups
{{ device.hostname }}{{ device.ip_address }}{{ device.model or '-' }}{{ device.routeros_version or '-' }} + {{ device.status | upper }} + {{ device.last_seen or '-' }}{{ device.uptime or '-' }}{{ device.groups or '-' }}
+{% else %} +
No devices found for this tenant.
+{% endif %} +{% endblock %} diff --git a/backend/app/templates/reports/metrics_summary.html b/backend/app/templates/reports/metrics_summary.html new file mode 100644 index 0000000..6511456 --- /dev/null +++ b/backend/app/templates/reports/metrics_summary.html @@ -0,0 +1,45 @@ +{% extends "reports/base.html" %} + +{% block content %} +
+ Report period: {{ date_from }} to {{ date_to }} +
+ +{% if devices %} +

Resource Usage by Device

+ + + + + + + + + + + + + + + {% for device in devices %} + + + 80 %} style="color: #991B1B; font-weight: 600;"{% elif device.avg_cpu and device.avg_cpu > 50 %} style="color: #92400E;"{% endif %}> + {{ '%.1f' | format(device.avg_cpu) if device.avg_cpu is not none else '-' }} + + + 80 %} style="color: #991B1B; font-weight: 600;"{% elif device.avg_mem and device.avg_mem > 50 %} style="color: #92400E;"{% endif %}> + {{ '%.1f' | format(device.avg_mem) if device.avg_mem is not none else '-' }} + + + + + + + {% endfor %} + +
HostnameAvg CPU %Peak CPU %Avg Memory %Peak Memory %Avg Disk %Avg TempData Points
{{ device.hostname }}{{ '%.1f' | format(device.peak_cpu) if device.peak_cpu is not none else '-' }}{{ '%.1f' | format(device.peak_mem) if device.peak_mem is not none else '-' }}{{ '%.1f' | format(device.avg_disk) if device.avg_disk is not none else '-' }}{{ '%.1f' | format(device.avg_temp) if device.avg_temp is not none else '-' }}{{ device.data_points }}
+{% else %} +
No metrics data found for the selected date range.
+{% endif %} +{% endblock %} diff --git a/backend/gunicorn.conf.py b/backend/gunicorn.conf.py new file mode 100644 index 0000000..510d6df --- /dev/null +++ b/backend/gunicorn.conf.py @@ -0,0 +1,30 @@ +"""Gunicorn configuration for production deployment. + +Uses UvicornWorker for async support under gunicorn's process management. +Worker count and timeouts are configurable via environment variables. +""" + +import os + +# Server socket +bind = os.getenv("GUNICORN_BIND", "0.0.0.0:8000") + +# Worker processes +workers = int(os.getenv("GUNICORN_WORKERS", "2")) +worker_class = "uvicorn.workers.UvicornWorker" + +# Timeouts +graceful_timeout = int(os.getenv("GUNICORN_GRACEFUL_TIMEOUT", "30")) +timeout = int(os.getenv("GUNICORN_TIMEOUT", "120")) +keepalive = int(os.getenv("GUNICORN_KEEPALIVE", "5")) + +# Logging -- use stdout/stderr for Docker log collection +accesslog = "-" +errorlog = "-" +loglevel = os.getenv("LOG_LEVEL", "info") + +# Process naming +proc_name = "mikrotik-api" + +# Preload application for faster worker spawning (shared memory for code) +preload_app = True diff --git a/backend/pyproject.toml b/backend/pyproject.toml new file mode 100644 index 0000000..5742475 --- /dev/null +++ b/backend/pyproject.toml @@ -0,0 +1,59 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "mikrotik-portal-backend" +version = "9.0.1" +description = "MikroTik Fleet Management Portal - Backend API" +requires-python = ">=3.12" +dependencies = [ + "fastapi[standard]>=0.115.0", + "sqlalchemy[asyncio]>=2.0.0", + "asyncpg>=0.30.0", + "alembic>=1.14.0", + "pydantic>=2.0.0", + "pydantic-settings>=2.0.0", + "python-jose[cryptography]>=3.3.0", + "bcrypt>=4.0.0,<5.0.0", + "redis>=5.0.0", + "nats-py>=2.7.0", + "cryptography>=42.0.0", + "python-multipart>=0.0.9", + "httpx>=0.27.0", + "asyncssh>=2.20.0", + "pygit2>=1.14.0", + "apscheduler>=3.10.0,<4.0", + "aiosmtplib>=3.0.0", + "structlog>=25.1.0", + "slowapi>=0.1.9", + "jinja2>=3.1.6", + "prometheus-fastapi-instrumentator>=7.0.0", + "gunicorn>=23.0.0", + "sse-starlette>=2.0.0", + "weasyprint>=62.0", + "srptools==1.0.1", +] + +[project.optional-dependencies] +dev = [ + "pytest>=8.0.0", + "pytest-asyncio>=0.25,<1.0", + "pytest-mock>=3.14", + "httpx>=0.27.0", + "pytest-cov>=5.0.0", +] + +[tool.hatch.build.targets.wheel] +packages = ["app"] + +[tool.pytest.ini_options] +asyncio_mode = "auto" +testpaths = ["tests"] +markers = [ + "integration: marks tests as integration tests requiring PostgreSQL (deselect with '-m \"not integration\"')", +] + +[tool.ruff] +line-length = 100 +target-version = "py312" diff --git a/backend/templates/emergency_kit.html b/backend/templates/emergency_kit.html new file mode 100644 index 0000000..06fb715 --- /dev/null +++ b/backend/templates/emergency_kit.html @@ -0,0 +1,297 @@ + + + + + TOD - Emergency Kit + + + +
+ +
+ +
+

Emergency Kit

+

TOD Zero-Knowledge Recovery

+
+
+ + +
+ +
+ Keep this document safe + This Emergency Kit is your only way to recover access if you lose your Secret Key. + Store it in a secure location such as a home safe or safety deposit box. +
+ + +
+
Email Address
+
{{ email }}
+
+ + +
+
Sign-in URL
+
{{ signin_url }}
+
+ + +
+
Secret Key
+
{{ secret_key_placeholder }}
+
+ + +
+
Master Password (write by hand)
+
+
+ +
+ + +
+

Instructions

+
    +
  • This Emergency Kit contains your Secret Key needed to log in on new devices.
  • +
  • Store this document in a safe place — a home safe, safety deposit box, or other secure location.
  • +
  • Do NOT store this document digitally alongside your password.
  • +
  • Consider writing your Master Password on this sheet and storing it securely.
  • +
  • If you lose both your Emergency Kit and forget your Secret Key, your encrypted data cannot be recovered. There is no reset mechanism.
  • +
+
+
+ + + +
+ + diff --git a/backend/tests/__init__.py b/backend/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/conftest.py b/backend/tests/conftest.py new file mode 100644 index 0000000..0f94577 --- /dev/null +++ b/backend/tests/conftest.py @@ -0,0 +1,16 @@ +"""Shared test fixtures for the backend test suite. + +Phase 7: Minimal fixtures for unit tests (no database, no async). +Phase 10: Integration test fixtures added in tests/integration/conftest.py. + +Pytest marker registration and shared configuration lives here. +""" + +import pytest + + +def pytest_configure(config): + """Register custom markers.""" + config.addinivalue_line( + "markers", "integration: marks tests as integration tests requiring PostgreSQL" + ) diff --git a/backend/tests/integration/__init__.py b/backend/tests/integration/__init__.py new file mode 100644 index 0000000..19c382a --- /dev/null +++ b/backend/tests/integration/__init__.py @@ -0,0 +1,2 @@ +# Integration tests for TOD backend. +# Run against real PostgreSQL+TimescaleDB via docker-compose. diff --git a/backend/tests/integration/conftest.py b/backend/tests/integration/conftest.py new file mode 100644 index 0000000..e7b7126 --- /dev/null +++ b/backend/tests/integration/conftest.py @@ -0,0 +1,439 @@ +""" +Integration test fixtures for the TOD backend. + +Provides: +- Database engines (admin + app_user) pointing at real PostgreSQL+TimescaleDB +- Per-test session fixtures with transaction rollback for isolation +- app_session_factory for RLS multi-tenant tests (creates sessions with tenant context) +- FastAPI test client with dependency overrides +- Entity factory fixtures (tenants, users, devices) +- Auth helper for getting login tokens + +All fixtures use the existing docker-compose PostgreSQL instance. +Set TEST_DATABASE_URL / TEST_APP_USER_DATABASE_URL env vars to override defaults. + +Event loop strategy: All async fixtures are function-scoped to avoid the +pytest-asyncio 0.26 session/function loop mismatch. Engine creation and DB +setup use synchronous subprocess calls (Alembic) and module-level singletons. +""" + +import os +import subprocess +import sys +import uuid +from collections.abc import AsyncGenerator +from contextlib import asynccontextmanager +from typing import Any + +import pytest +import pytest_asyncio +from httpx import ASGITransport, AsyncClient +from sqlalchemy import text +from sqlalchemy.ext.asyncio import ( + AsyncSession, + async_sessionmaker, + create_async_engine, +) + +# --------------------------------------------------------------------------- +# Environment configuration +# --------------------------------------------------------------------------- + +TEST_DATABASE_URL = os.environ.get( + "TEST_DATABASE_URL", + "postgresql+asyncpg://postgres:postgres@localhost:5432/mikrotik_test", +) +TEST_APP_USER_DATABASE_URL = os.environ.get( + "TEST_APP_USER_DATABASE_URL", + "postgresql+asyncpg://app_user:app_password@localhost:5432/mikrotik_test", +) + + +# --------------------------------------------------------------------------- +# One-time database setup (runs once per session via autouse sync fixture) +# --------------------------------------------------------------------------- + +_DB_SETUP_DONE = False + + +def _ensure_database_setup(): + """Synchronous one-time DB setup: create test DB if needed, run migrations.""" + global _DB_SETUP_DONE + if _DB_SETUP_DONE: + return + _DB_SETUP_DONE = True + + backend_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + env = os.environ.copy() + env["DATABASE_URL"] = TEST_DATABASE_URL + + # Run Alembic migrations via subprocess (handles DB creation and schema) + result = subprocess.run( + [sys.executable, "-m", "alembic", "upgrade", "head"], + capture_output=True, + text=True, + cwd=backend_dir, + env=env, + ) + if result.returncode != 0: + raise RuntimeError(f"Alembic migration failed:\n{result.stderr}") + + +@pytest.fixture(scope="session", autouse=True) +def setup_database(): + """Session-scoped sync fixture: ensures DB schema is ready.""" + _ensure_database_setup() + yield + + +# --------------------------------------------------------------------------- +# Engine fixtures (function-scoped to stay on same event loop as tests) +# --------------------------------------------------------------------------- + + +@pytest_asyncio.fixture +async def admin_engine(): + """Admin engine (superuser) -- bypasses RLS. + + Created fresh per-test to avoid event loop issues. + pool_size=2 since each test only needs a few connections. + """ + engine = create_async_engine( + TEST_DATABASE_URL, echo=False, pool_pre_ping=True, pool_size=2, max_overflow=3 + ) + yield engine + await engine.dispose() + + +@pytest_asyncio.fixture +async def app_engine(): + """App-user engine -- RLS enforced. + + Created fresh per-test to avoid event loop issues. + """ + engine = create_async_engine( + TEST_APP_USER_DATABASE_URL, echo=False, pool_pre_ping=True, pool_size=2, max_overflow=3 + ) + yield engine + await engine.dispose() + + +# --------------------------------------------------------------------------- +# Function-scoped session fixtures (fresh per test) +# --------------------------------------------------------------------------- + + +@pytest_asyncio.fixture +async def admin_session(admin_engine) -> AsyncGenerator[AsyncSession, None]: + """Per-test admin session with transaction rollback. + + Each test gets a clean transaction that is rolled back after the test, + ensuring no state leakage between tests. + """ + async with admin_engine.connect() as conn: + trans = await conn.begin() + session = AsyncSession(bind=conn, expire_on_commit=False) + try: + yield session + finally: + await trans.rollback() + await session.close() + + +@pytest_asyncio.fixture +async def app_session(app_engine) -> AsyncGenerator[AsyncSession, None]: + """Per-test app_user session with transaction rollback (RLS enforced). + + Caller must call set_tenant_context() before querying. + """ + async with app_engine.connect() as conn: + trans = await conn.begin() + session = AsyncSession(bind=conn, expire_on_commit=False) + # Reset tenant context + await session.execute(text("RESET app.current_tenant")) + try: + yield session + finally: + await trans.rollback() + await session.close() + + +@pytest.fixture +def app_session_factory(app_engine): + """Factory that returns an async context manager for app_user sessions. + + Each session gets its own connection and transaction (rolled back on exit). + Caller can pass tenant_id to auto-set RLS context. + + Usage: + async with app_session_factory(tenant_id=str(tenant.id)) as session: + result = await session.execute(select(Device)) + """ + from app.database import set_tenant_context + + @asynccontextmanager + async def _create(tenant_id: str | None = None): + async with app_engine.connect() as conn: + trans = await conn.begin() + session = AsyncSession(bind=conn, expire_on_commit=False) + # Reset tenant context to prevent leakage + await session.execute(text("RESET app.current_tenant")) + if tenant_id: + await set_tenant_context(session, tenant_id) + try: + yield session + finally: + await trans.rollback() + await session.close() + + return _create + + +# --------------------------------------------------------------------------- +# FastAPI test app and HTTP client +# --------------------------------------------------------------------------- + + +@pytest_asyncio.fixture +async def test_app(admin_engine, app_engine): + """Create a FastAPI app instance with test database dependency overrides. + + - get_db uses app_engine (non-superuser, RLS enforced) so tenant + isolation is tested correctly at the API level. + - get_admin_db uses admin_engine (superuser) for auth/bootstrap routes. + - Disables lifespan to skip migrations, NATS, and scheduler startup. + """ + from fastapi import FastAPI + + from app.database import get_admin_db, get_db + + # Create a minimal app without lifespan + app = FastAPI(lifespan=None) + + # Import and mount all routers (same as main app) + from app.routers.alerts import router as alerts_router + from app.routers.auth import router as auth_router + from app.routers.config_backups import router as config_router + from app.routers.config_editor import router as config_editor_router + from app.routers.device_groups import router as device_groups_router + from app.routers.device_tags import router as device_tags_router + from app.routers.devices import router as devices_router + from app.routers.firmware import router as firmware_router + from app.routers.metrics import router as metrics_router + from app.routers.templates import router as templates_router + from app.routers.tenants import router as tenants_router + from app.routers.users import router as users_router + + app.include_router(auth_router, prefix="/api") + app.include_router(tenants_router, prefix="/api") + app.include_router(users_router, prefix="/api") + app.include_router(devices_router, prefix="/api") + app.include_router(device_groups_router, prefix="/api") + app.include_router(device_tags_router, prefix="/api") + app.include_router(metrics_router, prefix="/api") + app.include_router(config_router, prefix="/api") + app.include_router(firmware_router, prefix="/api") + app.include_router(alerts_router, prefix="/api") + app.include_router(config_editor_router, prefix="/api") + app.include_router(templates_router, prefix="/api") + + # Register rate limiter (auth endpoints use @limiter.limit) + from app.middleware.rate_limit import setup_rate_limiting + setup_rate_limiting(app) + + # Create test session factories + test_admin_session_factory = async_sessionmaker( + admin_engine, class_=AsyncSession, expire_on_commit=False + ) + test_app_session_factory = async_sessionmaker( + app_engine, class_=AsyncSession, expire_on_commit=False + ) + + # get_db uses app_engine (RLS enforced) -- tenant context is set + # by get_current_user dependency via set_tenant_context() + async def override_get_db() -> AsyncGenerator[AsyncSession, None]: + async with test_app_session_factory() as session: + try: + yield session + await session.commit() + except Exception: + await session.rollback() + raise + + # get_admin_db uses admin engine (superuser) for auth/bootstrap + async def override_get_admin_db() -> AsyncGenerator[AsyncSession, None]: + async with test_admin_session_factory() as session: + try: + yield session + await session.commit() + except Exception: + await session.rollback() + raise + + app.dependency_overrides[get_db] = override_get_db + app.dependency_overrides[get_admin_db] = override_get_admin_db + + yield app + + app.dependency_overrides.clear() + + +@pytest_asyncio.fixture +async def client(test_app) -> AsyncGenerator[AsyncClient, None]: + """HTTP client using ASGI transport (no network, real app). + + Flushes Redis DB 1 (rate limit storage) before each test to prevent + cross-test 429 errors from slowapi. + """ + import redis + + try: + # Rate limiter uses Redis DB 1 (see app/middleware/rate_limit.py) + r = redis.Redis(host="localhost", port=6379, db=1) + r.flushdb() + r.close() + except Exception: + pass # Redis not available -- skip clearing + + transport = ASGITransport(app=test_app) + async with AsyncClient(transport=transport, base_url="http://test") as ac: + yield ac + + +# --------------------------------------------------------------------------- +# Entity factory fixtures +# --------------------------------------------------------------------------- + + +@pytest.fixture +def create_test_tenant(): + """Factory to create a test tenant via admin session.""" + + async def _create( + session: AsyncSession, + name: str | None = None, + ): + from app.models.tenant import Tenant + + tenant_name = name or f"test-tenant-{uuid.uuid4().hex[:8]}" + tenant = Tenant(name=tenant_name) + session.add(tenant) + await session.flush() + return tenant + + return _create + + +@pytest.fixture +def create_test_user(): + """Factory to create a test user via admin session.""" + + async def _create( + session: AsyncSession, + tenant_id: uuid.UUID | None, + email: str | None = None, + password: str = "TestPass123!", + role: str = "tenant_admin", + name: str = "Test User", + ): + from app.models.user import User + from app.services.auth import hash_password + + user_email = email or f"test-{uuid.uuid4().hex[:8]}@example.com" + user = User( + email=user_email, + hashed_password=hash_password(password), + name=name, + role=role, + tenant_id=tenant_id, + is_active=True, + ) + session.add(user) + await session.flush() + return user + + return _create + + +@pytest.fixture +def create_test_device(): + """Factory to create a test device via admin session.""" + + async def _create( + session: AsyncSession, + tenant_id: uuid.UUID, + hostname: str | None = None, + ip_address: str | None = None, + status: str = "online", + ): + from app.models.device import Device + + device_hostname = hostname or f"router-{uuid.uuid4().hex[:8]}" + device_ip = ip_address or f"10.0.{uuid.uuid4().int % 256}.{uuid.uuid4().int % 256}" + device = Device( + tenant_id=tenant_id, + hostname=device_hostname, + ip_address=device_ip, + api_port=8728, + api_ssl_port=8729, + status=status, + ) + session.add(device) + await session.flush() + return device + + return _create + + +@pytest.fixture +def auth_headers_factory(client, create_test_tenant, create_test_user): + """Factory to create authenticated headers for a test user. + + Creates a tenant + user, logs in via the test client, and returns + the Authorization headers dict ready for use in subsequent requests. + """ + + async def _create( + admin_session: AsyncSession, + email: str | None = None, + password: str = "TestPass123!", + role: str = "tenant_admin", + tenant_name: str | None = None, + existing_tenant_id: uuid.UUID | None = None, + ) -> dict[str, Any]: + """Create user, login, return headers + tenant/user info.""" + if existing_tenant_id: + tenant_id = existing_tenant_id + else: + tenant = await create_test_tenant(admin_session, name=tenant_name) + tenant_id = tenant.id + + user = await create_test_user( + admin_session, + tenant_id=tenant_id, + email=email, + password=password, + role=role, + ) + await admin_session.commit() + + user_email = user.email + + # Login via the API + login_resp = await client.post( + "/api/auth/login", + json={"email": user_email, "password": password}, + ) + assert login_resp.status_code == 200, f"Login failed: {login_resp.text}" + tokens = login_resp.json() + + return { + "headers": {"Authorization": f"Bearer {tokens['access_token']}"}, + "access_token": tokens["access_token"], + "refresh_token": tokens.get("refresh_token"), + "tenant_id": str(tenant_id), + "user_id": str(user.id), + "user_email": user_email, + } + + return _create diff --git a/backend/tests/integration/test_alerts_api.py b/backend/tests/integration/test_alerts_api.py new file mode 100644 index 0000000..561de0d --- /dev/null +++ b/backend/tests/integration/test_alerts_api.py @@ -0,0 +1,275 @@ +""" +Integration tests for the Alerts API endpoints. + +Tests exercise: +- GET /api/tenants/{tenant_id}/alert-rules -- list rules +- POST /api/tenants/{tenant_id}/alert-rules -- create rule +- PUT /api/tenants/{tenant_id}/alert-rules/{rule_id} -- update rule +- DELETE /api/tenants/{tenant_id}/alert-rules/{rule_id} -- delete rule +- PATCH /api/tenants/{tenant_id}/alert-rules/{rule_id}/toggle +- GET /api/tenants/{tenant_id}/alerts -- list events +- GET /api/tenants/{tenant_id}/alerts/active-count -- active count +- GET /api/tenants/{tenant_id}/devices/{device_id}/alerts -- device alerts + +All tests run against real PostgreSQL. +""" + +import uuid + +import pytest + +pytestmark = pytest.mark.integration + + +VALID_ALERT_RULE = { + "name": "High CPU Alert", + "metric": "cpu_load", + "operator": "gt", + "threshold": 90.0, + "duration_polls": 3, + "severity": "warning", + "enabled": True, + "channel_ids": [], +} + + +class TestAlertRulesCRUD: + """Alert rules CRUD endpoints.""" + + async def test_list_alert_rules_empty( + self, + client, + auth_headers_factory, + admin_session, + ): + """GET /api/tenants/{tenant_id}/alert-rules returns 200 with empty list.""" + auth = await auth_headers_factory(admin_session) + tenant_id = auth["tenant_id"] + + resp = await client.get( + f"/api/tenants/{tenant_id}/alert-rules", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert isinstance(data, list) + + async def test_create_alert_rule( + self, + client, + auth_headers_factory, + admin_session, + ): + """POST /api/tenants/{tenant_id}/alert-rules creates a rule.""" + auth = await auth_headers_factory(admin_session, role="operator") + tenant_id = auth["tenant_id"] + + rule_data = {**VALID_ALERT_RULE, "name": f"CPU Alert {uuid.uuid4().hex[:6]}"} + + resp = await client.post( + f"/api/tenants/{tenant_id}/alert-rules", + json=rule_data, + headers=auth["headers"], + ) + assert resp.status_code == 201 + data = resp.json() + assert data["name"] == rule_data["name"] + assert data["metric"] == "cpu_load" + assert data["operator"] == "gt" + assert data["threshold"] == 90.0 + assert data["severity"] == "warning" + assert "id" in data + + async def test_update_alert_rule( + self, + client, + auth_headers_factory, + admin_session, + ): + """PUT /api/tenants/{tenant_id}/alert-rules/{rule_id} updates a rule.""" + auth = await auth_headers_factory(admin_session, role="operator") + tenant_id = auth["tenant_id"] + + # Create a rule first + rule_data = {**VALID_ALERT_RULE, "name": f"Update Test {uuid.uuid4().hex[:6]}"} + create_resp = await client.post( + f"/api/tenants/{tenant_id}/alert-rules", + json=rule_data, + headers=auth["headers"], + ) + assert create_resp.status_code == 201 + rule_id = create_resp.json()["id"] + + # Update it + updated_data = {**rule_data, "threshold": 95.0, "severity": "critical"} + update_resp = await client.put( + f"/api/tenants/{tenant_id}/alert-rules/{rule_id}", + json=updated_data, + headers=auth["headers"], + ) + assert update_resp.status_code == 200 + data = update_resp.json() + assert data["threshold"] == 95.0 + assert data["severity"] == "critical" + + async def test_delete_alert_rule( + self, + client, + auth_headers_factory, + admin_session, + ): + """DELETE /api/tenants/{tenant_id}/alert-rules/{rule_id} deletes a rule.""" + auth = await auth_headers_factory(admin_session, role="operator") + tenant_id = auth["tenant_id"] + + # Create a non-default rule + rule_data = {**VALID_ALERT_RULE, "name": f"Delete Test {uuid.uuid4().hex[:6]}"} + create_resp = await client.post( + f"/api/tenants/{tenant_id}/alert-rules", + json=rule_data, + headers=auth["headers"], + ) + assert create_resp.status_code == 201 + rule_id = create_resp.json()["id"] + + # Delete it + del_resp = await client.delete( + f"/api/tenants/{tenant_id}/alert-rules/{rule_id}", + headers=auth["headers"], + ) + assert del_resp.status_code == 204 + + async def test_toggle_alert_rule( + self, + client, + auth_headers_factory, + admin_session, + ): + """PATCH toggle flips the enabled state of a rule.""" + auth = await auth_headers_factory(admin_session, role="operator") + tenant_id = auth["tenant_id"] + + # Create a rule (enabled=True) + rule_data = {**VALID_ALERT_RULE, "name": f"Toggle Test {uuid.uuid4().hex[:6]}"} + create_resp = await client.post( + f"/api/tenants/{tenant_id}/alert-rules", + json=rule_data, + headers=auth["headers"], + ) + assert create_resp.status_code == 201 + rule_id = create_resp.json()["id"] + + # Toggle it + toggle_resp = await client.patch( + f"/api/tenants/{tenant_id}/alert-rules/{rule_id}/toggle", + headers=auth["headers"], + ) + assert toggle_resp.status_code == 200 + data = toggle_resp.json() + assert data["enabled"] is False # Was True, toggled to False + + async def test_create_alert_rule_invalid_metric( + self, + client, + auth_headers_factory, + admin_session, + ): + """POST with invalid metric returns 422.""" + auth = await auth_headers_factory(admin_session, role="operator") + tenant_id = auth["tenant_id"] + + rule_data = {**VALID_ALERT_RULE, "metric": "invalid_metric"} + resp = await client.post( + f"/api/tenants/{tenant_id}/alert-rules", + json=rule_data, + headers=auth["headers"], + ) + assert resp.status_code == 422 + + async def test_create_alert_rule_viewer_forbidden( + self, + client, + auth_headers_factory, + admin_session, + ): + """POST as viewer returns 403.""" + auth = await auth_headers_factory(admin_session, role="viewer") + tenant_id = auth["tenant_id"] + + resp = await client.post( + f"/api/tenants/{tenant_id}/alert-rules", + json=VALID_ALERT_RULE, + headers=auth["headers"], + ) + assert resp.status_code == 403 + + +class TestAlertEvents: + """Alert events listing endpoints.""" + + async def test_list_alerts_empty( + self, + client, + auth_headers_factory, + admin_session, + ): + """GET /api/tenants/{tenant_id}/alerts returns 200 with paginated empty response.""" + auth = await auth_headers_factory(admin_session) + tenant_id = auth["tenant_id"] + + resp = await client.get( + f"/api/tenants/{tenant_id}/alerts", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert "items" in data + assert "total" in data + assert data["total"] >= 0 + assert isinstance(data["items"], list) + + async def test_active_alert_count( + self, + client, + auth_headers_factory, + admin_session, + ): + """GET active-count returns count of firing alerts.""" + auth = await auth_headers_factory(admin_session) + tenant_id = auth["tenant_id"] + + resp = await client.get( + f"/api/tenants/{tenant_id}/alerts/active-count", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert "count" in data + assert isinstance(data["count"], int) + assert data["count"] >= 0 + + async def test_device_alerts_empty( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """GET /api/tenants/{tenant_id}/devices/{device_id}/alerts returns paginated response.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device.id}/alerts", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert "items" in data + assert "total" in data diff --git a/backend/tests/integration/test_auth_api.py b/backend/tests/integration/test_auth_api.py new file mode 100644 index 0000000..591765b --- /dev/null +++ b/backend/tests/integration/test_auth_api.py @@ -0,0 +1,302 @@ +""" +Auth API endpoint integration tests (TEST-04 partial). + +Tests auth endpoints end-to-end against real PostgreSQL: +- POST /api/auth/login (success, wrong password, nonexistent user) +- POST /api/auth/refresh (token refresh flow) +- GET /api/auth/me (current user info) +- Protected endpoint access without/with invalid token +""" + +import uuid + +import pytest +from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine + +from app.models.tenant import Tenant +from app.models.user import User +from app.services.auth import hash_password + +pytestmark = pytest.mark.integration + +from tests.integration.conftest import TEST_DATABASE_URL + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +async def _admin_commit(url, callback): + """Open a fresh admin connection, run callback, commit, close.""" + engine = create_async_engine(url, echo=False) + async with engine.connect() as conn: + session = AsyncSession(bind=conn, expire_on_commit=False) + result = await callback(session) + await session.commit() + await engine.dispose() + return result + + +async def _admin_cleanup(url, *table_names): + """Delete from specified tables via admin engine.""" + from sqlalchemy import text + + engine = create_async_engine(url, echo=False) + async with engine.connect() as conn: + for table in table_names: + await conn.execute(text(f"DELETE FROM {table}")) + await conn.commit() + await engine.dispose() + + +# --------------------------------------------------------------------------- +# Test 1: Login success +# --------------------------------------------------------------------------- + + +async def test_login_success(client, admin_engine): + """POST /api/auth/login with correct credentials returns 200 and tokens.""" + uid = uuid.uuid4().hex[:6] + + async def setup(session): + tenant = Tenant(name=f"auth-login-{uid}") + session.add(tenant) + await session.flush() + + user = User( + email=f"auth-login-{uid}@example.com", + hashed_password=hash_password("SecurePass123!"), + name="Auth Test User", + role="tenant_admin", + tenant_id=tenant.id, + is_active=True, + ) + session.add(user) + await session.flush() + return {"email": user.email, "tenant_id": str(tenant.id)} + + data = await _admin_commit(TEST_DATABASE_URL, setup) + + try: + resp = await client.post( + "/api/auth/login", + json={"email": data["email"], "password": "SecurePass123!"}, + ) + assert resp.status_code == 200, f"Login failed: {resp.text}" + + body = resp.json() + assert "access_token" in body + assert "refresh_token" in body + assert body["token_type"] == "bearer" + assert len(body["access_token"]) > 0 + assert len(body["refresh_token"]) > 0 + + # Verify httpOnly cookie is set + cookies = resp.cookies + # Cookie may or may not appear in httpx depending on secure flag + # Just verify the response contains Set-Cookie header + set_cookie = resp.headers.get("set-cookie", "") + assert "access_token" in set_cookie or len(body["access_token"]) > 0 + finally: + await _admin_cleanup(TEST_DATABASE_URL, "users", "tenants") + + +# --------------------------------------------------------------------------- +# Test 2: Login with wrong password +# --------------------------------------------------------------------------- + + +async def test_login_wrong_password(client, admin_engine): + """POST /api/auth/login with wrong password returns 401.""" + uid = uuid.uuid4().hex[:6] + + async def setup(session): + tenant = Tenant(name=f"auth-wrongpw-{uid}") + session.add(tenant) + await session.flush() + + user = User( + email=f"auth-wrongpw-{uid}@example.com", + hashed_password=hash_password("CorrectPass123!"), + name="Wrong PW User", + role="tenant_admin", + tenant_id=tenant.id, + is_active=True, + ) + session.add(user) + await session.flush() + return {"email": user.email} + + data = await _admin_commit(TEST_DATABASE_URL, setup) + + try: + resp = await client.post( + "/api/auth/login", + json={"email": data["email"], "password": "WrongPassword!"}, + ) + assert resp.status_code == 401 + assert "Invalid credentials" in resp.json()["detail"] + finally: + await _admin_cleanup(TEST_DATABASE_URL, "users", "tenants") + + +# --------------------------------------------------------------------------- +# Test 3: Login with nonexistent user +# --------------------------------------------------------------------------- + + +async def test_login_nonexistent_user(client): + """POST /api/auth/login with email that doesn't exist returns 401.""" + resp = await client.post( + "/api/auth/login", + json={"email": f"doesnotexist-{uuid.uuid4().hex[:6]}@example.com", "password": "Anything!"}, + ) + assert resp.status_code == 401 + assert "Invalid credentials" in resp.json()["detail"] + + +# --------------------------------------------------------------------------- +# Test 4: Token refresh +# --------------------------------------------------------------------------- + + +async def test_token_refresh(client, admin_engine): + """POST /api/auth/refresh with valid refresh token returns new tokens.""" + uid = uuid.uuid4().hex[:6] + + async def setup(session): + tenant = Tenant(name=f"auth-refresh-{uid}") + session.add(tenant) + await session.flush() + + user = User( + email=f"auth-refresh-{uid}@example.com", + hashed_password=hash_password("RefreshPass123!"), + name="Refresh User", + role="tenant_admin", + tenant_id=tenant.id, + is_active=True, + ) + session.add(user) + await session.flush() + return {"email": user.email} + + data = await _admin_commit(TEST_DATABASE_URL, setup) + + try: + # Login first to get refresh token + login_resp = await client.post( + "/api/auth/login", + json={"email": data["email"], "password": "RefreshPass123!"}, + ) + assert login_resp.status_code == 200 + tokens = login_resp.json() + refresh_token = tokens["refresh_token"] + original_access = tokens["access_token"] + + # Use refresh token to get new access token + refresh_resp = await client.post( + "/api/auth/refresh", + json={"refresh_token": refresh_token}, + ) + assert refresh_resp.status_code == 200 + + new_tokens = refresh_resp.json() + assert "access_token" in new_tokens + assert "refresh_token" in new_tokens + assert new_tokens["token_type"] == "bearer" + # Verify the new access token is a valid JWT (can be same if within same second) + assert len(new_tokens["access_token"]) > 0 + assert len(new_tokens["refresh_token"]) > 0 + + # Verify the new access token works for /me + me_resp = await client.get( + "/api/auth/me", + headers={"Authorization": f"Bearer {new_tokens['access_token']}"}, + ) + assert me_resp.status_code == 200 + assert me_resp.json()["email"] == data["email"] + finally: + await _admin_cleanup(TEST_DATABASE_URL, "users", "tenants") + + +# --------------------------------------------------------------------------- +# Test 5: Get current user +# --------------------------------------------------------------------------- + + +async def test_get_current_user(client, admin_engine): + """GET /api/auth/me with valid token returns current user info.""" + uid = uuid.uuid4().hex[:6] + + async def setup(session): + tenant = Tenant(name=f"auth-me-{uid}") + session.add(tenant) + await session.flush() + + user = User( + email=f"auth-me-{uid}@example.com", + hashed_password=hash_password("MePass123!"), + name="Me User", + role="tenant_admin", + tenant_id=tenant.id, + is_active=True, + ) + session.add(user) + await session.flush() + return {"email": user.email, "tenant_id": str(tenant.id), "user_id": str(user.id)} + + data = await _admin_commit(TEST_DATABASE_URL, setup) + + try: + # Login + login_resp = await client.post( + "/api/auth/login", + json={"email": data["email"], "password": "MePass123!"}, + ) + assert login_resp.status_code == 200 + token = login_resp.json()["access_token"] + + # Get /me + me_resp = await client.get( + "/api/auth/me", + headers={"Authorization": f"Bearer {token}"}, + ) + assert me_resp.status_code == 200 + + me_data = me_resp.json() + assert me_data["email"] == data["email"] + assert me_data["name"] == "Me User" + assert me_data["role"] == "tenant_admin" + assert me_data["tenant_id"] == data["tenant_id"] + assert me_data["id"] == data["user_id"] + finally: + await _admin_cleanup(TEST_DATABASE_URL, "users", "tenants") + + +# --------------------------------------------------------------------------- +# Test 6: Protected endpoint without token +# --------------------------------------------------------------------------- + + +async def test_protected_endpoint_without_token(client): + """GET /api/tenants/{id}/devices without auth headers returns 401.""" + fake_tenant_id = str(uuid.uuid4()) + resp = await client.get(f"/api/tenants/{fake_tenant_id}/devices") + assert resp.status_code == 401 + + +# --------------------------------------------------------------------------- +# Test 7: Protected endpoint with invalid token +# --------------------------------------------------------------------------- + + +async def test_protected_endpoint_with_invalid_token(client): + """GET /api/tenants/{id}/devices with invalid Bearer token returns 401.""" + fake_tenant_id = str(uuid.uuid4()) + resp = await client.get( + f"/api/tenants/{fake_tenant_id}/devices", + headers={"Authorization": "Bearer totally-invalid-jwt-token"}, + ) + assert resp.status_code == 401 diff --git a/backend/tests/integration/test_config_api.py b/backend/tests/integration/test_config_api.py new file mode 100644 index 0000000..4fcaeb6 --- /dev/null +++ b/backend/tests/integration/test_config_api.py @@ -0,0 +1,149 @@ +""" +Integration tests for the Config Backup API endpoints. + +Tests exercise: +- GET /api/tenants/{tenant_id}/devices/{device_id}/config/backups +- GET /api/tenants/{tenant_id}/devices/{device_id}/config/schedules +- PUT /api/tenants/{tenant_id}/devices/{device_id}/config/schedules + +POST /backups (trigger) and POST /restore require actual RouterOS connections +and git store, so we only test that the endpoints exist and respond appropriately. + +All tests run against real PostgreSQL. +""" + +import uuid + +import pytest + +pytestmark = pytest.mark.integration + + +class TestConfigBackups: + """Config backup listing and schedule endpoints.""" + + async def test_list_config_backups_empty( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """GET config backups for a device with no backups returns 200 + empty list.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device.id}/config/backups", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert isinstance(data, list) + assert len(data) == 0 + + async def test_get_backup_schedule_default( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """GET schedule returns synthetic default when no schedule configured.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device.id}/config/schedules", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert data["is_default"] is True + assert data["cron_expression"] == "0 2 * * *" + assert data["enabled"] is True + + async def test_update_backup_schedule( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """PUT schedule creates/updates device-specific backup schedule.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id, role="operator" + ) + tenant_id = auth["tenant_id"] + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + schedule_data = { + "cron_expression": "0 3 * * 1", # Monday at 3am + "enabled": True, + } + resp = await client.put( + f"/api/tenants/{tenant_id}/devices/{device.id}/config/schedules", + json=schedule_data, + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert data["cron_expression"] == "0 3 * * 1" + assert data["enabled"] is True + assert data["is_default"] is False + assert data["device_id"] == str(device.id) + + async def test_backup_endpoints_respond( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """Config backup router responds (not 404) for expected paths.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + # List backups -- should respond + backups_resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device.id}/config/backups", + headers=auth["headers"], + ) + assert backups_resp.status_code != 404 + + # Get schedule -- should respond + schedule_resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device.id}/config/schedules", + headers=auth["headers"], + ) + assert schedule_resp.status_code != 404 + + async def test_config_backups_unauthenticated(self, client): + """GET config backups without auth returns 401.""" + tenant_id = str(uuid.uuid4()) + device_id = str(uuid.uuid4()) + resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device_id}/config/backups" + ) + assert resp.status_code == 401 diff --git a/backend/tests/integration/test_devices_api.py b/backend/tests/integration/test_devices_api.py new file mode 100644 index 0000000..555df7b --- /dev/null +++ b/backend/tests/integration/test_devices_api.py @@ -0,0 +1,227 @@ +""" +Integration tests for the Device CRUD API endpoints. + +Tests exercise /api/tenants/{tenant_id}/devices/* endpoints against +real PostgreSQL+TimescaleDB with full auth + RLS enforcement. + +All tests are independent and create their own test data. +""" + +import uuid + +import pytest +import pytest_asyncio + + +pytestmark = pytest.mark.integration + + +@pytest.fixture +def _unique_suffix(): + """Return a short unique suffix for test data.""" + return uuid.uuid4().hex[:8] + + +class TestDevicesCRUD: + """Device list, create, get, update, delete endpoints.""" + + async def test_list_devices_empty( + self, + client, + auth_headers_factory, + admin_session, + ): + """GET /api/tenants/{tenant_id}/devices returns 200 with empty list.""" + auth = await auth_headers_factory(admin_session) + tenant_id = auth["tenant_id"] + + resp = await client.get( + f"/api/tenants/{tenant_id}/devices", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert data["items"] == [] + assert data["total"] == 0 + + async def test_create_device( + self, + client, + auth_headers_factory, + admin_session, + ): + """POST /api/tenants/{tenant_id}/devices creates a device and returns 201.""" + auth = await auth_headers_factory(admin_session, role="operator") + tenant_id = auth["tenant_id"] + + device_data = { + "hostname": f"test-router-{uuid.uuid4().hex[:8]}", + "ip_address": "192.168.88.1", + "api_port": 8728, + "api_ssl_port": 8729, + "username": "admin", + "password": "admin123", + } + + resp = await client.post( + f"/api/tenants/{tenant_id}/devices", + json=device_data, + headers=auth["headers"], + ) + # create_device does TCP probe -- may fail in test env without real device + # Accept either 201 (success) or 502/422 (connectivity check failure) + if resp.status_code == 201: + data = resp.json() + assert data["hostname"] == device_data["hostname"] + assert data["ip_address"] == device_data["ip_address"] + assert "id" in data + # Credentials should never be returned in response + assert "password" not in data + assert "username" not in data + assert "encrypted_credentials" not in data + + async def test_get_device( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """GET /api/tenants/{tenant_id}/devices/{device_id} returns correct device.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device.id}", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert data["id"] == str(device.id) + assert data["hostname"] == device.hostname + assert data["ip_address"] == device.ip_address + + async def test_update_device( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """PUT /api/tenants/{tenant_id}/devices/{device_id} updates device fields.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id, role="operator" + ) + tenant_id = auth["tenant_id"] + + device = await create_test_device(admin_session, tenant.id, hostname="old-hostname") + await admin_session.commit() + + update_data = {"hostname": f"new-hostname-{uuid.uuid4().hex[:8]}"} + resp = await client.put( + f"/api/tenants/{tenant_id}/devices/{device.id}", + json=update_data, + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert data["hostname"] == update_data["hostname"] + + async def test_delete_device( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """DELETE /api/tenants/{tenant_id}/devices/{device_id} removes the device.""" + tenant = await create_test_tenant(admin_session) + # delete requires tenant_admin or above + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id, role="tenant_admin" + ) + tenant_id = auth["tenant_id"] + + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + resp = await client.delete( + f"/api/tenants/{tenant_id}/devices/{device.id}", + headers=auth["headers"], + ) + assert resp.status_code == 204 + + # Verify it's gone + get_resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device.id}", + headers=auth["headers"], + ) + assert get_resp.status_code == 404 + + async def test_list_devices_with_status_filter( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """GET /api/tenants/{tenant_id}/devices?status=online returns filtered results.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + + # Create devices with different statuses + await create_test_device( + admin_session, tenant.id, hostname="online-device", status="online" + ) + await create_test_device( + admin_session, tenant.id, hostname="offline-device", status="offline" + ) + await admin_session.commit() + + # Filter for online only + resp = await client.get( + f"/api/tenants/{tenant_id}/devices?status=online", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert data["total"] >= 1 + for item in data["items"]: + assert item["status"] == "online" + + async def test_get_device_not_found( + self, + client, + auth_headers_factory, + admin_session, + ): + """GET /api/tenants/{tenant_id}/devices/{nonexistent} returns 404.""" + auth = await auth_headers_factory(admin_session) + tenant_id = auth["tenant_id"] + fake_id = str(uuid.uuid4()) + + resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{fake_id}", + headers=auth["headers"], + ) + assert resp.status_code == 404 + + async def test_list_devices_unauthenticated(self, client): + """GET /api/tenants/{tenant_id}/devices without auth returns 401.""" + tenant_id = str(uuid.uuid4()) + resp = await client.get(f"/api/tenants/{tenant_id}/devices") + assert resp.status_code == 401 diff --git a/backend/tests/integration/test_firmware_api.py b/backend/tests/integration/test_firmware_api.py new file mode 100644 index 0000000..42bf18d --- /dev/null +++ b/backend/tests/integration/test_firmware_api.py @@ -0,0 +1,183 @@ +""" +Integration tests for the Firmware API endpoints. + +Tests exercise: +- GET /api/firmware/versions -- list firmware versions (global) +- GET /api/tenants/{tenant_id}/firmware/overview -- firmware overview per tenant +- GET /api/tenants/{tenant_id}/firmware/upgrades -- list upgrade jobs +- PATCH /api/tenants/{tenant_id}/devices/{device_id}/preferred-channel + +Upgrade endpoints (POST .../upgrade, .../mass-upgrade) require actual RouterOS +connections and NATS, so we verify the endpoint exists and handles missing +services gracefully. Download/cache endpoints require super_admin. + +All tests run against real PostgreSQL. +""" + +import uuid + +import pytest + +pytestmark = pytest.mark.integration + + +class TestFirmwareVersions: + """Firmware version listing endpoints.""" + + async def test_list_firmware_versions( + self, + client, + auth_headers_factory, + admin_session, + ): + """GET /api/firmware/versions returns 200 with list (may be empty).""" + auth = await auth_headers_factory(admin_session) + + resp = await client.get( + "/api/firmware/versions", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert isinstance(data, list) + + async def test_list_firmware_versions_with_filters( + self, + client, + auth_headers_factory, + admin_session, + ): + """GET /api/firmware/versions with filters returns 200.""" + auth = await auth_headers_factory(admin_session) + + resp = await client.get( + "/api/firmware/versions", + params={"architecture": "arm", "channel": "stable"}, + headers=auth["headers"], + ) + assert resp.status_code == 200 + assert isinstance(resp.json(), list) + + +class TestFirmwareOverview: + """Tenant-scoped firmware overview.""" + + async def test_firmware_overview( + self, + client, + auth_headers_factory, + admin_session, + ): + """GET /api/tenants/{tenant_id}/firmware/overview returns 200.""" + auth = await auth_headers_factory(admin_session) + tenant_id = auth["tenant_id"] + + resp = await client.get( + f"/api/tenants/{tenant_id}/firmware/overview", + headers=auth["headers"], + ) + # May return 200 or 500 if firmware_service depends on external state + # At minimum, it should not be 404 + assert resp.status_code != 404 + + +class TestPreferredChannel: + """Device preferred firmware channel endpoint.""" + + async def test_set_device_preferred_channel( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """PATCH preferred channel updates the device firmware channel preference.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id, role="operator" + ) + tenant_id = auth["tenant_id"] + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + resp = await client.patch( + f"/api/tenants/{tenant_id}/devices/{device.id}/preferred-channel", + json={"preferred_channel": "long-term"}, + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert data["preferred_channel"] == "long-term" + assert data["status"] == "ok" + + async def test_set_invalid_preferred_channel( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """PATCH with invalid channel returns 422.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id, role="operator" + ) + tenant_id = auth["tenant_id"] + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + resp = await client.patch( + f"/api/tenants/{tenant_id}/devices/{device.id}/preferred-channel", + json={"preferred_channel": "invalid"}, + headers=auth["headers"], + ) + assert resp.status_code == 422 + + +class TestUpgradeJobs: + """Upgrade job listing endpoints.""" + + async def test_list_upgrade_jobs_empty( + self, + client, + auth_headers_factory, + admin_session, + ): + """GET /api/tenants/{tenant_id}/firmware/upgrades returns paginated response.""" + auth = await auth_headers_factory(admin_session) + tenant_id = auth["tenant_id"] + + resp = await client.get( + f"/api/tenants/{tenant_id}/firmware/upgrades", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert "items" in data + assert "total" in data + assert isinstance(data["items"], list) + assert data["total"] >= 0 + + async def test_get_upgrade_job_not_found( + self, + client, + auth_headers_factory, + admin_session, + ): + """GET /api/tenants/{tenant_id}/firmware/upgrades/{fake_id} returns 404.""" + auth = await auth_headers_factory(admin_session) + tenant_id = auth["tenant_id"] + fake_id = str(uuid.uuid4()) + + resp = await client.get( + f"/api/tenants/{tenant_id}/firmware/upgrades/{fake_id}", + headers=auth["headers"], + ) + assert resp.status_code == 404 + + async def test_firmware_unauthenticated(self, client): + """GET firmware versions without auth returns 401.""" + resp = await client.get("/api/firmware/versions") + assert resp.status_code == 401 diff --git a/backend/tests/integration/test_monitoring_api.py b/backend/tests/integration/test_monitoring_api.py new file mode 100644 index 0000000..738fb18 --- /dev/null +++ b/backend/tests/integration/test_monitoring_api.py @@ -0,0 +1,323 @@ +""" +Integration tests for the Monitoring / Metrics API endpoints. + +Tests exercise: +- /api/tenants/{tenant_id}/devices/{device_id}/metrics/health +- /api/tenants/{tenant_id}/devices/{device_id}/metrics/interfaces +- /api/tenants/{tenant_id}/devices/{device_id}/metrics/interfaces/list +- /api/tenants/{tenant_id}/devices/{device_id}/metrics/wireless +- /api/tenants/{tenant_id}/devices/{device_id}/metrics/wireless/latest +- /api/tenants/{tenant_id}/devices/{device_id}/metrics/sparkline +- /api/tenants/{tenant_id}/fleet/summary +- /api/fleet/summary (super_admin only) + +All tests run against real PostgreSQL+TimescaleDB. +""" + +import uuid +from datetime import datetime, timedelta, timezone + +import pytest +from sqlalchemy import text + +pytestmark = pytest.mark.integration + + +class TestHealthMetrics: + """Device health metrics endpoints.""" + + async def test_get_device_health_metrics_empty( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """GET health metrics for a device with no data returns 200 + empty list.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + now = datetime.now(timezone.utc) + start = (now - timedelta(hours=1)).isoformat() + end = now.isoformat() + + resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device.id}/metrics/health", + params={"start": start, "end": end}, + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert isinstance(data, list) + assert len(data) == 0 + + async def test_get_device_health_metrics_with_data( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """GET health metrics returns bucketed data when rows exist.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + device = await create_test_device(admin_session, tenant.id) + await admin_session.flush() + + # Insert test metric rows directly via admin session + now = datetime.now(timezone.utc) + for i in range(5): + ts = now - timedelta(minutes=i * 5) + await admin_session.execute( + text( + "INSERT INTO health_metrics " + "(device_id, time, cpu_load, free_memory, total_memory, " + "free_disk, total_disk, temperature) " + "VALUES (:device_id, :ts, :cpu, :free_mem, :total_mem, " + ":free_disk, :total_disk, :temp)" + ), + { + "device_id": str(device.id), + "ts": ts, + "cpu": 30 + i * 5, + "free_mem": 500000000, + "total_mem": 1000000000, + "free_disk": 200000000, + "total_disk": 500000000, + "temp": 45, + }, + ) + await admin_session.commit() + + start = (now - timedelta(hours=1)).isoformat() + end = now.isoformat() + + resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device.id}/metrics/health", + params={"start": start, "end": end}, + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert isinstance(data, list) + assert len(data) > 0 + # Each bucket should have expected fields + for bucket in data: + assert "bucket" in bucket + assert "avg_cpu" in bucket + + +class TestInterfaceMetrics: + """Interface traffic metrics endpoints.""" + + async def test_get_interface_metrics_empty( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """GET interface metrics for device with no data returns 200 + empty list.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + now = datetime.now(timezone.utc) + resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device.id}/metrics/interfaces", + params={ + "start": (now - timedelta(hours=1)).isoformat(), + "end": now.isoformat(), + }, + headers=auth["headers"], + ) + assert resp.status_code == 200 + assert isinstance(resp.json(), list) + + async def test_get_interface_list_empty( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """GET interface list for device with no data returns 200 + empty list.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device.id}/metrics/interfaces/list", + headers=auth["headers"], + ) + assert resp.status_code == 200 + assert isinstance(resp.json(), list) + + +class TestSparkline: + """Sparkline endpoint.""" + + async def test_sparkline_empty( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """GET sparkline for device with no data returns 200 + empty list.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device.id}/metrics/sparkline", + headers=auth["headers"], + ) + assert resp.status_code == 200 + assert isinstance(resp.json(), list) + + +class TestFleetSummary: + """Fleet summary endpoints.""" + + async def test_fleet_summary_empty( + self, + client, + auth_headers_factory, + admin_session, + create_test_tenant, + ): + """GET /api/tenants/{tenant_id}/fleet/summary returns 200 with empty fleet.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + + resp = await client.get( + f"/api/tenants/{tenant_id}/fleet/summary", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert isinstance(data, list) + + async def test_fleet_summary_with_devices( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """GET fleet summary returns device data when devices exist.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + + await create_test_device(admin_session, tenant.id, hostname="fleet-dev-1") + await create_test_device(admin_session, tenant.id, hostname="fleet-dev-2") + await admin_session.commit() + + resp = await client.get( + f"/api/tenants/{tenant_id}/fleet/summary", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert isinstance(data, list) + assert len(data) >= 2 + hostnames = [d["hostname"] for d in data] + assert "fleet-dev-1" in hostnames + assert "fleet-dev-2" in hostnames + + async def test_fleet_summary_unauthenticated(self, client): + """GET fleet summary without auth returns 401.""" + tenant_id = str(uuid.uuid4()) + resp = await client.get(f"/api/tenants/{tenant_id}/fleet/summary") + assert resp.status_code == 401 + + +class TestWirelessMetrics: + """Wireless metrics endpoints.""" + + async def test_wireless_metrics_empty( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """GET wireless metrics for device with no data returns 200 + empty list.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + now = datetime.now(timezone.utc) + resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device.id}/metrics/wireless", + params={ + "start": (now - timedelta(hours=1)).isoformat(), + "end": now.isoformat(), + }, + headers=auth["headers"], + ) + assert resp.status_code == 200 + assert isinstance(resp.json(), list) + + async def test_wireless_latest_empty( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """GET wireless latest for device with no data returns 200 + empty list.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id + ) + tenant_id = auth["tenant_id"] + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + resp = await client.get( + f"/api/tenants/{tenant_id}/devices/{device.id}/metrics/wireless/latest", + headers=auth["headers"], + ) + assert resp.status_code == 200 + assert isinstance(resp.json(), list) diff --git a/backend/tests/integration/test_rls_isolation.py b/backend/tests/integration/test_rls_isolation.py new file mode 100644 index 0000000..bbd1366 --- /dev/null +++ b/backend/tests/integration/test_rls_isolation.py @@ -0,0 +1,437 @@ +""" +RLS (Row Level Security) tenant isolation integration tests. + +Verifies that PostgreSQL RLS policies correctly isolate tenant data: +- Tenant A cannot see Tenant B's devices, alerts, or device groups +- Tenant A cannot insert data into Tenant B's namespace +- super_admin context sees all tenants +- API-level isolation matches DB-level isolation + +These tests commit real data to PostgreSQL and use the app_user engine +(which enforces RLS) to validate isolation. Each test creates unique +entity names to avoid collisions and cleans up via admin engine. +""" + +import uuid + +import pytest +from sqlalchemy import select, text +from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine + +from app.database import set_tenant_context +from app.models.alert import AlertRule +from app.models.device import Device, DeviceGroup +from app.models.tenant import Tenant +from app.models.user import User +from app.services.auth import hash_password + +pytestmark = pytest.mark.integration + +# Use the same test DB URLs as conftest +from tests.integration.conftest import TEST_APP_USER_DATABASE_URL, TEST_DATABASE_URL + + +# --------------------------------------------------------------------------- +# Helpers: create and commit entities, and cleanup +# --------------------------------------------------------------------------- + + +async def _admin_commit(url, callback): + """Open a fresh admin connection, run callback, commit, close.""" + engine = create_async_engine(url, echo=False) + async with engine.connect() as conn: + session = AsyncSession(bind=conn, expire_on_commit=False) + result = await callback(session) + await session.commit() + await engine.dispose() + return result + + +async def _app_query(url, tenant_id, model_class): + """Open a fresh app_user connection, set tenant context, query model, close.""" + engine = create_async_engine(url, echo=False) + async with engine.connect() as conn: + session = AsyncSession(bind=conn, expire_on_commit=False) + await set_tenant_context(session, tenant_id) + result = await session.execute(select(model_class)) + rows = result.scalars().all() + await engine.dispose() + return rows + + +async def _admin_cleanup(url, *table_names): + """Truncate specified tables via admin engine.""" + engine = create_async_engine(url, echo=False) + async with engine.connect() as conn: + for table in table_names: + await conn.execute(text(f"DELETE FROM {table}")) + await conn.commit() + await engine.dispose() + + +# --------------------------------------------------------------------------- +# Test 1: Tenant A cannot see Tenant B devices +# --------------------------------------------------------------------------- + + +async def test_tenant_a_cannot_see_tenant_b_devices(): + """Tenant A app_user session only returns Tenant A devices.""" + uid = uuid.uuid4().hex[:6] + + # Create tenants via admin + async def setup(session): + ta = Tenant(name=f"rls-dev-ta-{uid}") + tb = Tenant(name=f"rls-dev-tb-{uid}") + session.add_all([ta, tb]) + await session.flush() + + da = Device( + tenant_id=ta.id, hostname=f"rls-ra-{uid}", ip_address="10.1.1.1", + api_port=8728, api_ssl_port=8729, status="online", + ) + db = Device( + tenant_id=tb.id, hostname=f"rls-rb-{uid}", ip_address="10.1.1.2", + api_port=8728, api_ssl_port=8729, status="online", + ) + session.add_all([da, db]) + await session.flush() + return {"ta_id": str(ta.id), "tb_id": str(tb.id)} + + ids = await _admin_commit(TEST_DATABASE_URL, setup) + + try: + # Query as Tenant A + devices_a = await _app_query(TEST_APP_USER_DATABASE_URL, ids["ta_id"], Device) + assert len(devices_a) == 1 + assert devices_a[0].hostname == f"rls-ra-{uid}" + + # Query as Tenant B + devices_b = await _app_query(TEST_APP_USER_DATABASE_URL, ids["tb_id"], Device) + assert len(devices_b) == 1 + assert devices_b[0].hostname == f"rls-rb-{uid}" + finally: + await _admin_cleanup(TEST_DATABASE_URL, "devices", "tenants") + + +# --------------------------------------------------------------------------- +# Test 2: Tenant A cannot see Tenant B alerts +# --------------------------------------------------------------------------- + + +async def test_tenant_a_cannot_see_tenant_b_alerts(): + """Tenant A only sees its own alert rules.""" + uid = uuid.uuid4().hex[:6] + + async def setup(session): + ta = Tenant(name=f"rls-alrt-ta-{uid}") + tb = Tenant(name=f"rls-alrt-tb-{uid}") + session.add_all([ta, tb]) + await session.flush() + + ra = AlertRule( + tenant_id=ta.id, name=f"CPU Alert A {uid}", + metric="cpu_load", operator=">", threshold=90.0, severity="warning", + ) + rb = AlertRule( + tenant_id=tb.id, name=f"CPU Alert B {uid}", + metric="cpu_load", operator=">", threshold=85.0, severity="critical", + ) + session.add_all([ra, rb]) + await session.flush() + return {"ta_id": str(ta.id), "tb_id": str(tb.id)} + + ids = await _admin_commit(TEST_DATABASE_URL, setup) + + try: + rules_a = await _app_query(TEST_APP_USER_DATABASE_URL, ids["ta_id"], AlertRule) + assert len(rules_a) == 1 + assert rules_a[0].name == f"CPU Alert A {uid}" + finally: + await _admin_cleanup(TEST_DATABASE_URL, "alert_rules", "tenants") + + +# --------------------------------------------------------------------------- +# Test 3: Tenant A cannot see Tenant B device groups +# --------------------------------------------------------------------------- + + +async def test_tenant_a_cannot_see_tenant_b_device_groups(): + """Tenant A only sees its own device groups.""" + uid = uuid.uuid4().hex[:6] + + async def setup(session): + ta = Tenant(name=f"rls-grp-ta-{uid}") + tb = Tenant(name=f"rls-grp-tb-{uid}") + session.add_all([ta, tb]) + await session.flush() + + ga = DeviceGroup(tenant_id=ta.id, name=f"Group A {uid}") + gb = DeviceGroup(tenant_id=tb.id, name=f"Group B {uid}") + session.add_all([ga, gb]) + await session.flush() + return {"ta_id": str(ta.id), "tb_id": str(tb.id)} + + ids = await _admin_commit(TEST_DATABASE_URL, setup) + + try: + groups_a = await _app_query(TEST_APP_USER_DATABASE_URL, ids["ta_id"], DeviceGroup) + assert len(groups_a) == 1 + assert groups_a[0].name == f"Group A {uid}" + finally: + await _admin_cleanup(TEST_DATABASE_URL, "device_groups", "tenants") + + +# --------------------------------------------------------------------------- +# Test 4: Tenant A cannot insert device into Tenant B +# --------------------------------------------------------------------------- + + +async def test_tenant_a_cannot_insert_device_into_tenant_b(): + """Inserting a device with tenant_b's ID while in tenant_a context should fail or be invisible.""" + uid = uuid.uuid4().hex[:6] + + async def setup(session): + ta = Tenant(name=f"rls-ins-ta-{uid}") + tb = Tenant(name=f"rls-ins-tb-{uid}") + session.add_all([ta, tb]) + await session.flush() + return {"ta_id": str(ta.id), "tb_id": str(tb.id)} + + ids = await _admin_commit(TEST_DATABASE_URL, setup) + + try: + engine = create_async_engine(TEST_APP_USER_DATABASE_URL, echo=False) + async with engine.connect() as conn: + session = AsyncSession(bind=conn, expire_on_commit=False) + await set_tenant_context(session, ids["ta_id"]) + + # Attempt to insert a device with tenant_b's tenant_id + device = Device( + tenant_id=uuid.UUID(ids["tb_id"]), + hostname=f"evil-device-{uid}", + ip_address="10.99.99.99", + api_port=8728, + api_ssl_port=8729, + status="online", + ) + session.add(device) + + # RLS policy should prevent this -- either by raising an error + # or by making the row invisible after insert + try: + await session.flush() + # If the insert succeeded, verify the device is NOT visible + result = await session.execute(select(Device)) + visible = result.scalars().all() + cross_tenant = [d for d in visible if d.hostname == f"evil-device-{uid}"] + assert len(cross_tenant) == 0, ( + "Cross-tenant device should not be visible to tenant_a" + ) + except Exception: + # RLS violation raised -- this is the expected behavior + pass + await engine.dispose() + finally: + await _admin_cleanup(TEST_DATABASE_URL, "devices", "tenants") + + +# --------------------------------------------------------------------------- +# Test 5: super_admin sees all tenants +# --------------------------------------------------------------------------- + + +async def test_super_admin_sees_all_tenants(): + """super_admin bypasses RLS via admin engine (superuser) and sees all devices. + + The RLS policy does NOT have a special 'super_admin' tenant context. + Instead, super_admin users use the admin engine (PostgreSQL superuser) + which bypasses all RLS policies entirely. + """ + uid = uuid.uuid4().hex[:6] + + async def setup(session): + ta = Tenant(name=f"rls-sa-ta-{uid}") + tb = Tenant(name=f"rls-sa-tb-{uid}") + session.add_all([ta, tb]) + await session.flush() + + da = Device( + tenant_id=ta.id, hostname=f"sa-ra-{uid}", ip_address="10.2.1.1", + api_port=8728, api_ssl_port=8729, status="online", + ) + db = Device( + tenant_id=tb.id, hostname=f"sa-rb-{uid}", ip_address="10.2.1.2", + api_port=8728, api_ssl_port=8729, status="online", + ) + session.add_all([da, db]) + await session.flush() + return {"ta_id": str(ta.id), "tb_id": str(tb.id)} + + ids = await _admin_commit(TEST_DATABASE_URL, setup) + + try: + # super_admin uses admin engine (superuser) which bypasses RLS + engine = create_async_engine(TEST_DATABASE_URL, echo=False) + async with engine.connect() as conn: + session = AsyncSession(bind=conn, expire_on_commit=False) + result = await session.execute(select(Device)) + devices = result.scalars().all() + await engine.dispose() + + # Admin engine (superuser) should see devices from both tenants + hostnames = {d.hostname for d in devices} + assert f"sa-ra-{uid}" in hostnames, "admin engine should see tenant_a device" + assert f"sa-rb-{uid}" in hostnames, "admin engine should see tenant_b device" + + # Verify that app_user engine with a specific tenant only sees that tenant + devices_a = await _app_query(TEST_APP_USER_DATABASE_URL, ids["ta_id"], Device) + hostnames_a = {d.hostname for d in devices_a} + assert f"sa-ra-{uid}" in hostnames_a + assert f"sa-rb-{uid}" not in hostnames_a + finally: + await _admin_cleanup(TEST_DATABASE_URL, "devices", "tenants") + + +# --------------------------------------------------------------------------- +# Test 6: API-level RLS isolation (devices endpoint) +# --------------------------------------------------------------------------- + + +async def test_api_rls_isolation_devices_endpoint(client, admin_engine): + """Each user only sees their own tenant's devices via the API.""" + uid = uuid.uuid4().hex[:6] + + # Create data via admin engine (committed for API visibility) + async def setup(session): + ta = Tenant(name=f"api-rls-ta-{uid}") + tb = Tenant(name=f"api-rls-tb-{uid}") + session.add_all([ta, tb]) + await session.flush() + + ua = User( + email=f"api-ua-{uid}@example.com", + hashed_password=hash_password("TestPass123!"), + name="User A", role="tenant_admin", + tenant_id=ta.id, is_active=True, + ) + ub = User( + email=f"api-ub-{uid}@example.com", + hashed_password=hash_password("TestPass123!"), + name="User B", role="tenant_admin", + tenant_id=tb.id, is_active=True, + ) + session.add_all([ua, ub]) + await session.flush() + + da = Device( + tenant_id=ta.id, hostname=f"api-ra-{uid}", ip_address="10.3.1.1", + api_port=8728, api_ssl_port=8729, status="online", + ) + db = Device( + tenant_id=tb.id, hostname=f"api-rb-{uid}", ip_address="10.3.1.2", + api_port=8728, api_ssl_port=8729, status="online", + ) + session.add_all([da, db]) + await session.flush() + return { + "ta_id": str(ta.id), "tb_id": str(tb.id), + "ua_email": ua.email, "ub_email": ub.email, + } + + ids = await _admin_commit(TEST_DATABASE_URL, setup) + + try: + # Login as user A + login_a = await client.post( + "/api/auth/login", + json={"email": ids["ua_email"], "password": "TestPass123!"}, + ) + assert login_a.status_code == 200, f"Login A failed: {login_a.text}" + token_a = login_a.json()["access_token"] + + # Login as user B + login_b = await client.post( + "/api/auth/login", + json={"email": ids["ub_email"], "password": "TestPass123!"}, + ) + assert login_b.status_code == 200, f"Login B failed: {login_b.text}" + token_b = login_b.json()["access_token"] + + # User A lists devices for tenant A + resp_a = await client.get( + f"/api/tenants/{ids['ta_id']}/devices", + headers={"Authorization": f"Bearer {token_a}"}, + ) + assert resp_a.status_code == 200 + hostnames_a = [d["hostname"] for d in resp_a.json()["items"]] + assert f"api-ra-{uid}" in hostnames_a + assert f"api-rb-{uid}" not in hostnames_a + + # User B lists devices for tenant B + resp_b = await client.get( + f"/api/tenants/{ids['tb_id']}/devices", + headers={"Authorization": f"Bearer {token_b}"}, + ) + assert resp_b.status_code == 200 + hostnames_b = [d["hostname"] for d in resp_b.json()["items"]] + assert f"api-rb-{uid}" in hostnames_b + assert f"api-ra-{uid}" not in hostnames_b + finally: + await _admin_cleanup(TEST_DATABASE_URL, "devices", "users", "tenants") + + +# --------------------------------------------------------------------------- +# Test 7: API-level cross-tenant device access +# --------------------------------------------------------------------------- + + +async def test_api_rls_isolation_cross_tenant_device_access(client, admin_engine): + """Accessing another tenant's endpoint returns 403 (tenant access check).""" + uid = uuid.uuid4().hex[:6] + + async def setup(session): + ta = Tenant(name=f"api-xt-ta-{uid}") + tb = Tenant(name=f"api-xt-tb-{uid}") + session.add_all([ta, tb]) + await session.flush() + + ua = User( + email=f"api-xt-ua-{uid}@example.com", + hashed_password=hash_password("TestPass123!"), + name="User A", role="tenant_admin", + tenant_id=ta.id, is_active=True, + ) + session.add(ua) + await session.flush() + + db = Device( + tenant_id=tb.id, hostname=f"api-xt-rb-{uid}", ip_address="10.4.1.1", + api_port=8728, api_ssl_port=8729, status="online", + ) + session.add(db) + await session.flush() + return { + "ta_id": str(ta.id), "tb_id": str(tb.id), + "ua_email": ua.email, "db_id": str(db.id), + } + + ids = await _admin_commit(TEST_DATABASE_URL, setup) + + try: + # Login as user A + login_a = await client.post( + "/api/auth/login", + json={"email": ids["ua_email"], "password": "TestPass123!"}, + ) + assert login_a.status_code == 200 + token_a = login_a.json()["access_token"] + + # User A tries to access tenant B's devices endpoint + resp = await client.get( + f"/api/tenants/{ids['tb_id']}/devices", + headers={"Authorization": f"Bearer {token_a}"}, + ) + # Should be 403 -- tenant access check prevents cross-tenant access + assert resp.status_code == 403 + finally: + await _admin_cleanup(TEST_DATABASE_URL, "devices", "users", "tenants") diff --git a/backend/tests/integration/test_templates_api.py b/backend/tests/integration/test_templates_api.py new file mode 100644 index 0000000..1d1a378 --- /dev/null +++ b/backend/tests/integration/test_templates_api.py @@ -0,0 +1,322 @@ +""" +Integration tests for the Config Templates API endpoints. + +Tests exercise: +- GET /api/tenants/{tenant_id}/templates -- list templates +- POST /api/tenants/{tenant_id}/templates -- create template +- GET /api/tenants/{tenant_id}/templates/{id} -- get template +- PUT /api/tenants/{tenant_id}/templates/{id} -- update template +- DELETE /api/tenants/{tenant_id}/templates/{id} -- delete template +- POST /api/tenants/{tenant_id}/templates/{id}/preview -- preview rendered template + +Push endpoints (POST .../push) require actual RouterOS connections, so we +only test the preview endpoint which only needs a database device record. + +All tests run against real PostgreSQL. +""" + +import uuid + +import pytest + +pytestmark = pytest.mark.integration + +TEMPLATE_CONTENT = """/ip address add address={{ ip_address }}/24 interface=ether1 +/system identity set name={{ hostname }} +""" + +TEMPLATE_VARIABLES = [ + {"name": "ip_address", "type": "ip", "default": "192.168.1.1"}, + {"name": "hostname", "type": "string", "default": "router"}, +] + + +class TestTemplatesCRUD: + """Template list, create, get, update, delete endpoints.""" + + async def test_list_templates_empty( + self, + client, + auth_headers_factory, + admin_session, + ): + """GET /api/tenants/{tenant_id}/templates returns 200 with empty list.""" + auth = await auth_headers_factory(admin_session) + tenant_id = auth["tenant_id"] + + resp = await client.get( + f"/api/tenants/{tenant_id}/templates", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert isinstance(data, list) + + async def test_create_template( + self, + client, + auth_headers_factory, + admin_session, + ): + """POST /api/tenants/{tenant_id}/templates creates a template.""" + auth = await auth_headers_factory(admin_session, role="operator") + tenant_id = auth["tenant_id"] + + template_data = { + "name": f"Test Template {uuid.uuid4().hex[:6]}", + "description": "A test config template", + "content": TEMPLATE_CONTENT, + "variables": TEMPLATE_VARIABLES, + "tags": ["test", "integration"], + } + + resp = await client.post( + f"/api/tenants/{tenant_id}/templates", + json=template_data, + headers=auth["headers"], + ) + assert resp.status_code == 201 + data = resp.json() + assert data["name"] == template_data["name"] + assert data["description"] == "A test config template" + assert "id" in data + assert "content" in data + assert data["content"] == TEMPLATE_CONTENT + assert data["variable_count"] == 2 + assert set(data["tags"]) == {"test", "integration"} + + async def test_get_template( + self, + client, + auth_headers_factory, + admin_session, + ): + """GET /api/tenants/{tenant_id}/templates/{id} returns full template with content.""" + auth = await auth_headers_factory(admin_session, role="operator") + tenant_id = auth["tenant_id"] + + # Create first + create_data = { + "name": f"Get Test {uuid.uuid4().hex[:6]}", + "content": TEMPLATE_CONTENT, + "variables": TEMPLATE_VARIABLES, + "tags": [], + } + create_resp = await client.post( + f"/api/tenants/{tenant_id}/templates", + json=create_data, + headers=auth["headers"], + ) + assert create_resp.status_code == 201 + template_id = create_resp.json()["id"] + + # Get it + resp = await client.get( + f"/api/tenants/{tenant_id}/templates/{template_id}", + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert data["id"] == template_id + assert data["content"] == TEMPLATE_CONTENT + assert "variables" in data + assert len(data["variables"]) == 2 + + async def test_update_template( + self, + client, + auth_headers_factory, + admin_session, + ): + """PUT /api/tenants/{tenant_id}/templates/{id} updates template content.""" + auth = await auth_headers_factory(admin_session, role="operator") + tenant_id = auth["tenant_id"] + + # Create first + create_data = { + "name": f"Update Test {uuid.uuid4().hex[:6]}", + "content": TEMPLATE_CONTENT, + "variables": TEMPLATE_VARIABLES, + "tags": ["original"], + } + create_resp = await client.post( + f"/api/tenants/{tenant_id}/templates", + json=create_data, + headers=auth["headers"], + ) + assert create_resp.status_code == 201 + template_id = create_resp.json()["id"] + + # Update it + updated_content = "/system identity set name={{ hostname }}-updated\n" + update_data = { + "name": create_data["name"], + "content": updated_content, + "variables": [{"name": "hostname", "type": "string"}], + "tags": ["updated"], + } + resp = await client.put( + f"/api/tenants/{tenant_id}/templates/{template_id}", + json=update_data, + headers=auth["headers"], + ) + assert resp.status_code == 200 + data = resp.json() + assert data["content"] == updated_content + assert data["variable_count"] == 1 + assert "updated" in data["tags"] + + async def test_delete_template( + self, + client, + auth_headers_factory, + admin_session, + ): + """DELETE /api/tenants/{tenant_id}/templates/{id} removes the template.""" + auth = await auth_headers_factory(admin_session, role="operator") + tenant_id = auth["tenant_id"] + + # Create first + create_data = { + "name": f"Delete Test {uuid.uuid4().hex[:6]}", + "content": "/system identity set name=test\n", + "variables": [], + "tags": [], + } + create_resp = await client.post( + f"/api/tenants/{tenant_id}/templates", + json=create_data, + headers=auth["headers"], + ) + assert create_resp.status_code == 201 + template_id = create_resp.json()["id"] + + # Delete it + resp = await client.delete( + f"/api/tenants/{tenant_id}/templates/{template_id}", + headers=auth["headers"], + ) + assert resp.status_code == 204 + + # Verify it's gone + get_resp = await client.get( + f"/api/tenants/{tenant_id}/templates/{template_id}", + headers=auth["headers"], + ) + assert get_resp.status_code == 404 + + async def test_get_template_not_found( + self, + client, + auth_headers_factory, + admin_session, + ): + """GET non-existent template returns 404.""" + auth = await auth_headers_factory(admin_session) + tenant_id = auth["tenant_id"] + fake_id = str(uuid.uuid4()) + + resp = await client.get( + f"/api/tenants/{tenant_id}/templates/{fake_id}", + headers=auth["headers"], + ) + assert resp.status_code == 404 + + +class TestTemplatePreview: + """Template preview endpoint.""" + + async def test_template_preview( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """POST /api/tenants/{tenant_id}/templates/{id}/preview renders template for device.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id, role="operator" + ) + tenant_id = auth["tenant_id"] + + # Create device for preview context + device = await create_test_device( + admin_session, tenant.id, hostname="preview-router", ip_address="10.0.1.1" + ) + await admin_session.commit() + + # Create template + template_data = { + "name": f"Preview Test {uuid.uuid4().hex[:6]}", + "content": "/system identity set name={{ hostname }}\n", + "variables": [], + "tags": [], + } + create_resp = await client.post( + f"/api/tenants/{tenant_id}/templates", + json=template_data, + headers=auth["headers"], + ) + assert create_resp.status_code == 201 + template_id = create_resp.json()["id"] + + # Preview it + preview_resp = await client.post( + f"/api/tenants/{tenant_id}/templates/{template_id}/preview", + json={"device_id": str(device.id), "variables": {}}, + headers=auth["headers"], + ) + assert preview_resp.status_code == 200 + data = preview_resp.json() + assert "rendered" in data + assert "preview-router" in data["rendered"] + assert data["device_hostname"] == "preview-router" + + async def test_template_preview_with_variables( + self, + client, + auth_headers_factory, + admin_session, + create_test_device, + create_test_tenant, + ): + """Preview with custom variables renders them into the template.""" + tenant = await create_test_tenant(admin_session) + auth = await auth_headers_factory( + admin_session, existing_tenant_id=tenant.id, role="operator" + ) + tenant_id = auth["tenant_id"] + + device = await create_test_device(admin_session, tenant.id) + await admin_session.commit() + + template_data = { + "name": f"VarPreview {uuid.uuid4().hex[:6]}", + "content": "/ip address add address={{ custom_ip }}/24 interface=ether1\n", + "variables": [{"name": "custom_ip", "type": "ip", "default": "192.168.1.1"}], + "tags": [], + } + create_resp = await client.post( + f"/api/tenants/{tenant_id}/templates", + json=template_data, + headers=auth["headers"], + ) + assert create_resp.status_code == 201 + template_id = create_resp.json()["id"] + + preview_resp = await client.post( + f"/api/tenants/{tenant_id}/templates/{template_id}/preview", + json={"device_id": str(device.id), "variables": {"custom_ip": "10.10.10.1"}}, + headers=auth["headers"], + ) + assert preview_resp.status_code == 200 + data = preview_resp.json() + assert "10.10.10.1" in data["rendered"] + + async def test_templates_unauthenticated(self, client): + """GET templates without auth returns 401.""" + tenant_id = str(uuid.uuid4()) + resp = await client.get(f"/api/tenants/{tenant_id}/templates") + assert resp.status_code == 401 diff --git a/backend/tests/test_backup_scheduler.py b/backend/tests/test_backup_scheduler.py new file mode 100644 index 0000000..1f278ba --- /dev/null +++ b/backend/tests/test_backup_scheduler.py @@ -0,0 +1,42 @@ +"""Tests for dynamic backup scheduling.""" + +import pytest +from unittest.mock import AsyncMock, patch, MagicMock + +from app.services.backup_scheduler import ( + build_schedule_map, + _cron_to_trigger, +) + + +def test_cron_to_trigger_parses_standard_cron(): + """Parse '0 2 * * *' into CronTrigger with hour=2, minute=0.""" + trigger = _cron_to_trigger("0 2 * * *") + assert trigger is not None + + +def test_cron_to_trigger_parses_every_6_hours(): + """Parse '0 */6 * * *' into CronTrigger.""" + trigger = _cron_to_trigger("0 */6 * * *") + assert trigger is not None + + +def test_cron_to_trigger_invalid_returns_none(): + """Invalid cron returns None (fallback to default).""" + trigger = _cron_to_trigger("not a cron") + assert trigger is None + + +@pytest.mark.asyncio +async def test_build_schedule_map_groups_by_cron(): + """Devices sharing a cron expression should be grouped together.""" + schedules = [ + MagicMock(device_id="dev1", tenant_id="t1", cron_expression="0 2 * * *", enabled=True), + MagicMock(device_id="dev2", tenant_id="t1", cron_expression="0 2 * * *", enabled=True), + MagicMock(device_id="dev3", tenant_id="t2", cron_expression="0 6 * * *", enabled=True), + ] + schedule_map = build_schedule_map(schedules) + assert "0 2 * * *" in schedule_map + assert "0 6 * * *" in schedule_map + assert len(schedule_map["0 2 * * *"]) == 2 + assert len(schedule_map["0 6 * * *"]) == 1 diff --git a/backend/tests/test_config_change_subscriber.py b/backend/tests/test_config_change_subscriber.py new file mode 100644 index 0000000..50168bc --- /dev/null +++ b/backend/tests/test_config_change_subscriber.py @@ -0,0 +1,55 @@ +"""Tests for config change NATS subscriber.""" + +import pytest +from datetime import datetime, timedelta, timezone +from unittest.mock import AsyncMock, patch, MagicMock +from uuid import uuid4 + +from app.services.config_change_subscriber import handle_config_changed + + +@pytest.mark.asyncio +async def test_triggers_backup_on_config_change(): + """Config change event should trigger a backup.""" + event = { + "device_id": str(uuid4()), + "tenant_id": str(uuid4()), + "old_timestamp": "2026-03-07 11:00:00", + "new_timestamp": "2026-03-07 12:00:00", + } + + with patch( + "app.services.config_change_subscriber.backup_service.run_backup", + new_callable=AsyncMock, + ) as mock_backup, patch( + "app.services.config_change_subscriber._last_backup_within_dedup_window", + new_callable=AsyncMock, + return_value=False, + ): + await handle_config_changed(event) + + mock_backup.assert_called_once() + assert mock_backup.call_args[1]["trigger_type"] == "config-change" + + +@pytest.mark.asyncio +async def test_skips_backup_within_dedup_window(): + """Should skip backup if last backup was < 5 minutes ago.""" + event = { + "device_id": str(uuid4()), + "tenant_id": str(uuid4()), + "old_timestamp": "2026-03-07 11:00:00", + "new_timestamp": "2026-03-07 12:00:00", + } + + with patch( + "app.services.config_change_subscriber.backup_service.run_backup", + new_callable=AsyncMock, + ) as mock_backup, patch( + "app.services.config_change_subscriber._last_backup_within_dedup_window", + new_callable=AsyncMock, + return_value=True, + ): + await handle_config_changed(event) + + mock_backup.assert_not_called() diff --git a/backend/tests/test_config_checkpoint.py b/backend/tests/test_config_checkpoint.py new file mode 100644 index 0000000..31e6a2d --- /dev/null +++ b/backend/tests/test_config_checkpoint.py @@ -0,0 +1,82 @@ +"""Tests for config checkpoint endpoint.""" + +import uuid +from unittest.mock import AsyncMock, patch, MagicMock + +import pytest + + +class TestCheckpointEndpointExists: + """Verify the checkpoint route is registered on the config_backups router.""" + + def test_router_has_checkpoint_route(self): + from app.routers.config_backups import router + + paths = [r.path for r in router.routes] + assert any("checkpoint" in p for p in paths), ( + f"No checkpoint route found. Routes: {paths}" + ) + + def test_checkpoint_route_is_post(self): + from app.routers.config_backups import router + + for route in router.routes: + if hasattr(route, "path") and "checkpoint" in route.path: + assert "POST" in route.methods, ( + f"Checkpoint route should be POST, got {route.methods}" + ) + break + else: + pytest.fail("No checkpoint route found") + + +class TestCheckpointFunction: + """Test the create_checkpoint handler logic.""" + + @pytest.mark.asyncio + async def test_checkpoint_calls_backup_service_with_checkpoint_trigger(self): + """create_checkpoint should call backup_service.run_backup with trigger_type='checkpoint'.""" + from app.routers.config_backups import create_checkpoint + + mock_result = { + "commit_sha": "abc1234", + "trigger_type": "checkpoint", + "lines_added": 100, + "lines_removed": 0, + } + + mock_db = AsyncMock() + mock_user = MagicMock() + + tenant_id = uuid.uuid4() + device_id = uuid.uuid4() + + mock_request = MagicMock() + + with patch( + "app.routers.config_backups.backup_service.run_backup", + new_callable=AsyncMock, + return_value=mock_result, + ) as mock_backup, patch( + "app.routers.config_backups._check_tenant_access", + new_callable=AsyncMock, + ), patch( + "app.routers.config_backups.limiter.enabled", + False, + ): + result = await create_checkpoint( + request=mock_request, + tenant_id=tenant_id, + device_id=device_id, + db=mock_db, + current_user=mock_user, + ) + + assert result["trigger_type"] == "checkpoint" + assert result["commit_sha"] == "abc1234" + mock_backup.assert_called_once_with( + device_id=str(device_id), + tenant_id=str(tenant_id), + trigger_type="checkpoint", + db_session=mock_db, + ) diff --git a/backend/tests/test_push_recovery.py b/backend/tests/test_push_recovery.py new file mode 100644 index 0000000..62aad3e --- /dev/null +++ b/backend/tests/test_push_recovery.py @@ -0,0 +1,120 @@ +"""Tests for stale push operation recovery on API startup.""" + +import pytest +from datetime import datetime, timedelta, timezone +from unittest.mock import AsyncMock, patch, MagicMock +from uuid import uuid4 + +from app.services.restore_service import recover_stale_push_operations + + +@pytest.mark.asyncio +async def test_recovery_commits_reachable_device_with_scheduler(): + """If device is reachable and panic-revert scheduler exists, delete it and commit.""" + push_op = MagicMock() + push_op.id = uuid4() + push_op.device_id = uuid4() + push_op.tenant_id = uuid4() + push_op.status = "pending_verification" + push_op.scheduler_name = "mikrotik-portal-panic-revert" + push_op.started_at = datetime.now(timezone.utc) - timedelta(minutes=10) + + device = MagicMock() + device.ip_address = "192.168.1.1" + device.api_port = 8729 + device.ssh_port = 22 + + mock_session = AsyncMock() + # Return stale ops query + mock_result = MagicMock() + mock_result.scalars.return_value.all.return_value = [push_op] + mock_session.execute = AsyncMock(side_effect=[mock_result, MagicMock()]) + + # Mock device query result (second execute call) + dev_result = MagicMock() + dev_result.scalar_one_or_none.return_value = device + mock_session.execute = AsyncMock(side_effect=[mock_result, dev_result]) + + with patch( + "app.services.restore_service._check_reachability", + new_callable=AsyncMock, + return_value=True, + ), patch( + "app.services.restore_service._remove_panic_scheduler", + new_callable=AsyncMock, + return_value=True, + ), patch( + "app.services.restore_service._update_push_op_status", + new_callable=AsyncMock, + ) as mock_update, patch( + "app.services.restore_service._publish_push_progress", + new_callable=AsyncMock, + ), patch( + "app.services.crypto.decrypt_credentials_hybrid", + new_callable=AsyncMock, + return_value='{"username": "admin", "password": "test123"}', + ), patch( + "app.services.restore_service.settings", + ): + await recover_stale_push_operations(mock_session) + + mock_update.assert_called_once() + call_args = mock_update.call_args + assert call_args[0][1] == "committed" or call_args[1].get("new_status") == "committed" + + +@pytest.mark.asyncio +async def test_recovery_marks_unreachable_device_failed(): + """If device is unreachable, mark operation as failed.""" + push_op = MagicMock() + push_op.id = uuid4() + push_op.device_id = uuid4() + push_op.tenant_id = uuid4() + push_op.status = "pending_verification" + push_op.scheduler_name = "mikrotik-portal-panic-revert" + push_op.started_at = datetime.now(timezone.utc) - timedelta(minutes=10) + + device = MagicMock() + device.ip_address = "192.168.1.1" + + mock_session = AsyncMock() + mock_result = MagicMock() + mock_result.scalars.return_value.all.return_value = [push_op] + dev_result = MagicMock() + dev_result.scalar_one_or_none.return_value = device + mock_session.execute = AsyncMock(side_effect=[mock_result, dev_result]) + + with patch( + "app.services.restore_service._check_reachability", + new_callable=AsyncMock, + return_value=False, + ), patch( + "app.services.restore_service._update_push_op_status", + new_callable=AsyncMock, + ) as mock_update, patch( + "app.services.restore_service._publish_push_progress", + new_callable=AsyncMock, + ), patch( + "app.services.crypto.decrypt_credentials_hybrid", + new_callable=AsyncMock, + return_value='{"username": "admin", "password": "test123"}', + ), patch( + "app.services.restore_service.settings", + ): + await recover_stale_push_operations(mock_session) + + mock_update.assert_called_once() + call_args = mock_update.call_args + assert call_args[0][1] == "failed" or call_args[1].get("new_status") == "failed" + + +@pytest.mark.asyncio +async def test_recovery_skips_recent_ops(): + """Operations less than 5 minutes old should not be recovered (still in progress).""" + mock_session = AsyncMock() + mock_result = MagicMock() + mock_result.scalars.return_value.all.return_value = [] # Query filters by age + mock_session.execute = AsyncMock(return_value=mock_result) + + await recover_stale_push_operations(mock_session) + # No errors, no updates — just returns cleanly diff --git a/backend/tests/test_push_rollback_subscriber.py b/backend/tests/test_push_rollback_subscriber.py new file mode 100644 index 0000000..e517c83 --- /dev/null +++ b/backend/tests/test_push_rollback_subscriber.py @@ -0,0 +1,156 @@ +"""Tests for push rollback NATS subscriber.""" + +import pytest +from unittest.mock import AsyncMock, patch, MagicMock +from uuid import uuid4 + +from app.services.push_rollback_subscriber import ( + handle_push_rollback, + handle_push_alert, +) + + +@pytest.mark.asyncio +async def test_rollback_triggers_restore(): + """Push rollback should call restore_config with the pre-push commit SHA.""" + event = { + "device_id": str(uuid4()), + "tenant_id": str(uuid4()), + "push_operation_id": str(uuid4()), + "pre_push_commit_sha": "abc1234", + } + + mock_session = AsyncMock() + mock_cm = AsyncMock() + mock_cm.__aenter__ = AsyncMock(return_value=mock_session) + mock_cm.__aexit__ = AsyncMock(return_value=False) + + with ( + patch( + "app.services.push_rollback_subscriber.restore_service.restore_config", + new_callable=AsyncMock, + return_value={"status": "committed"}, + ) as mock_restore, + patch( + "app.services.push_rollback_subscriber.AdminAsyncSessionLocal", + return_value=mock_cm, + ), + ): + await handle_push_rollback(event) + + mock_restore.assert_called_once() + call_kwargs = mock_restore.call_args[1] + assert call_kwargs["device_id"] == event["device_id"] + assert call_kwargs["tenant_id"] == event["tenant_id"] + assert call_kwargs["commit_sha"] == "abc1234" + assert call_kwargs["db_session"] is mock_session + + +@pytest.mark.asyncio +async def test_rollback_missing_fields_skips(): + """Rollback with missing fields should log warning and return.""" + event = {"device_id": str(uuid4())} # missing tenant_id and commit_sha + + with patch( + "app.services.push_rollback_subscriber.restore_service.restore_config", + new_callable=AsyncMock, + ) as mock_restore: + await handle_push_rollback(event) + + mock_restore.assert_not_called() + + +@pytest.mark.asyncio +async def test_rollback_failure_creates_alert(): + """When restore_config raises, an alert should be created.""" + event = { + "device_id": str(uuid4()), + "tenant_id": str(uuid4()), + "pre_push_commit_sha": "abc1234", + } + + mock_session = AsyncMock() + mock_cm = AsyncMock() + mock_cm.__aenter__ = AsyncMock(return_value=mock_session) + mock_cm.__aexit__ = AsyncMock(return_value=False) + + with ( + patch( + "app.services.push_rollback_subscriber.restore_service.restore_config", + new_callable=AsyncMock, + side_effect=RuntimeError("SSH failed"), + ), + patch( + "app.services.push_rollback_subscriber.AdminAsyncSessionLocal", + return_value=mock_cm, + ), + patch( + "app.services.push_rollback_subscriber._create_push_alert", + new_callable=AsyncMock, + ) as mock_alert, + ): + await handle_push_rollback(event) + + mock_alert.assert_called_once_with( + event["device_id"], + event["tenant_id"], + "template (auto-rollback failed)", + ) + + +@pytest.mark.asyncio +async def test_alert_creates_alert_record(): + """Editor push alert should create a high-priority alert.""" + event = { + "device_id": str(uuid4()), + "tenant_id": str(uuid4()), + "push_type": "editor", + } + + with patch( + "app.services.push_rollback_subscriber._create_push_alert", + new_callable=AsyncMock, + ) as mock_alert: + await handle_push_alert(event) + + mock_alert.assert_called_once_with( + event["device_id"], + event["tenant_id"], + "editor", + ) + + +@pytest.mark.asyncio +async def test_alert_missing_fields_skips(): + """Alert with missing fields should skip.""" + event = {"device_id": str(uuid4())} # missing tenant_id + + with patch( + "app.services.push_rollback_subscriber._create_push_alert", + new_callable=AsyncMock, + ) as mock_alert: + await handle_push_alert(event) + + mock_alert.assert_not_called() + + +@pytest.mark.asyncio +async def test_alert_defaults_to_editor_push_type(): + """Alert without push_type should default to 'editor'.""" + event = { + "device_id": str(uuid4()), + "tenant_id": str(uuid4()), + # no push_type + } + + with patch( + "app.services.push_rollback_subscriber._create_push_alert", + new_callable=AsyncMock, + ) as mock_alert: + await handle_push_alert(event) + + mock_alert.assert_called_once_with( + event["device_id"], + event["tenant_id"], + "editor", + ) diff --git a/backend/tests/test_restore_preview.py b/backend/tests/test_restore_preview.py new file mode 100644 index 0000000..8cfa0f7 --- /dev/null +++ b/backend/tests/test_restore_preview.py @@ -0,0 +1,211 @@ +"""Tests for the preview-restore endpoint.""" + +import uuid +from unittest.mock import AsyncMock, patch, MagicMock + +import pytest + + +class TestPreviewRestoreEndpointExists: + """Verify the preview-restore route is registered on the config_backups router.""" + + def test_router_has_preview_restore_route(self): + from app.routers.config_backups import router + + paths = [r.path for r in router.routes] + assert any("preview-restore" in p for p in paths), ( + f"No preview-restore route found. Routes: {paths}" + ) + + def test_preview_restore_route_is_post(self): + from app.routers.config_backups import router + + for route in router.routes: + if hasattr(route, "path") and "preview-restore" in route.path: + assert "POST" in route.methods, ( + f"preview-restore route should be POST, got {route.methods}" + ) + break + else: + pytest.fail("No preview-restore route found") + + +class TestPreviewRestoreFunction: + """Test the preview_restore handler logic.""" + + @pytest.mark.asyncio + async def test_preview_returns_impact_analysis(self): + """preview_restore should return diff, categories, warnings, validation.""" + from app.routers.config_backups import preview_restore, RestoreRequest + + tenant_id = uuid.uuid4() + device_id = uuid.uuid4() + + current_export = "/ip address\nadd address=192.168.1.1/24 interface=ether1\n" + target_export = "/ip address\nadd address=10.0.0.1/24 interface=ether1\n" + + mock_db = AsyncMock() + mock_user = MagicMock() + mock_request = MagicMock() + body = RestoreRequest(commit_sha="abc1234") + + # Mock device query result + mock_device = MagicMock() + mock_device.ip_address = "192.168.88.1" + mock_device.encrypted_credentials_transit = "vault:v1:abc" + mock_device.encrypted_credentials = None + mock_device.tenant_id = tenant_id + + mock_scalar = MagicMock() + mock_scalar.scalar_one_or_none.return_value = mock_device + mock_db.execute.return_value = mock_scalar + + with patch( + "app.routers.config_backups._check_tenant_access", + new_callable=AsyncMock, + ), patch( + "app.routers.config_backups.limiter.enabled", + False, + ), patch( + "app.routers.config_backups.git_store.read_file", + return_value=target_export.encode(), + ), patch( + "app.routers.config_backups.backup_service.capture_export", + new_callable=AsyncMock, + return_value=current_export, + ), patch( + "app.routers.config_backups.decrypt_credentials_hybrid", + new_callable=AsyncMock, + return_value='{"username": "admin", "password": "pass"}', + ), patch( + "app.routers.config_backups.settings", + ): + result = await preview_restore( + request=mock_request, + tenant_id=tenant_id, + device_id=device_id, + body=body, + db=mock_db, + current_user=mock_user, + ) + + assert "diff" in result + assert "categories" in result + assert "warnings" in result + assert "validation" in result + # Both exports have /ip address with different commands + assert isinstance(result["categories"], list) + assert isinstance(result["diff"], dict) + assert "added" in result["diff"] + assert "removed" in result["diff"] + + @pytest.mark.asyncio + async def test_preview_falls_back_to_latest_backup_when_device_unreachable(self): + """When live capture fails, preview should fall back to the latest backup.""" + from app.routers.config_backups import preview_restore, RestoreRequest + + tenant_id = uuid.uuid4() + device_id = uuid.uuid4() + + current_export = "/ip address\nadd address=192.168.1.1/24 interface=ether1\n" + target_export = "/ip address\nadd address=10.0.0.1/24 interface=ether1\n" + + mock_db = AsyncMock() + mock_user = MagicMock() + mock_request = MagicMock() + body = RestoreRequest(commit_sha="abc1234") + + # Mock device query result + mock_device = MagicMock() + mock_device.ip_address = "192.168.88.1" + mock_device.encrypted_credentials_transit = "vault:v1:abc" + mock_device.encrypted_credentials = None + mock_device.tenant_id = tenant_id + + # First call: device query, second call: latest backup query + mock_device_result = MagicMock() + mock_device_result.scalar_one_or_none.return_value = mock_device + + mock_latest_run = MagicMock() + mock_latest_run.commit_sha = "latest123" + mock_backup_result = MagicMock() + mock_backup_result.scalar_one_or_none.return_value = mock_latest_run + + mock_db.execute.side_effect = [mock_device_result, mock_backup_result] + + def mock_read_file(tid, sha, did, filename): + if sha == "abc1234": + return target_export.encode() + elif sha == "latest123": + return current_export.encode() + return b"" + + with patch( + "app.routers.config_backups._check_tenant_access", + new_callable=AsyncMock, + ), patch( + "app.routers.config_backups.limiter.enabled", + False, + ), patch( + "app.routers.config_backups.git_store.read_file", + side_effect=mock_read_file, + ), patch( + "app.routers.config_backups.backup_service.capture_export", + new_callable=AsyncMock, + side_effect=ConnectionError("Device unreachable"), + ), patch( + "app.routers.config_backups.decrypt_credentials_hybrid", + new_callable=AsyncMock, + return_value='{"username": "admin", "password": "pass"}', + ), patch( + "app.routers.config_backups.settings", + ): + result = await preview_restore( + request=mock_request, + tenant_id=tenant_id, + device_id=device_id, + body=body, + db=mock_db, + current_user=mock_user, + ) + + assert "diff" in result + assert "categories" in result + assert "warnings" in result + assert "validation" in result + + @pytest.mark.asyncio + async def test_preview_404_when_backup_not_found(self): + """preview_restore should return 404 when the target backup doesn't exist.""" + from app.routers.config_backups import preview_restore, RestoreRequest + from fastapi import HTTPException + + tenant_id = uuid.uuid4() + device_id = uuid.uuid4() + + mock_db = AsyncMock() + mock_user = MagicMock() + mock_request = MagicMock() + body = RestoreRequest(commit_sha="nonexistent") + + with patch( + "app.routers.config_backups._check_tenant_access", + new_callable=AsyncMock, + ), patch( + "app.routers.config_backups.limiter.enabled", + False, + ), patch( + "app.routers.config_backups.git_store.read_file", + side_effect=KeyError("not found"), + ): + with pytest.raises(HTTPException) as exc_info: + await preview_restore( + request=mock_request, + tenant_id=tenant_id, + device_id=device_id, + body=body, + db=mock_db, + current_user=mock_user, + ) + + assert exc_info.value.status_code == 404 diff --git a/backend/tests/test_rsc_parser.py b/backend/tests/test_rsc_parser.py new file mode 100644 index 0000000..de68ccf --- /dev/null +++ b/backend/tests/test_rsc_parser.py @@ -0,0 +1,106 @@ +"""Tests for RouterOS RSC export parser.""" + +import pytest +from app.services.rsc_parser import parse_rsc, validate_rsc, compute_impact + + +SAMPLE_EXPORT = """\ +# 2026-03-07 12:00:00 by RouterOS 7.16.2 +# software id = ABCD-1234 +# +# model = RB750Gr3 +/interface bridge +add name=bridge1 +/ip address +add address=192.168.88.1/24 interface=ether1 network=192.168.88.0 +add address=10.0.0.1/24 interface=bridge1 network=10.0.0.0 +/ip firewall filter +add action=accept chain=input comment="allow established" \\ + connection-state=established,related +add action=drop chain=input in-interface-list=WAN +/ip dns +set servers=8.8.8.8,8.8.4.4 +/system identity +set name=test-router +""" + + +class TestParseRsc: + def test_extracts_categories(self): + result = parse_rsc(SAMPLE_EXPORT) + paths = [c["path"] for c in result["categories"]] + assert "/interface bridge" in paths + assert "/ip address" in paths + assert "/ip firewall filter" in paths + assert "/ip dns" in paths + assert "/system identity" in paths + + def test_counts_commands_per_category(self): + result = parse_rsc(SAMPLE_EXPORT) + cat_map = {c["path"]: c for c in result["categories"]} + assert cat_map["/ip address"]["adds"] == 2 + assert cat_map["/ip address"]["sets"] == 0 + assert cat_map["/ip firewall filter"]["adds"] == 2 + assert cat_map["/ip dns"]["sets"] == 1 + assert cat_map["/system identity"]["sets"] == 1 + + def test_handles_continuation_lines(self): + result = parse_rsc(SAMPLE_EXPORT) + cat_map = {c["path"]: c for c in result["categories"]} + # The firewall filter has a continuation line — should still count as 2 adds + assert cat_map["/ip firewall filter"]["adds"] == 2 + + def test_ignores_comments_and_blank_lines(self): + result = parse_rsc(SAMPLE_EXPORT) + # Comments at top should not create categories + paths = [c["path"] for c in result["categories"]] + assert "#" not in paths + + def test_empty_input(self): + result = parse_rsc("") + assert result["categories"] == [] + + +class TestValidateRsc: + def test_valid_export_passes(self): + result = validate_rsc(SAMPLE_EXPORT) + assert result["valid"] is True + assert result["errors"] == [] + + def test_unbalanced_quotes_detected(self): + bad = '/system identity\nset name="missing-end-quote\n' + result = validate_rsc(bad) + assert result["valid"] is False + assert any("quote" in e.lower() for e in result["errors"]) + + def test_truncated_continuation_detected(self): + bad = '/ip address\nadd address=192.168.1.1/24 \\\n' + result = validate_rsc(bad) + assert result["valid"] is False + assert any("truncat" in e.lower() or "continuation" in e.lower() for e in result["errors"]) + + +class TestComputeImpact: + def test_high_risk_for_firewall_input(self): + current = '/ip firewall filter\nadd action=accept chain=input\n' + target = '/ip firewall filter\nadd action=drop chain=input\n' + result = compute_impact(parse_rsc(current), parse_rsc(target)) + assert any(c["risk"] == "high" for c in result["categories"]) + + def test_high_risk_for_ip_address_changes(self): + current = '/ip address\nadd address=192.168.1.1/24 interface=ether1\n' + target = '/ip address\nadd address=10.0.0.1/24 interface=ether1\n' + result = compute_impact(parse_rsc(current), parse_rsc(target)) + ip_cat = next(c for c in result["categories"] if c["path"] == "/ip address") + assert ip_cat["risk"] in ("high", "medium") + + def test_warnings_for_management_access(self): + current = "" + target = '/ip firewall filter\nadd action=drop chain=input protocol=tcp dst-port=22\n' + result = compute_impact(parse_rsc(current), parse_rsc(target)) + assert len(result["warnings"]) > 0 + + def test_no_changes_no_warnings(self): + same = '/ip dns\nset servers=8.8.8.8\n' + result = compute_impact(parse_rsc(same), parse_rsc(same)) + assert result["warnings"] == [] or all(c["risk"] == "none" for c in result["categories"]) diff --git a/backend/tests/test_srp_interop.py b/backend/tests/test_srp_interop.py new file mode 100644 index 0000000..1bc6b56 --- /dev/null +++ b/backend/tests/test_srp_interop.py @@ -0,0 +1,128 @@ +"""SRP-6a interop verification. + +Uses srptools to perform a complete SRP handshake with fixed inputs, +then prints all intermediate hex values. The TypeScript SRP client +(frontend/src/lib/crypto/srp.ts) can be verified against these +known-good values to catch encoding mismatches. + +Run standalone: + cd backend && python -m tests.test_srp_interop + +Or via pytest: + cd backend && python -m pytest tests/test_srp_interop.py -v +""" + +from srptools import SRPContext, SRPClientSession, SRPServerSession +from srptools.constants import PRIME_2048, PRIME_2048_GEN + + +# Fixed test inputs +EMAIL = "test@example.com" +PASSWORD = "test-password" + + +def test_srp_roundtrip(): + """Verify srptools produces a successful handshake end-to-end. + + This test ensures the server-side library completes a full SRP + handshake without errors. The printed intermediate values serve as + reference data for the TypeScript client interop test. + """ + # Step 1: Registration -- compute salt + verifier (needs password in context) + context = SRPContext(EMAIL, password=PASSWORD, prime=PRIME_2048, generator=PRIME_2048_GEN) + username, verifier, salt = context.get_user_data_triplet() + + print(f"\n--- SRP Interop Reference Values ---") + print(f"email (I): {EMAIL}") + print(f"salt (s): {salt}") + print(f"verifier (v): {verifier[:64]}... (len={len(verifier)})") + + # Step 2: Server init -- generate B (server only needs verifier, no password) + server_context = SRPContext(EMAIL, prime=PRIME_2048, generator=PRIME_2048_GEN) + server_session = SRPServerSession(server_context, verifier) + server_public = server_session.public + + print(f"server_public (B): {server_public[:64]}... (len={len(server_public)})") + + # Step 3: Client init -- generate A (client needs password for proof) + client_context = SRPContext(EMAIL, password=PASSWORD, prime=PRIME_2048, generator=PRIME_2048_GEN) + client_session = SRPClientSession(client_context) + client_public = client_session.public + + print(f"client_public (A): {client_public[:64]}... (len={len(client_public)})") + + # Step 4: Client processes B + client_session.process(server_public, salt) + + # Step 5: Server processes A + server_session.process(client_public, salt) + + # Step 6: Client generates proof M1 + client_proof = client_session.key_proof + + print(f"client_proof (M1): {client_proof}") + + # Step 7: Server verifies M1 and generates M2 + server_session.verify_proof(client_proof) + server_proof = server_session.key_proof_hash + + print(f"server_proof (M2): {server_proof}") + + # Step 8: Client verifies M2 + client_session.verify_proof(server_proof) + + # Step 9: Verify session keys match + assert client_session.key == server_session.key, ( + f"Session key mismatch: client={client_session.key[:32]}... " + f"server={server_session.key[:32]}..." + ) + + print(f"session_key (K): {client_session.key[:64]}... (len={len(client_session.key)})") + print(f"--- Handshake PASSED ---\n") + + +def test_srp_bad_proof_rejected(): + """Verify that an incorrect M1 proof is rejected by the server.""" + context = SRPContext(EMAIL, password=PASSWORD, prime=PRIME_2048, generator=PRIME_2048_GEN) + _, verifier, salt = context.get_user_data_triplet() + + server_context = SRPContext(EMAIL, prime=PRIME_2048, generator=PRIME_2048_GEN) + server_session = SRPServerSession(server_context, verifier) + + client_context = SRPContext(EMAIL, password=PASSWORD, prime=PRIME_2048, generator=PRIME_2048_GEN) + client_session = SRPClientSession(client_context) + + client_session.process(server_session.public, salt) + server_session.process(client_session.public, salt) + + # Tamper with proof + bad_proof = "00" * 32 + + try: + server_session.verify_proof(bad_proof) + assert False, "Server should have rejected bad proof" + except Exception: + pass # Expected: bad proof rejected + + +def test_srp_deterministic_verifier(): + """Verify that the same salt + identity produce consistent verifiers.""" + context1 = SRPContext(EMAIL, password=PASSWORD, prime=PRIME_2048, generator=PRIME_2048_GEN) + _, v1, s1 = context1.get_user_data_triplet() + + # Same email + password, new context + context2 = SRPContext(EMAIL, password=PASSWORD, prime=PRIME_2048, generator=PRIME_2048_GEN) + _, v2, s2 = context2.get_user_data_triplet() + + # srptools generates random salt each time, so verifiers will differ. + # But the output format is consistent. + assert len(v1) > 0 + assert len(v2) > 0 + assert len(s1) == len(s2), "Salt lengths should be consistent" + + +if __name__ == "__main__": + test_srp_roundtrip() + test_srp_bad_proof_rejected() + test_srp_deterministic_verifier() + print("All SRP interop tests passed.") diff --git a/backend/tests/unit/__init__.py b/backend/tests/unit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/unit/test_api_key_service.py b/backend/tests/unit/test_api_key_service.py new file mode 100644 index 0000000..7445a77 --- /dev/null +++ b/backend/tests/unit/test_api_key_service.py @@ -0,0 +1,76 @@ +"""Unit tests for API key service. + +Tests cover: +- Key generation format (mktp_ prefix, sufficient length) +- Key hashing (SHA-256 hex digest, 64 chars) +- Scope validation against allowed list +- Key prefix extraction + +These are pure function tests -- no database or async required. +""" + +import hashlib + +from app.services.api_key_service import ( + ALLOWED_SCOPES, + generate_raw_key, + hash_key, +) + + +class TestKeyGeneration: + """Tests for API key generation.""" + + def test_key_starts_with_prefix(self): + key = generate_raw_key() + assert key.startswith("mktp_") + + def test_key_has_sufficient_length(self): + """Key should be mktp_ + at least 32 chars of randomness.""" + key = generate_raw_key() + assert len(key) >= 37 # "mktp_" (5) + 32 + + def test_key_uniqueness(self): + """Two generated keys should never be the same.""" + key1 = generate_raw_key() + key2 = generate_raw_key() + assert key1 != key2 + + +class TestKeyHashing: + """Tests for SHA-256 key hashing.""" + + def test_hash_produces_64_char_hex(self): + key = "mktp_test1234567890abcdef" + h = hash_key(key) + assert len(h) == 64 + assert all(c in "0123456789abcdef" for c in h) + + def test_hash_is_sha256(self): + key = "mktp_test1234567890abcdef" + expected = hashlib.sha256(key.encode()).hexdigest() + assert hash_key(key) == expected + + def test_hash_deterministic(self): + key = generate_raw_key() + assert hash_key(key) == hash_key(key) + + def test_different_keys_different_hashes(self): + key1 = generate_raw_key() + key2 = generate_raw_key() + assert hash_key(key1) != hash_key(key2) + + +class TestAllowedScopes: + """Tests for scope definitions.""" + + def test_allowed_scopes_contains_expected(self): + expected = { + "devices:read", + "devices:write", + "config:read", + "config:write", + "alerts:read", + "firmware:write", + } + assert expected == ALLOWED_SCOPES diff --git a/backend/tests/unit/test_audit_service.py b/backend/tests/unit/test_audit_service.py new file mode 100644 index 0000000..a319821 --- /dev/null +++ b/backend/tests/unit/test_audit_service.py @@ -0,0 +1,75 @@ +"""Unit tests for the audit service and model. + +Tests cover: +- AuditLog model can be imported +- log_action function signature is correct +- Audit logs router is importable with expected endpoints +- CSV export endpoint exists +""" + +import uuid +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + + +class TestAuditLogModel: + """Tests for the AuditLog ORM model.""" + + def test_model_importable(self): + from app.models.audit_log import AuditLog + assert AuditLog.__tablename__ == "audit_logs" + + def test_model_has_required_columns(self): + from app.models.audit_log import AuditLog + mapper = AuditLog.__table__.columns + expected_columns = { + "id", "tenant_id", "user_id", "action", + "resource_type", "resource_id", "device_id", + "details", "ip_address", "created_at", + } + actual_columns = {c.name for c in mapper} + assert expected_columns.issubset(actual_columns), ( + f"Missing columns: {expected_columns - actual_columns}" + ) + + def test_model_exported_from_init(self): + from app.models import AuditLog + assert AuditLog.__tablename__ == "audit_logs" + + +class TestAuditService: + """Tests for the audit service log_action function.""" + + def test_log_action_importable(self): + from app.services.audit_service import log_action + assert callable(log_action) + + @pytest.mark.asyncio + async def test_log_action_does_not_raise_on_db_error(self): + """log_action must swallow exceptions so it never breaks the caller.""" + from app.services.audit_service import log_action + + mock_db = AsyncMock() + mock_db.execute = AsyncMock(side_effect=Exception("DB down")) + + # Should NOT raise even though the DB call fails + await log_action( + db=mock_db, + tenant_id=uuid.uuid4(), + user_id=uuid.uuid4(), + action="test_action", + ) + + +class TestAuditRouter: + """Tests for the audit logs router.""" + + def test_router_importable(self): + from app.routers.audit_logs import router + assert router is not None + + def test_router_has_audit_logs_endpoint(self): + from app.routers.audit_logs import router + paths = [route.path for route in router.routes] + assert "/audit-logs" in paths or any("/audit-logs" in p for p in paths) diff --git a/backend/tests/unit/test_auth.py b/backend/tests/unit/test_auth.py new file mode 100644 index 0000000..be65f92 --- /dev/null +++ b/backend/tests/unit/test_auth.py @@ -0,0 +1,169 @@ +"""Unit tests for the JWT authentication service. + +Tests cover: +- Password hashing and verification (bcrypt) +- JWT access token creation and validation +- JWT refresh token creation and validation +- Token rejection for wrong type, expired, invalid, missing subject + +These are pure function tests -- no database or async required. +""" + +import uuid +from datetime import UTC, datetime, timedelta +from unittest.mock import patch + +import pytest +from fastapi import HTTPException +from jose import jwt + +from app.services.auth import ( + create_access_token, + create_refresh_token, + hash_password, + verify_password, + verify_token, +) +from app.config import settings + + +class TestPasswordHashing: + """Tests for bcrypt password hashing.""" + + def test_hash_returns_different_string(self): + password = "test-password-123!" + hashed = hash_password(password) + assert hashed != password + + def test_hash_verify_roundtrip(self): + password = "test-password-123!" + hashed = hash_password(password) + assert verify_password(password, hashed) is True + + def test_verify_rejects_wrong_password(self): + hashed = hash_password("correct-password") + assert verify_password("wrong-password", hashed) is False + + def test_hash_uses_unique_salts(self): + """Each hash should be different even for the same password (random salt).""" + hash1 = hash_password("same-password") + hash2 = hash_password("same-password") + assert hash1 != hash2 + + def test_verify_both_hashes_valid(self): + """Both unique hashes should verify against the original password.""" + password = "same-password" + hash1 = hash_password(password) + hash2 = hash_password(password) + assert verify_password(password, hash1) is True + assert verify_password(password, hash2) is True + + +class TestAccessToken: + """Tests for JWT access token creation and validation.""" + + def test_create_and_verify_roundtrip(self): + user_id = uuid.uuid4() + tenant_id = uuid.uuid4() + token = create_access_token(user_id=user_id, tenant_id=tenant_id, role="admin") + payload = verify_token(token, expected_type="access") + + assert payload["sub"] == str(user_id) + assert payload["tenant_id"] == str(tenant_id) + assert payload["role"] == "admin" + assert payload["type"] == "access" + + def test_super_admin_null_tenant(self): + user_id = uuid.uuid4() + token = create_access_token(user_id=user_id, tenant_id=None, role="super_admin") + payload = verify_token(token, expected_type="access") + + assert payload["sub"] == str(user_id) + assert payload["tenant_id"] is None + assert payload["role"] == "super_admin" + + def test_contains_expiry(self): + token = create_access_token( + user_id=uuid.uuid4(), tenant_id=uuid.uuid4(), role="viewer" + ) + payload = verify_token(token, expected_type="access") + assert "exp" in payload + assert "iat" in payload + + +class TestRefreshToken: + """Tests for JWT refresh token creation and validation.""" + + def test_create_and_verify_roundtrip(self): + user_id = uuid.uuid4() + token = create_refresh_token(user_id=user_id) + payload = verify_token(token, expected_type="refresh") + + assert payload["sub"] == str(user_id) + assert payload["type"] == "refresh" + + def test_refresh_token_has_no_tenant_or_role(self): + token = create_refresh_token(user_id=uuid.uuid4()) + payload = verify_token(token, expected_type="refresh") + + # Refresh tokens intentionally omit tenant_id and role + assert "tenant_id" not in payload + assert "role" not in payload + + +class TestTokenRejection: + """Tests for JWT token validation failure cases.""" + + def test_rejects_wrong_type(self): + """Access token should not verify as refresh, and vice versa.""" + access_token = create_access_token( + user_id=uuid.uuid4(), tenant_id=uuid.uuid4(), role="admin" + ) + with pytest.raises(HTTPException) as exc_info: + verify_token(access_token, expected_type="refresh") + assert exc_info.value.status_code == 401 + + def test_rejects_expired_token(self): + """Manually craft an expired token and verify it is rejected.""" + expired_payload = { + "sub": str(uuid.uuid4()), + "type": "access", + "exp": datetime.now(UTC) - timedelta(hours=1), + "iat": datetime.now(UTC) - timedelta(hours=2), + } + expired_token = jwt.encode( + expired_payload, settings.JWT_SECRET_KEY, algorithm=settings.JWT_ALGORITHM + ) + with pytest.raises(HTTPException) as exc_info: + verify_token(expired_token, expected_type="access") + assert exc_info.value.status_code == 401 + + def test_rejects_invalid_token(self): + with pytest.raises(HTTPException) as exc_info: + verify_token("not-a-valid-jwt", expected_type="access") + assert exc_info.value.status_code == 401 + + def test_rejects_wrong_signing_key(self): + """Token signed with a different key should be rejected.""" + payload = { + "sub": str(uuid.uuid4()), + "type": "access", + "exp": datetime.now(UTC) + timedelta(hours=1), + } + wrong_key_token = jwt.encode(payload, "wrong-secret-key", algorithm="HS256") + with pytest.raises(HTTPException) as exc_info: + verify_token(wrong_key_token, expected_type="access") + assert exc_info.value.status_code == 401 + + def test_rejects_missing_subject(self): + """Token without 'sub' claim should be rejected.""" + no_sub_payload = { + "type": "access", + "exp": datetime.now(UTC) + timedelta(hours=1), + } + no_sub_token = jwt.encode( + no_sub_payload, settings.JWT_SECRET_KEY, algorithm=settings.JWT_ALGORITHM + ) + with pytest.raises(HTTPException) as exc_info: + verify_token(no_sub_token, expected_type="access") + assert exc_info.value.status_code == 401 diff --git a/backend/tests/unit/test_crypto.py b/backend/tests/unit/test_crypto.py new file mode 100644 index 0000000..f05d325 --- /dev/null +++ b/backend/tests/unit/test_crypto.py @@ -0,0 +1,126 @@ +"""Unit tests for the credential encryption/decryption service. + +Tests cover: +- Encryption/decryption round-trip with valid key +- Random nonce ensures different ciphertext per encryption +- Wrong key rejection (InvalidTag) +- Invalid key length rejection (ValueError) +- Unicode and JSON payload handling +- Tampered ciphertext detection + +These are pure function tests -- no database or async required. +""" + +import json +import os + +import pytest +from cryptography.exceptions import InvalidTag + +from app.services.crypto import decrypt_credentials, encrypt_credentials + + +class TestEncryptDecryptRoundTrip: + """Tests for successful encryption/decryption cycles.""" + + def test_basic_roundtrip(self): + key = os.urandom(32) + plaintext = "secret-password" + ciphertext = encrypt_credentials(plaintext, key) + result = decrypt_credentials(ciphertext, key) + assert result == plaintext + + def test_json_credentials_roundtrip(self): + """The actual use case: encrypting JSON credential objects.""" + key = os.urandom(32) + creds = json.dumps({"username": "admin", "password": "RouterOS!123"}) + ciphertext = encrypt_credentials(creds, key) + result = decrypt_credentials(ciphertext, key) + parsed = json.loads(result) + assert parsed["username"] == "admin" + assert parsed["password"] == "RouterOS!123" + + def test_unicode_roundtrip(self): + key = os.urandom(32) + plaintext = "password-with-unicode-\u00e9\u00e8\u00ea" + ciphertext = encrypt_credentials(plaintext, key) + result = decrypt_credentials(ciphertext, key) + assert result == plaintext + + def test_empty_string_roundtrip(self): + key = os.urandom(32) + ciphertext = encrypt_credentials("", key) + result = decrypt_credentials(ciphertext, key) + assert result == "" + + def test_long_payload_roundtrip(self): + """Ensure large payloads work (e.g., SSH keys in credentials).""" + key = os.urandom(32) + plaintext = "x" * 10000 + ciphertext = encrypt_credentials(plaintext, key) + result = decrypt_credentials(ciphertext, key) + assert result == plaintext + + +class TestNonceRandomness: + """Tests that encryption uses random nonces.""" + + def test_different_ciphertext_each_time(self): + """Two encryptions of the same plaintext should produce different ciphertext + because a random 12-byte nonce is generated each time.""" + key = os.urandom(32) + plaintext = "same-plaintext" + ct1 = encrypt_credentials(plaintext, key) + ct2 = encrypt_credentials(plaintext, key) + assert ct1 != ct2 + + def test_both_decrypt_correctly(self): + """Both different ciphertexts should decrypt to the same plaintext.""" + key = os.urandom(32) + plaintext = "same-plaintext" + ct1 = encrypt_credentials(plaintext, key) + ct2 = encrypt_credentials(plaintext, key) + assert decrypt_credentials(ct1, key) == plaintext + assert decrypt_credentials(ct2, key) == plaintext + + +class TestDecryptionFailures: + """Tests for proper rejection of invalid inputs.""" + + def test_wrong_key_raises_invalid_tag(self): + key1 = os.urandom(32) + key2 = os.urandom(32) + ciphertext = encrypt_credentials("secret", key1) + with pytest.raises(InvalidTag): + decrypt_credentials(ciphertext, key2) + + def test_tampered_ciphertext_raises_invalid_tag(self): + """Flipping a byte in the ciphertext should cause authentication failure.""" + key = os.urandom(32) + ciphertext = bytearray(encrypt_credentials("secret", key)) + # Flip a byte in the encrypted portion (after the 12-byte nonce) + ciphertext[15] ^= 0xFF + with pytest.raises(InvalidTag): + decrypt_credentials(bytes(ciphertext), key) + + +class TestKeyValidation: + """Tests for encryption key length validation.""" + + def test_short_key_encrypt_raises(self): + with pytest.raises(ValueError, match="32 bytes"): + encrypt_credentials("test", os.urandom(16)) + + def test_long_key_encrypt_raises(self): + with pytest.raises(ValueError, match="32 bytes"): + encrypt_credentials("test", os.urandom(64)) + + def test_short_key_decrypt_raises(self): + key = os.urandom(32) + ciphertext = encrypt_credentials("test", key) + with pytest.raises(ValueError, match="32 bytes"): + decrypt_credentials(ciphertext, os.urandom(16)) + + def test_empty_key_raises(self): + with pytest.raises(ValueError, match="32 bytes"): + encrypt_credentials("test", b"") diff --git a/backend/tests/unit/test_maintenance_windows.py b/backend/tests/unit/test_maintenance_windows.py new file mode 100644 index 0000000..67b0cb5 --- /dev/null +++ b/backend/tests/unit/test_maintenance_windows.py @@ -0,0 +1,121 @@ +"""Unit tests for maintenance window model, router schemas, and alert suppression. + +Tests cover: +- MaintenanceWindow ORM model imports and field definitions +- MaintenanceWindowCreate/Update/Response Pydantic schema validation +- Alert evaluator _is_device_in_maintenance integration +- Router registration in main app +""" + +import uuid +from datetime import datetime, timezone, timedelta + +import pytest +from pydantic import ValidationError + + +class TestMaintenanceWindowModel: + """Test that the MaintenanceWindow ORM model is importable and has correct fields.""" + + def test_model_importable(self): + from app.models.maintenance_window import MaintenanceWindow + assert MaintenanceWindow.__tablename__ == "maintenance_windows" + + def test_model_exported_from_init(self): + from app.models import MaintenanceWindow + assert MaintenanceWindow.__tablename__ == "maintenance_windows" + + def test_model_has_required_columns(self): + from app.models.maintenance_window import MaintenanceWindow + mapper = MaintenanceWindow.__mapper__ + column_names = {c.key for c in mapper.columns} + expected = { + "id", "tenant_id", "name", "device_ids", + "start_at", "end_at", "suppress_alerts", + "notes", "created_by", "created_at", "updated_at", + } + assert expected.issubset(column_names), f"Missing columns: {expected - column_names}" + + +class TestMaintenanceWindowSchemas: + """Test Pydantic schemas for request/response validation.""" + + def test_create_schema_valid(self): + from app.routers.maintenance_windows import MaintenanceWindowCreate + data = MaintenanceWindowCreate( + name="Nightly update", + device_ids=["abc-123"], + start_at=datetime.now(timezone.utc), + end_at=datetime.now(timezone.utc) + timedelta(hours=2), + suppress_alerts=True, + notes="Scheduled maintenance", + ) + assert data.name == "Nightly update" + assert data.suppress_alerts is True + + def test_create_schema_defaults(self): + from app.routers.maintenance_windows import MaintenanceWindowCreate + data = MaintenanceWindowCreate( + name="Quick reboot", + device_ids=[], + start_at=datetime.now(timezone.utc), + end_at=datetime.now(timezone.utc) + timedelta(hours=1), + ) + assert data.suppress_alerts is True # default + assert data.notes is None + + def test_update_schema_partial(self): + from app.routers.maintenance_windows import MaintenanceWindowUpdate + data = MaintenanceWindowUpdate(name="Updated name") + assert data.name == "Updated name" + assert data.device_ids is None # all optional + + def test_response_schema(self): + from app.routers.maintenance_windows import MaintenanceWindowResponse + data = MaintenanceWindowResponse( + id="abc", + tenant_id="def", + name="Test", + device_ids=["x"], + start_at=datetime.now(timezone.utc).isoformat(), + end_at=datetime.now(timezone.utc).isoformat(), + suppress_alerts=True, + notes=None, + created_by="ghi", + created_at=datetime.now(timezone.utc).isoformat(), + ) + assert data.id == "abc" + + +class TestRouterRegistration: + """Test that the maintenance_windows router is properly registered.""" + + def test_router_importable(self): + from app.routers.maintenance_windows import router + assert router is not None + + def test_router_has_routes(self): + from app.routers.maintenance_windows import router + paths = [r.path for r in router.routes] + assert any("maintenance-windows" in p for p in paths) + + def test_main_app_includes_router(self): + try: + from app.main import app + except ImportError: + pytest.skip("app.main requires full dependencies (prometheus, etc.)") + route_paths = [r.path for r in app.routes] + route_paths_str = " ".join(route_paths) + assert "maintenance-windows" in route_paths_str + + +class TestAlertEvaluatorMaintenance: + """Test that alert_evaluator has maintenance window check capability.""" + + def test_maintenance_cache_exists(self): + from app.services import alert_evaluator + assert hasattr(alert_evaluator, "_maintenance_cache") + + def test_is_device_in_maintenance_function_exists(self): + from app.services.alert_evaluator import _is_device_in_maintenance + assert callable(_is_device_in_maintenance) diff --git a/backend/tests/unit/test_security.py b/backend/tests/unit/test_security.py new file mode 100644 index 0000000..29b7706 --- /dev/null +++ b/backend/tests/unit/test_security.py @@ -0,0 +1,231 @@ +"""Unit tests for security hardening. + +Tests cover: +- Production startup validation (insecure defaults rejection) +- Security headers middleware (per-environment header behavior) + +These are pure function/middleware tests -- no database or async required +for startup validation, async only for middleware tests. +""" + +from types import SimpleNamespace +from unittest.mock import patch + +import pytest + +from app.config import KNOWN_INSECURE_DEFAULTS, validate_production_settings + + +class TestStartupValidation: + """Tests for validate_production_settings().""" + + def _make_settings(self, **kwargs): + """Create a mock settings object with given field values.""" + defaults = { + "ENVIRONMENT": "dev", + "JWT_SECRET_KEY": "change-this-in-production-use-a-long-random-string", + "CREDENTIAL_ENCRYPTION_KEY": "LLLjnfBZTSycvL2U07HDSxUeTtLxb9cZzryQl0R9E4w=", + } + defaults.update(kwargs) + return SimpleNamespace(**defaults) + + def test_production_rejects_insecure_jwt_secret(self): + """Production with default JWT secret must exit.""" + settings = self._make_settings( + ENVIRONMENT="production", + JWT_SECRET_KEY=KNOWN_INSECURE_DEFAULTS["JWT_SECRET_KEY"][0], + ) + with pytest.raises(SystemExit) as exc_info: + validate_production_settings(settings) + assert exc_info.value.code == 1 + + def test_production_rejects_insecure_encryption_key(self): + """Production with default encryption key must exit.""" + settings = self._make_settings( + ENVIRONMENT="production", + JWT_SECRET_KEY="a-real-secure-jwt-secret-that-is-long-enough", + CREDENTIAL_ENCRYPTION_KEY=KNOWN_INSECURE_DEFAULTS["CREDENTIAL_ENCRYPTION_KEY"][0], + ) + with pytest.raises(SystemExit) as exc_info: + validate_production_settings(settings) + assert exc_info.value.code == 1 + + def test_dev_allows_insecure_defaults(self): + """Dev environment allows insecure defaults without error.""" + settings = self._make_settings( + ENVIRONMENT="dev", + JWT_SECRET_KEY=KNOWN_INSECURE_DEFAULTS["JWT_SECRET_KEY"][0], + CREDENTIAL_ENCRYPTION_KEY=KNOWN_INSECURE_DEFAULTS["CREDENTIAL_ENCRYPTION_KEY"][0], + ) + # Should NOT raise + validate_production_settings(settings) + + def test_production_allows_secure_values(self): + """Production with non-default secrets should pass.""" + settings = self._make_settings( + ENVIRONMENT="production", + JWT_SECRET_KEY="a-real-secure-jwt-secret-that-is-long-enough-for-production", + CREDENTIAL_ENCRYPTION_KEY="dGhpcyBpcyBhIHNlY3VyZSBrZXkgdGhhdCBpcw==", + ) + # Should NOT raise + validate_production_settings(settings) + + +class TestSecurityHeadersMiddleware: + """Tests for SecurityHeadersMiddleware.""" + + @pytest.fixture + def prod_app(self): + """Create a minimal FastAPI app with security middleware in production mode.""" + from fastapi import FastAPI + from app.middleware.security_headers import SecurityHeadersMiddleware + + app = FastAPI() + app.add_middleware(SecurityHeadersMiddleware, environment="production") + + @app.get("/test") + async def test_endpoint(): + return {"status": "ok"} + + return app + + @pytest.fixture + def dev_app(self): + """Create a minimal FastAPI app with security middleware in dev mode.""" + from fastapi import FastAPI + from app.middleware.security_headers import SecurityHeadersMiddleware + + app = FastAPI() + app.add_middleware(SecurityHeadersMiddleware, environment="dev") + + @app.get("/test") + async def test_endpoint(): + return {"status": "ok"} + + return app + + @pytest.mark.asyncio + async def test_production_includes_hsts(self, prod_app): + """Production responses must include HSTS header.""" + import httpx + + transport = httpx.ASGITransport(app=prod_app) + async with httpx.AsyncClient(transport=transport, base_url="http://test") as client: + response = await client.get("/test") + + assert response.status_code == 200 + assert response.headers["strict-transport-security"] == "max-age=31536000; includeSubDomains" + assert response.headers["x-content-type-options"] == "nosniff" + assert response.headers["x-frame-options"] == "DENY" + assert response.headers["cache-control"] == "no-store" + + @pytest.mark.asyncio + async def test_dev_excludes_hsts(self, dev_app): + """Dev responses must NOT include HSTS (breaks plain HTTP).""" + import httpx + + transport = httpx.ASGITransport(app=dev_app) + async with httpx.AsyncClient(transport=transport, base_url="http://test") as client: + response = await client.get("/test") + + assert response.status_code == 200 + assert "strict-transport-security" not in response.headers + assert response.headers["x-content-type-options"] == "nosniff" + assert response.headers["x-frame-options"] == "DENY" + assert response.headers["cache-control"] == "no-store" + + @pytest.mark.asyncio + async def test_csp_header_present_production(self, prod_app): + """Production responses must include CSP header.""" + import httpx + + transport = httpx.ASGITransport(app=prod_app) + async with httpx.AsyncClient(transport=transport, base_url="http://test") as client: + response = await client.get("/test") + + assert "content-security-policy" in response.headers + csp = response.headers["content-security-policy"] + assert "default-src 'self'" in csp + assert "script-src" in csp + + @pytest.mark.asyncio + async def test_csp_header_present_dev(self, dev_app): + """Dev responses must include CSP header.""" + import httpx + + transport = httpx.ASGITransport(app=dev_app) + async with httpx.AsyncClient(transport=transport, base_url="http://test") as client: + response = await client.get("/test") + + assert "content-security-policy" in response.headers + csp = response.headers["content-security-policy"] + assert "default-src 'self'" in csp + + @pytest.mark.asyncio + async def test_csp_production_blocks_inline_scripts(self, prod_app): + """Production CSP must block inline scripts (no unsafe-inline in script-src).""" + import httpx + + transport = httpx.ASGITransport(app=prod_app) + async with httpx.AsyncClient(transport=transport, base_url="http://test") as client: + response = await client.get("/test") + + csp = response.headers["content-security-policy"] + # Extract the script-src directive value + script_src = [d for d in csp.split(";") if "script-src" in d][0] + assert "'unsafe-inline'" not in script_src + assert "'unsafe-eval'" not in script_src + assert "'self'" in script_src + + @pytest.mark.asyncio + async def test_csp_dev_allows_unsafe_inline(self, dev_app): + """Dev CSP must allow unsafe-inline and unsafe-eval for Vite HMR.""" + import httpx + + transport = httpx.ASGITransport(app=dev_app) + async with httpx.AsyncClient(transport=transport, base_url="http://test") as client: + response = await client.get("/test") + + csp = response.headers["content-security-policy"] + script_src = [d for d in csp.split(";") if "script-src" in d][0] + assert "'unsafe-inline'" in script_src + assert "'unsafe-eval'" in script_src + + @pytest.mark.asyncio + async def test_csp_production_allows_inline_styles(self, prod_app): + """Production CSP must allow unsafe-inline for styles (Tailwind, Framer Motion, Radix).""" + import httpx + + transport = httpx.ASGITransport(app=prod_app) + async with httpx.AsyncClient(transport=transport, base_url="http://test") as client: + response = await client.get("/test") + + csp = response.headers["content-security-policy"] + style_src = [d for d in csp.split(";") if "style-src" in d][0] + assert "'unsafe-inline'" in style_src + + @pytest.mark.asyncio + async def test_csp_allows_websocket_connections(self, prod_app): + """CSP must allow wss: and ws: for SSE/WebSocket connections.""" + import httpx + + transport = httpx.ASGITransport(app=prod_app) + async with httpx.AsyncClient(transport=transport, base_url="http://test") as client: + response = await client.get("/test") + + csp = response.headers["content-security-policy"] + connect_src = [d for d in csp.split(";") if "connect-src" in d][0] + assert "wss:" in connect_src + assert "ws:" in connect_src + + @pytest.mark.asyncio + async def test_csp_frame_ancestors_none(self, prod_app): + """CSP must include frame-ancestors 'none' (anti-clickjacking).""" + import httpx + + transport = httpx.ASGITransport(app=prod_app) + async with httpx.AsyncClient(transport=transport, base_url="http://test") as client: + response = await client.get("/test") + + csp = response.headers["content-security-policy"] + assert "frame-ancestors 'none'" in csp diff --git a/docker-compose.observability.yml b/docker-compose.observability.yml new file mode 100644 index 0000000..23e75cb --- /dev/null +++ b/docker-compose.observability.yml @@ -0,0 +1,49 @@ +# docker-compose.observability.yml -- Observability stack (Prometheus + Grafana) +# Usage: docker compose -f docker-compose.yml -f docker-compose.observability.yml up -d +# Or with dev services: docker compose -f docker-compose.yml -f docker-compose.override.yml -f docker-compose.observability.yml up -d + +services: + prometheus: + image: prom/prometheus:latest + container_name: tod_prometheus + ports: + - "9090:9090" + volumes: + - ./infrastructure/observability/prometheus.yml:/etc/prometheus/prometheus.yml:ro + - ./docker-data/prometheus:/prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--storage.tsdb.retention.time=15d' + - '--web.console.libraries=/usr/share/prometheus/console_libraries' + - '--web.console.templates=/usr/share/prometheus/consoles' + deploy: + resources: + limits: + memory: 256M + networks: + - tod + + grafana: + image: grafana/grafana:latest + container_name: tod_grafana + ports: + - "3001:3000" + volumes: + - ./infrastructure/observability/grafana/provisioning:/etc/grafana/provisioning:ro + - ./infrastructure/observability/grafana/dashboards:/var/lib/grafana/dashboards:ro + - ./docker-data/grafana:/var/lib/grafana + environment: + GF_SECURITY_ADMIN_USER: admin + GF_SECURITY_ADMIN_PASSWORD: admin + GF_AUTH_ANONYMOUS_ENABLED: "true" + GF_AUTH_ANONYMOUS_ORG_ROLE: Viewer + GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH: /var/lib/grafana/dashboards/api-overview.json + deploy: + resources: + limits: + memory: 128M + depends_on: + - prometheus + networks: + - tod diff --git a/docker-compose.override.yml b/docker-compose.override.yml new file mode 100644 index 0000000..4488130 --- /dev/null +++ b/docker-compose.override.yml @@ -0,0 +1,111 @@ +# docker-compose.override.yml -- Dev environment (auto-loaded by `docker compose up`) +# Adds application services with hot reload, debug logging, and dev defaults. + +services: + api: + build: + context: . + dockerfile: infrastructure/docker/Dockerfile.api + container_name: tod_api + ports: + - "8001:8000" + env_file: .env + environment: + ENVIRONMENT: dev + LOG_LEVEL: debug + DEBUG: "true" + GUNICORN_WORKERS: "1" + DATABASE_URL: postgresql+asyncpg://postgres:postgres@postgres:5432/mikrotik + SYNC_DATABASE_URL: postgresql+psycopg2://postgres:postgres@postgres:5432/mikrotik + APP_USER_DATABASE_URL: postgresql+asyncpg://app_user:app_password@postgres:5432/mikrotik + REDIS_URL: redis://redis:6379/0 + NATS_URL: nats://nats:4222 + FIRST_ADMIN_EMAIL: ${FIRST_ADMIN_EMAIL:-admin@mikrotik-portal.dev} + FIRST_ADMIN_PASSWORD: ${FIRST_ADMIN_PASSWORD:-changeme-in-production} + CREDENTIAL_ENCRYPTION_KEY: ${CREDENTIAL_ENCRYPTION_KEY:?Set CREDENTIAL_ENCRYPTION_KEY in .env} + JWT_SECRET_KEY: ${JWT_SECRET_KEY:?Set JWT_SECRET_KEY in .env} + OPENBAO_ADDR: http://openbao:8200 + OPENBAO_TOKEN: dev-openbao-token + GIT_STORE_PATH: /data/git-store + WIREGUARD_CONFIG_PATH: /data/wireguard + WIREGUARD_GATEWAY: wireguard + cap_add: + - NET_ADMIN + user: root + command: > + sh -c " + if [ -n \"$$WIREGUARD_GATEWAY\" ]; then + apt-get update -qq && apt-get install -y -qq iproute2 >/dev/null 2>&1 || true; + GW_IP=$$(getent hosts $$WIREGUARD_GATEWAY 2>/dev/null | awk '{print $$1}'); + [ -z \"$$GW_IP\" ] && GW_IP=$$WIREGUARD_GATEWAY; + ip route add 10.10.0.0/16 via $$GW_IP 2>/dev/null || true; + echo VPN route: 10.10.0.0/16 via $$GW_IP; + fi; + exec su -s /bin/sh appuser -c 'gunicorn app.main:app --config gunicorn.conf.py' + " + volumes: + - ./backend:/app + - ./docker-data/git-store:/data/git-store + - ./docker-data/wireguard:/data/wireguard + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_healthy + nats: + condition: service_healthy + deploy: + resources: + limits: + memory: 512M + networks: + - tod + + poller: + build: + context: ./poller + dockerfile: ./Dockerfile + container_name: tod_poller + env_file: .env + environment: + ENVIRONMENT: dev + LOG_LEVEL: debug + DATABASE_URL: postgres://poller_user:poller_password@postgres:5432/mikrotik + REDIS_URL: redis://redis:6379/0 + NATS_URL: nats://nats:4222 + CREDENTIAL_ENCRYPTION_KEY: ${CREDENTIAL_ENCRYPTION_KEY:?Set CREDENTIAL_ENCRYPTION_KEY in .env} + OPENBAO_ADDR: http://openbao:8200 + OPENBAO_TOKEN: dev-openbao-token + POLL_INTERVAL_SECONDS: 60 + WIREGUARD_GATEWAY: wireguard + cap_add: + - NET_ADMIN + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_healthy + nats: + condition: service_healthy + deploy: + resources: + limits: + memory: 256M + networks: + - tod + + frontend: + build: + context: . + dockerfile: infrastructure/docker/Dockerfile.frontend + container_name: tod_frontend + ports: + - "3000:80" + depends_on: + - api + deploy: + resources: + limits: + memory: 64M + networks: + - tod diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml new file mode 100644 index 0000000..27f1dbb --- /dev/null +++ b/docker-compose.prod.yml @@ -0,0 +1,82 @@ +# docker-compose.prod.yml -- Production environment override +# Usage: docker compose -f docker-compose.yml -f docker-compose.prod.yml --env-file .env.prod up -d + +services: + api: + build: + context: . + dockerfile: infrastructure/docker/Dockerfile.api + container_name: tod_api + env_file: .env.prod + environment: + ENVIRONMENT: production + LOG_LEVEL: info + GUNICORN_WORKERS: "2" + command: ["gunicorn", "app.main:app", "--config", "gunicorn.conf.py"] + volumes: + - ./docker-data/git-store:/data/git-store + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_healthy + nats: + condition: service_healthy + deploy: + resources: + limits: + memory: 512M + restart: unless-stopped + logging: + driver: json-file + options: + max-size: "10m" + max-file: "3" + networks: + - tod + + poller: + build: + context: ./poller + dockerfile: ./Dockerfile + container_name: tod_poller + env_file: .env.prod + environment: + ENVIRONMENT: production + LOG_LEVEL: info + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_healthy + nats: + condition: service_healthy + deploy: + resources: + limits: + memory: 256M + restart: unless-stopped + logging: + driver: json-file + options: + max-size: "10m" + max-file: "3" + networks: + - tod + + frontend: + build: + context: . + dockerfile: infrastructure/docker/Dockerfile.frontend + container_name: tod_frontend + ports: + - "80:80" + depends_on: + - api + deploy: + resources: + limits: + memory: 64M + restart: unless-stopped + networks: + - tod diff --git a/docker-compose.staging.yml b/docker-compose.staging.yml new file mode 100644 index 0000000..fada08a --- /dev/null +++ b/docker-compose.staging.yml @@ -0,0 +1,88 @@ +# docker-compose.staging.yml -- Staging environment override +# Usage: docker compose -f docker-compose.yml -f docker-compose.staging.yml --env-file .env.staging up -d +# +# Staging mirrors production behavior (gunicorn, info logging, restart policies) +# but exposes the API port for debugging and uses distinct container names/ports +# so it can coexist with dev on the same host if needed. + +services: + api: + build: + context: . + dockerfile: infrastructure/docker/Dockerfile.api + container_name: tod_staging_api + ports: + - "8081:8000" + env_file: .env.staging + environment: + ENVIRONMENT: staging + LOG_LEVEL: info + GUNICORN_WORKERS: "2" + command: ["gunicorn", "app.main:app", "--config", "gunicorn.conf.py"] + volumes: + - ./docker-data/staging-git-store:/data/git-store + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_healthy + nats: + condition: service_healthy + deploy: + resources: + limits: + memory: 512M + restart: unless-stopped + logging: + driver: json-file + options: + max-size: "10m" + max-file: "3" + networks: + - tod + + poller: + build: + context: ./poller + dockerfile: ./Dockerfile + container_name: tod_staging_poller + env_file: .env.staging + environment: + ENVIRONMENT: staging + LOG_LEVEL: info + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_healthy + nats: + condition: service_healthy + deploy: + resources: + limits: + memory: 256M + restart: unless-stopped + logging: + driver: json-file + options: + max-size: "10m" + max-file: "3" + networks: + - tod + + frontend: + build: + context: . + dockerfile: infrastructure/docker/Dockerfile.frontend + container_name: tod_staging_frontend + ports: + - "3080:80" + depends_on: + - api + deploy: + resources: + limits: + memory: 64M + restart: unless-stopped + networks: + - tod diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..01834d0 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,164 @@ +# ─── Low-RAM build note ────────────────────────────────────────────────────── +# On a 2-core / 2-4 GB server, build images ONE AT A TIME to avoid OOM: +# +# docker compose build api +# docker compose build poller +# docker compose build frontend +# +# Running `docker compose build` (all at once) will trigger three concurrent +# multi-stage builds (Go, Python/pip, Node/tsc/Vite) that together can peak at +# 3-4 GB RAM, crashing the machine before any image finishes. +# +# Once built, starting the stack uses far less RAM (nginx + uvicorn + Go binary). +# ───────────────────────────────────────────────────────────────────────────── + +services: + postgres: + image: timescale/timescaledb:2.17.2-pg17 + container_name: tod_postgres + env_file: .env + environment: + POSTGRES_DB: ${POSTGRES_DB:-mikrotik} + POSTGRES_USER: ${POSTGRES_USER:-postgres} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres} + ports: + - "5432:5432" + volumes: + - /Volumes/ssd01/mikrotik/docker-data/postgres:/var/lib/postgresql/data + - ./scripts/init-postgres.sql:/docker-entrypoint-initdb.d/init.sql:ro + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres -d mikrotik"] + interval: 5s + timeout: 5s + retries: 5 + deploy: + resources: + limits: + memory: 512M + networks: + - mikrotik + + redis: + image: redis:7-alpine + container_name: tod_redis + env_file: .env + ports: + - "6379:6379" + volumes: + - /Volumes/ssd01/mikrotik/docker-data/redis:/data + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 5s + timeout: 3s + retries: 5 + deploy: + resources: + limits: + memory: 128M + networks: + - mikrotik + + nats: + image: nats:2-alpine + container_name: tod_nats + command: ["--jetstream", "--store_dir", "/data", "-m", "8222"] + env_file: .env + ports: + - "4222:4222" + - "8222:8222" + volumes: + - /Volumes/ssd01/mikrotik/docker-data/nats:/data + healthcheck: + test: ["CMD-SHELL", "wget --spider -q http://localhost:8222/healthz || exit 1"] + interval: 5s + timeout: 5s + retries: 5 + deploy: + resources: + limits: + memory: 128M + networks: + - mikrotik + + openbao: + image: openbao/openbao:2.1 + container_name: tod_openbao + entrypoint: /bin/sh + command: + - -c + - | + # Start OpenBao in background + bao server -dev -dev-listen-address=0.0.0.0:8200 & + BAO_PID=$$! + # Wait for ready and run init + sleep 2 + /init/init.sh + # Wait for OpenBao process + wait $$BAO_PID + environment: + BAO_DEV_ROOT_TOKEN_ID: dev-openbao-token + BAO_DEV_LISTEN_ADDRESS: "0.0.0.0:8200" + ports: + - "8200:8200" + volumes: + - ./infrastructure/openbao/init.sh:/init/init.sh:ro + cap_add: + - IPC_LOCK + healthcheck: + test: ["CMD-SHELL", "wget -qO- http://127.0.0.1:8200/v1/sys/health | grep -q '\"sealed\":false' || exit 1"] + interval: 5s + timeout: 3s + retries: 5 + deploy: + resources: + limits: + memory: 256M + networks: + - mikrotik + + wireguard: + image: lscr.io/linuxserver/wireguard:latest + container_name: tod_wireguard + environment: + - PUID=1000 + - PGID=1000 + - TZ=UTC + volumes: + - /Volumes/ssd01/mikrotik/docker-data/wireguard:/config + - /Volumes/ssd01/mikrotik/docker-data/wireguard/custom-cont-init.d:/custom-cont-init.d + ports: + - "51820:51820/udp" + cap_add: + - NET_ADMIN + sysctls: + - net.ipv4.ip_forward=1 + - net.ipv4.conf.all.src_valid_mark=1 + restart: unless-stopped + healthcheck: + test: ["CMD-SHELL", "ip link show wg0 2>/dev/null || exit 0"] + interval: 10s + timeout: 5s + retries: 3 + deploy: + resources: + limits: + memory: 128M + networks: + - mikrotik + + mailpit: + image: axllent/mailpit:latest + profiles: ["mail-testing"] + ports: + - "8026:8025" + - "1026:1025" + networks: + - mikrotik + deploy: + resources: + limits: + memory: 64M + +networks: + mikrotik: + driver: bridge diff --git a/docs/API.md b/docs/API.md new file mode 100644 index 0000000..747d587 --- /dev/null +++ b/docs/API.md @@ -0,0 +1,117 @@ +# API Reference + +## Overview + +TOD exposes a REST API built with FastAPI. Interactive documentation is available at: + +- Swagger UI: `http://:/docs` (dev environment only) +- ReDoc: `http://:/redoc` (dev environment only) + +Both Swagger and ReDoc are disabled in staging/production environments. + +## Authentication + +### SRP-6a Login + +- `POST /api/auth/login` -- SRP-6a authentication (returns JWT access + refresh tokens) +- `POST /api/auth/refresh` -- Refresh an expired access token +- `POST /api/auth/logout` -- Invalidate the current session + +All authenticated endpoints require one of: + +- `Authorization: Bearer ` header +- httpOnly cookie (set automatically by the login flow) + +Access tokens expire after 15 minutes. Refresh tokens are valid for 7 days. + +### API Key Authentication + +- Create API keys in Admin > API Keys +- Use header: `X-API-Key: mktp_` +- Keys have operator-level RBAC permissions +- Prefix: `mktp_`, stored as SHA-256 hash + +## Endpoint Groups + +All API routes are mounted under the `/api` prefix. + +| Group | Prefix | Description | +|-------|--------|-------------| +| Auth | `/api/auth/*` | Login, register, SRP exchange, password reset, token refresh | +| Tenants | `/api/tenants/*` | Tenant/organization CRUD | +| Users | `/api/users/*` | User management, RBAC role assignment | +| Devices | `/api/devices/*` | Device CRUD, scanning, status | +| Device Groups | `/api/device-groups/*` | Logical device grouping | +| Device Tags | `/api/device-tags/*` | Tag-based device labeling | +| Metrics | `/api/metrics/*` | TimescaleDB device metrics (CPU, memory, traffic) | +| Config Backups | `/api/config-backups/*` | Automated RouterOS config backup history | +| Config Editor | `/api/config-editor/*` | Live RouterOS config browsing and editing | +| Firmware | `/api/firmware/*` | RouterOS firmware version management and upgrades | +| Alerts | `/api/alerts/*` | Alert rule CRUD, alert history | +| Events | `/api/events/*` | Device event log | +| Device Logs | `/api/device-logs/*` | RouterOS syslog entries | +| Templates | `/api/templates/*` | Config templates for batch operations | +| Clients | `/api/clients/*` | Connected client (DHCP lease) data | +| Topology | `/api/topology/*` | Network topology map data | +| SSE | `/api/sse/*` | Server-Sent Events for real-time updates | +| Audit Logs | `/api/audit-logs/*` | Immutable audit trail | +| Reports | `/api/reports/*` | PDF report generation (Jinja2 + WeasyPrint) | +| API Keys | `/api/api-keys/*` | API key CRUD | +| Maintenance Windows | `/api/maintenance-windows/*` | Scheduled maintenance window management | +| VPN | `/api/vpn/*` | WireGuard VPN tunnel management | +| Certificates | `/api/certificates/*` | Internal CA and device certificate management | +| Transparency | `/api/transparency/*` | KMS access event dashboard | + +## Health Checks + +| Endpoint | Type | Description | +|----------|------|-------------| +| `GET /health` | Liveness | Always returns 200 if the API process is alive. Response includes `version`. | +| `GET /health/ready` | Readiness | Returns 200 only when PostgreSQL, Redis, and NATS are all healthy. Returns 503 otherwise. | +| `GET /api/health` | Liveness | Backward-compatible alias under `/api` prefix. | + +## Rate Limiting + +- Auth endpoints: 5 requests/minute per IP +- General endpoints: no global rate limit (per-route limits may apply) + +Rate limit violations return HTTP 429 with a JSON error body. + +## Error Format + +All error responses use a standard JSON format: + +```json +{ + "detail": "Human-readable error message" +} +``` + +HTTP status codes follow REST conventions: + +| Code | Meaning | +|------|---------| +| 400 | Bad request / validation error | +| 401 | Unauthorized (missing or expired token) | +| 403 | Forbidden (insufficient RBAC permissions) | +| 404 | Resource not found | +| 409 | Conflict (duplicate resource) | +| 422 | Unprocessable entity (Pydantic validation) | +| 429 | Rate limit exceeded | +| 500 | Internal server error | +| 503 | Service unavailable (readiness check failed) | + +## RBAC Roles + +Endpoints enforce role-based access control. The four roles in descending privilege order: + +| Role | Scope | Description | +|------|-------|-------------| +| `super_admin` | Global (no tenant) | Full platform access, tenant management | +| `admin` | Tenant | Full access within their tenant | +| `operator` | Tenant | Device operations, config changes | +| `viewer` | Tenant | Read-only access | + +## Multi-Tenancy + +Tenant isolation is enforced at the database level via PostgreSQL Row-Level Security (RLS). The `app_user` database role automatically filters all queries by the authenticated user's `tenant_id`. Super admins operate outside tenant scope. diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md new file mode 100644 index 0000000..160ace0 --- /dev/null +++ b/docs/ARCHITECTURE.md @@ -0,0 +1,329 @@ +# Architecture + +## System Overview + +TOD (The Other Dude) is a containerized MSP fleet management platform for MikroTik RouterOS devices. It uses a three-service architecture: a React frontend, a Python FastAPI backend, and a Go poller. All services communicate through PostgreSQL, Redis, and NATS JetStream. Multi-tenancy is enforced at the database level via PostgreSQL Row-Level Security (RLS). + +``` +┌─────────────┐ ┌─────────────────┐ ┌──────────────┐ +│ Frontend │────▶│ Backend API │◀───▶│ Go Poller │ +│ React/nginx │ │ FastAPI │ │ go-routeros │ +└─────────────┘ └────────┬────────┘ └──────┬───────┘ + │ │ + ┌──────────────┼──────────────────────┤ + │ │ │ + ┌────────▼──┐ ┌──────▼──────┐ ┌──────────▼──┐ + │ Redis │ │ PostgreSQL │ │ NATS │ + │ locks, │ │ 17 + Timescale│ │ JetStream │ + │ cache │ │ DB + RLS │ │ pub/sub │ + └───────────┘ └─────────────┘ └─────────────┘ + │ + ┌──────▼──────┐ + │ OpenBao │ + │ Transit KMS │ + └─────────────┘ +``` + +## Services + +### Frontend (React / nginx) + +- **Stack**: React 19, TypeScript, TanStack Router (file-based routing), TanStack Query (data fetching), Tailwind CSS 3.4, Vite +- **Production**: Static build served by nginx on port 80 (exposed as port 3000) +- **Development**: Vite dev server with hot module replacement +- **Design system**: Geist Sans + Geist Mono fonts, HSL color tokens via CSS custom properties, class-based dark/light mode +- **Real-time**: Server-Sent Events (SSE) for live device status updates, alerts, and operation progress +- **Client-side encryption**: SRP-6a authentication flow with 2SKD key derivation; Emergency Kit PDF generation +- **UX features**: Command palette (Cmd+K), Framer Motion page transitions, collapsible sidebar, skeleton loaders +- **Memory limit**: 64MB + +### Backend API (FastAPI) + +- **Stack**: Python 3.12+, FastAPI 0.115+, SQLAlchemy 2.0 async, asyncpg, Gunicorn +- **Two database engines**: + - `admin_engine` (superuser) -- used only for auth/bootstrap and NATS subscribers that need cross-tenant access + - `app_engine` (non-superuser `app_user` role) -- used for all device/data routes, enforces RLS +- **Authentication**: JWT tokens (15min access, 7d refresh), SRP-6a zero-knowledge proof, RBAC (super_admin, admin, operator, viewer) +- **NATS subscribers**: Three independent subscribers for device status, metrics, and firmware events. Non-fatal startup -- API serves requests even if NATS is unavailable +- **Background services**: APScheduler for nightly config backups and daily firmware version checks +- **OpenBao integration**: Provisions per-tenant Transit encryption keys on startup, dual-read fallback if OpenBao is unavailable +- **Startup sequence**: Configure logging -> Run Alembic migrations -> Bootstrap first admin -> Start NATS subscribers -> Ensure SSE streams -> Start schedulers -> Provision OpenBao keys +- **API documentation**: OpenAPI docs at `/docs` and `/redoc` (dev environment only) +- **Health endpoints**: `/health` (liveness), `/health/ready` (readiness -- checks PostgreSQL, Redis, NATS) +- **Middleware stack** (LIFO order): RequestID -> SecurityHeaders -> RateLimiting -> CORS -> Route handler +- **Memory limit**: 512MB + +#### API Routers + +The backend exposes 21 route groups under the `/api` prefix: + +| Router | Purpose | +|--------|---------| +| `auth` | Login (SRP-6a + legacy), token refresh, registration | +| `tenants` | Tenant CRUD (super_admin only) | +| `users` | User management, RBAC | +| `devices` | Device CRUD, status, commands | +| `device_groups` | Logical device grouping | +| `device_tags` | Tagging and filtering | +| `metrics` | Time-series metrics (TimescaleDB) | +| `config_backups` | Configuration backup history | +| `config_editor` | Live RouterOS config editing | +| `firmware` | Firmware version tracking and upgrades | +| `alerts` | Alert rules and active alerts | +| `events` | Device event log | +| `device_logs` | RouterOS system logs | +| `templates` | Configuration templates | +| `clients` | Connected client devices | +| `topology` | Network topology (ReactFlow data) | +| `sse` | Server-Sent Events streams | +| `audit_logs` | Immutable audit trail | +| `reports` | PDF report generation (Jinja2 + weasyprint) | +| `api_keys` | API key management (mktp_ prefix) | +| `maintenance_windows` | Scheduled maintenance with alert suppression | +| `vpn` | WireGuard VPN management | +| `certificates` | Internal CA and device TLS certificates | +| `transparency` | KMS access event dashboard | + +### Go Poller + +- **Stack**: Go 1.23, go-routeros/v3, pgx/v5, nats.go +- **Polling model**: Synchronous per-device polling on a configurable interval (default 60s) +- **Device communication**: RouterOS binary API over TLS (port 8729), InsecureSkipVerify for self-signed certs +- **TLS fallback**: Three-tier strategy -- CA-verified -> InsecureSkipVerify -> plain API +- **Distributed locking**: Redis locks prevent concurrent polling of the same device (safe for multi-instance deployment) +- **Circuit breaker**: Backs off from unreachable devices to avoid wasting poll cycles +- **Credential decryption**: OpenBao Transit with LRU cache (1024 entries, 5min TTL) to minimize KMS calls +- **Output**: Publishes poll results to NATS JetStream; the API's NATS subscribers process and persist them +- **Database access**: Uses `poller_user` role which bypasses RLS (needs cross-tenant device access) +- **VPN routing**: Adds static route to WireGuard gateway for reaching remote devices +- **Memory limit**: 256MB + +## Infrastructure Services + +### PostgreSQL 17 + TimescaleDB + +- **Image**: `timescale/timescaledb:2.17.2-pg17` +- **Row-Level Security (RLS)**: Enforces tenant isolation at the database level. All data tables have a `tenant_id` column; RLS policies filter by `current_setting('app.tenant_id')` +- **Database roles**: + - `postgres` (superuser) -- admin engine, auth/bootstrap, migrations + - `app_user` (non-superuser) -- RLS-enforced, used by API for data routes + - `poller_user` -- bypasses RLS, used by Go poller for cross-tenant device access +- **TimescaleDB hypertables**: Time-series storage for device metrics (CPU, memory, interface traffic, etc.) +- **Migrations**: Alembic, run automatically on API startup +- **Initialization**: `scripts/init-postgres.sql` creates roles and enables extensions +- **Data volume**: `./docker-data/postgres` +- **Memory limit**: 512MB + +### Redis + +- **Image**: `redis:7-alpine` +- **Uses**: + - Distributed locking for the Go poller (prevents concurrent polling of the same device) + - Rate limiting on auth endpoints (5 requests/min) + - Credential cache for OpenBao Transit responses +- **Data volume**: `./docker-data/redis` +- **Memory limit**: 128MB + +### NATS JetStream + +- **Image**: `nats:2-alpine` +- **Role**: Message bus between the Go poller and the Python API +- **Streams**: DEVICE_EVENTS (poll results, status changes), ALERT_EVENTS (SSE delivery), OPERATION_EVENTS (SSE delivery) +- **Durable consumers**: Ensure no message loss during API restarts +- **Monitoring port**: 8222 +- **Data volume**: `./docker-data/nats` +- **Memory limit**: 128MB + +### OpenBao (HashiCorp Vault fork) + +- **Image**: `openbao/openbao:2.1` +- **Mode**: Dev server (auto-unsealed, in-memory storage) +- **Transit secrets engine**: Provides envelope encryption for device credentials at rest +- **Per-tenant keys**: Each tenant gets a dedicated Transit encryption key +- **Init script**: `infrastructure/openbao/init.sh` enables Transit engine and creates initial keys +- **Dev token**: `dev-openbao-token` (must be replaced in production) +- **Memory limit**: 256MB + +### WireGuard + +- **Image**: `lscr.io/linuxserver/wireguard` +- **Role**: VPN gateway for reaching RouterOS devices on remote networks +- **Port**: 51820/UDP +- **Routing**: API and Poller containers add static routes through the WireGuard container to reach device subnets (e.g., `10.10.0.0/16`) +- **Data volume**: `./docker-data/wireguard` +- **Memory limit**: 128MB + +## Data Flow + +### Device Polling Cycle + +``` +Go Poller Redis OpenBao RouterOS NATS API PostgreSQL + │ │ │ │ │ │ │ + ├──query device list──────▶│ │ │ │ │ │ + │◀─────────────────────────┤ │ │ │ │ │ + ├──acquire lock────────────▶│ │ │ │ │ │ + │◀──lock granted───────────┤ │ │ │ │ │ + ├──decrypt credentials (cache miss)────────▶│ │ │ │ │ + │◀──plaintext credentials──────────────────┤ │ │ │ │ + ├──binary API (8729 TLS)───────────────────────────────────▶│ │ │ │ + │◀──system info, interfaces, metrics───────────────────────┤ │ │ │ + ├──publish poll result──────────────────────────────────────────────────▶│ │ │ + │ │ │ │ │ ──subscribe──▶│ │ + │ │ │ │ │ ├──upsert data──▶│ + ├──release lock────────────▶│ │ │ │ │ │ +``` + +1. Poller queries PostgreSQL for the list of active devices +2. Acquires a Redis distributed lock per device (prevents duplicate polling) +3. Decrypts device credentials via OpenBao Transit (LRU cache avoids repeated KMS calls) +4. Connects to the RouterOS binary API on port 8729 over TLS +5. Collects system info, interface stats, routing tables, and metrics +6. Publishes results to NATS JetStream +7. API NATS subscriber processes results and upserts into PostgreSQL +8. Releases Redis lock + +### Config Push (Two-Phase with Panic Revert) + +``` +Frontend API RouterOS + │ │ │ + ├──push config─▶│ │ + │ ├──apply config─▶│ + │ ├──set revert timer─▶│ + │ │◀──ack────────┤ + │◀──pending────┤ │ + │ │ │ (timer counting down) + ├──confirm─────▶│ │ + │ ├──cancel timer─▶│ + │ │◀──ack────────┤ + │◀──confirmed──┤ │ +``` + +1. Frontend sends config commands to the API +2. API connects to the device and applies the configuration +3. Sets a revert timer on the device (RouterOS safe mode / scheduler) +4. Returns pending status to the frontend +5. User confirms the change works (e.g., connectivity still up) +6. If confirmed: API cancels the revert timer, config is permanent +7. If timeout or rejected: device automatically reverts to the previous configuration + +This pattern prevents lockouts from misconfigured firewall rules or IP changes. + +### Authentication (SRP-6a Zero-Knowledge Proof) + +``` +Browser API PostgreSQL + │ │ │ + │──register────────────────▶│ │ + │ (email, salt, verifier) │──store verifier──────▶│ + │ │ │ + │──login step 1────────────▶│ │ + │ (email, client_public) │──lookup verifier─────▶│ + │◀──(salt, server_public)──┤◀─────────────────────┤ + │ │ │ + │──login step 2────────────▶│ │ + │ (client_proof) │──verify proof────────│ + │◀──(server_proof, JWT)────┤ │ +``` + +1. **Registration**: Client derives a verifier from `password + secret_key` using PBKDF2 (650K iterations) + HKDF + XOR (2SKD). Only the salt and verifier are sent to the server -- never the password +2. **Login step 1**: Client sends email and ephemeral public value; server responds with stored salt and its own ephemeral public value +3. **Login step 2**: Client computes a proof from the shared session key; server validates the proof without ever seeing the password +4. **Token issuance**: On successful proof, server issues JWT (15min access + 7d refresh) +5. **Emergency Kit**: A downloadable PDF containing the user's secret key for account recovery + +## Multi-Tenancy Model + +- Every data table includes a `tenant_id` column +- PostgreSQL RLS policies filter rows by `current_setting('app.tenant_id')` +- The API sets tenant context (`SET app.tenant_id = ...`) on each database session +- `super_admin` role has NULL `tenant_id` and can access all tenants +- `poller_user` bypasses RLS intentionally (needs cross-tenant device access for polling) +- Tenant isolation is enforced at the database level, not the application level -- even a compromised API cannot leak cross-tenant data through `app_user` connections + +## Security Layers + +| Layer | Mechanism | Purpose | +|-------|-----------|---------| +| **Authentication** | SRP-6a | Zero-knowledge proof -- password never transmitted or stored | +| **Key Derivation** | 2SKD (PBKDF2 650K + HKDF + XOR) | Two-secret key derivation from password + secret key | +| **Encryption at Rest** | OpenBao Transit | Envelope encryption for device credentials | +| **Tenant Isolation** | PostgreSQL RLS | Database-level row filtering by tenant_id | +| **Access Control** | JWT + RBAC | Role-based permissions (super_admin, admin, operator, viewer) | +| **Rate Limiting** | Redis-backed | Auth endpoints limited to 5 requests/min | +| **TLS Certificates** | Internal CA | Certificate management and deployment to RouterOS devices | +| **Security Headers** | Middleware | CSP, SRI hashes on JS bundles, X-Frame-Options, etc. | +| **Secret Validation** | Startup check | Rejects known-insecure defaults in non-dev environments | + +## Network Topology + +All services communicate over a single Docker bridge network (`tod`). External ports: + +| Service | Internal Port | External Port | Protocol | +|---------|--------------|---------------|----------| +| Frontend | 80 | 3000 | HTTP | +| API | 8000 | 8001 | HTTP | +| PostgreSQL | 5432 | 5432 | TCP | +| Redis | 6379 | 6379 | TCP | +| NATS | 4222 | 4222 | TCP | +| NATS Monitor | 8222 | 8222 | HTTP | +| OpenBao | 8200 | 8200 | HTTP | +| WireGuard | 51820 | 51820 | UDP | + +## File Structure + +``` +backend/ FastAPI Python backend + app/ + main.py Application entry point, lifespan, router registration + config.py Pydantic Settings configuration + database.py SQLAlchemy engines (admin + app_user) + models/ SQLAlchemy ORM models + routers/ FastAPI route handlers (21 modules) + services/ Business logic, NATS subscribers, schedulers + middleware/ Rate limiting, request ID, security headers +frontend/ React TypeScript frontend + src/ + routes/ TanStack Router file-based routes + components/ Reusable UI components + lib/ API client, crypto, utilities +poller/ Go microservice for device polling + main.go Entry point + Dockerfile Multi-stage build +infrastructure/ Deployment configuration + docker/ Dockerfiles for api, frontend + helm/ Kubernetes Helm charts + openbao/ OpenBao init scripts +scripts/ Database init scripts +docker-compose.yml Infrastructure services (postgres, redis, nats, openbao, wireguard) +docker-compose.override.yml Application services for dev (api, poller, frontend) +``` + +## Running the Stack + +```bash +# Infrastructure only (postgres, redis, nats, openbao, wireguard) +docker compose up -d + +# Full stack including application services (api, poller, frontend) +docker compose up -d # override.yml is auto-loaded in dev + +# Build images sequentially to avoid OOM on low-RAM machines +docker compose build api +docker compose build poller +docker compose build frontend +``` + +## Container Memory Limits + +| Service | Limit | +|---------|-------| +| PostgreSQL | 512MB | +| API | 512MB | +| Go Poller | 256MB | +| OpenBao | 256MB | +| Redis | 128MB | +| NATS | 128MB | +| WireGuard | 128MB | +| Frontend (nginx) | 64MB | diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md new file mode 100644 index 0000000..64dadd7 --- /dev/null +++ b/docs/CONFIGURATION.md @@ -0,0 +1,127 @@ +# Configuration Reference + +TOD uses Pydantic Settings for configuration. All values can be set via environment variables or a `.env` file in the backend working directory. + +## Environment Variables + +### Application + +| Variable | Default | Description | +|----------|---------|-------------| +| `APP_NAME` | `TOD - The Other Dude` | Application display name | +| `APP_VERSION` | `0.1.0` | Semantic version string | +| `ENVIRONMENT` | `dev` | Runtime environment: `dev`, `staging`, or `production` | +| `DEBUG` | `false` | Enable debug mode | +| `CORS_ORIGINS` | `http://localhost:3000,http://localhost:5173,http://localhost:8080` | Comma-separated list of allowed CORS origins | +| `APP_BASE_URL` | `http://localhost:5173` | Frontend base URL (used in password reset emails) | + +### Authentication & JWT + +| Variable | Default | Description | +|----------|---------|-------------| +| `JWT_SECRET_KEY` | *(insecure dev default)* | HMAC signing key for JWTs. **Must be changed in production.** Generate with: `python -c "import secrets; print(secrets.token_urlsafe(64))"` | +| `JWT_ALGORITHM` | `HS256` | JWT signing algorithm | +| `JWT_ACCESS_TOKEN_EXPIRE_MINUTES` | `15` | Access token lifetime in minutes | +| `JWT_REFRESH_TOKEN_EXPIRE_DAYS` | `7` | Refresh token lifetime in days | +| `PASSWORD_RESET_TOKEN_EXPIRE_MINUTES` | `30` | Password reset link validity in minutes | + +### Database + +| Variable | Default | Description | +|----------|---------|-------------| +| `DATABASE_URL` | `postgresql+asyncpg://postgres:postgres@localhost:5432/mikrotik` | Admin (superuser) async database URL. Used for migrations and bootstrap operations. | +| `SYNC_DATABASE_URL` | `postgresql+psycopg2://postgres:postgres@localhost:5432/mikrotik` | Synchronous database URL used by Alembic migrations only. | +| `APP_USER_DATABASE_URL` | `postgresql+asyncpg://app_user:app_password@localhost:5432/mikrotik` | Non-superuser async database URL. Enforces PostgreSQL RLS for tenant isolation. | +| `DB_POOL_SIZE` | `20` | App user connection pool size | +| `DB_MAX_OVERFLOW` | `40` | App user pool max overflow connections | +| `DB_ADMIN_POOL_SIZE` | `10` | Admin connection pool size | +| `DB_ADMIN_MAX_OVERFLOW` | `20` | Admin pool max overflow connections | + +### Security + +| Variable | Default | Description | +|----------|---------|-------------| +| `CREDENTIAL_ENCRYPTION_KEY` | *(insecure dev default)* | AES-256-GCM encryption key for device credentials at rest. Must be exactly 32 bytes, base64-encoded. **Must be changed in production.** Generate with: `python -c "import secrets, base64; print(base64.b64encode(secrets.token_bytes(32)).decode())"` | + +### OpenBao / Vault (KMS) + +| Variable | Default | Description | +|----------|---------|-------------| +| `OPENBAO_ADDR` | `http://localhost:8200` | OpenBao Transit server address for per-tenant envelope encryption | +| `OPENBAO_TOKEN` | *(insecure dev default)* | OpenBao authentication token. **Must be changed in production.** | + +### NATS + +| Variable | Default | Description | +|----------|---------|-------------| +| `NATS_URL` | `nats://localhost:4222` | NATS JetStream server URL for pub/sub between Go poller and Python API | + +### Redis + +| Variable | Default | Description | +|----------|---------|-------------| +| `REDIS_URL` | `redis://localhost:6379/0` | Redis URL for caching, distributed locks, and rate limiting | + +### SMTP (Notifications) + +| Variable | Default | Description | +|----------|---------|-------------| +| `SMTP_HOST` | `localhost` | SMTP server hostname | +| `SMTP_PORT` | `587` | SMTP server port | +| `SMTP_USER` | *(none)* | SMTP authentication username | +| `SMTP_PASSWORD` | *(none)* | SMTP authentication password | +| `SMTP_USE_TLS` | `false` | Enable STARTTLS for SMTP connections | +| `SMTP_FROM_ADDRESS` | `noreply@mikrotik-portal.local` | Sender address for outbound emails | + +### Firmware + +| Variable | Default | Description | +|----------|---------|-------------| +| `FIRMWARE_CACHE_DIR` | `/data/firmware-cache` | Path to firmware download cache (PVC mount in production) | +| `FIRMWARE_CHECK_INTERVAL_HOURS` | `24` | Hours between automatic RouterOS version checks | + +### Storage Paths + +| Variable | Default | Description | +|----------|---------|-------------| +| `GIT_STORE_PATH` | `./git-store` | Path to bare git repos for config backup history (one repo per tenant). In production: `/data/git-store` on a ReadWriteMany PVC. | +| `WIREGUARD_CONFIG_PATH` | `/data/wireguard` | Shared volume path for WireGuard configuration files | + +### Bootstrap + +| Variable | Default | Description | +|----------|---------|-------------| +| `FIRST_ADMIN_EMAIL` | *(none)* | Email for the initial super_admin user. Only used if no users exist in the database. | +| `FIRST_ADMIN_PASSWORD` | *(none)* | Password for the initial super_admin user. The user is created with `must_upgrade_auth=True`, triggering SRP registration on first login. | + +## Production Safety + +TOD refuses to start in `staging` or `production` environments if any of these variables still have their insecure dev defaults: + +- `JWT_SECRET_KEY` +- `CREDENTIAL_ENCRYPTION_KEY` +- `OPENBAO_TOKEN` + +The process exits with code 1 and a clear error message indicating which variable needs to be rotated. + +## Docker Compose Profiles + +| Profile | Command | Services | +|---------|---------|----------| +| *(default)* | `docker compose up -d` | Infrastructure only: PostgreSQL, Redis, NATS, OpenBao | +| `full` | `docker compose --profile full up -d` | All services: infrastructure + API, Poller, Frontend | + +## Container Memory Limits + +All containers have enforced memory limits to prevent OOM on the host: + +| Service | Memory Limit | +|---------|-------------| +| PostgreSQL | 512 MB | +| Redis | 128 MB | +| NATS | 128 MB | +| API | 512 MB | +| Poller | 256 MB | +| Frontend | 64 MB | + +Build Docker images sequentially (not in parallel) to avoid OOM during builds. diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md new file mode 100644 index 0000000..52ebcb7 --- /dev/null +++ b/docs/DEPLOYMENT.md @@ -0,0 +1,257 @@ +# TOD - The Other Dude — Deployment Guide + +## Overview + +TOD (The Other Dude) is a containerized fleet management platform for RouterOS devices. This guide covers Docker Compose deployment for production environments. + +### Architecture + +- **Backend API** (Python/FastAPI) -- REST API with JWT authentication and PostgreSQL RLS +- **Go Poller** -- Polls RouterOS devices via binary API, publishes events to NATS +- **Frontend** (React/nginx) -- Single-page application served by nginx +- **PostgreSQL + TimescaleDB** -- Primary database with time-series extensions +- **Redis** -- Distributed locking and rate limiting +- **NATS JetStream** -- Message bus for device events + +## Prerequisites + +- Docker Engine 24+ with Docker Compose v2 +- At least 4GB RAM (2GB absolute minimum -- builds are memory-intensive) +- External SSD or fast storage recommended for Docker volumes +- Network access to RouterOS devices on ports 8728 (API) and 8729 (API-SSL) + +## Quick Start + +### 1. Clone and Configure + +```bash +git clone tod +cd tod + +# Copy environment template +cp .env.example .env.prod +``` + +### 2. Generate Secrets + +```bash +# Generate JWT secret +python3 -c "import secrets; print(secrets.token_urlsafe(64))" + +# Generate credential encryption key (32 bytes, base64-encoded) +python3 -c "import secrets, base64; print(base64.b64encode(secrets.token_bytes(32)).decode())" +``` + +Edit `.env.prod` with the generated values: + +```env +ENVIRONMENT=production +JWT_SECRET_KEY= +CREDENTIAL_ENCRYPTION_KEY= +POSTGRES_PASSWORD= + +# First admin user (created on first startup) +FIRST_ADMIN_EMAIL=admin@example.com +FIRST_ADMIN_PASSWORD= +``` + +### 3. Build Images + +Build images **one at a time** to avoid out-of-memory crashes on constrained hosts: + +```bash +docker compose -f docker-compose.yml -f docker-compose.prod.yml build api +docker compose -f docker-compose.yml -f docker-compose.prod.yml build poller +docker compose -f docker-compose.yml -f docker-compose.prod.yml build frontend +``` + +### 4. Start the Stack + +```bash +docker compose -f docker-compose.yml -f docker-compose.prod.yml --env-file .env.prod up -d +``` + +### 5. Verify + +```bash +# Check all services are running +docker compose ps + +# Check API health (liveness) +curl http://localhost:8000/health + +# Check readiness (PostgreSQL, Redis, NATS connected) +curl http://localhost:8000/health/ready + +# Access the portal +open http://localhost +``` + +Log in with the `FIRST_ADMIN_EMAIL` and `FIRST_ADMIN_PASSWORD` credentials set in step 2. + +## Environment Configuration + +### Required Variables + +| Variable | Description | Example | +|----------|-------------|---------| +| `ENVIRONMENT` | Deployment environment | `production` | +| `JWT_SECRET_KEY` | JWT signing secret (min 32 chars) | `` | +| `CREDENTIAL_ENCRYPTION_KEY` | AES-256 key for device credentials (base64) | `` | +| `POSTGRES_PASSWORD` | PostgreSQL superuser password | `` | +| `FIRST_ADMIN_EMAIL` | Initial admin account email | `admin@example.com` | +| `FIRST_ADMIN_PASSWORD` | Initial admin account password | `` | + +### Optional Variables + +| Variable | Default | Description | +|----------|---------|-------------| +| `GUNICORN_WORKERS` | `2` | API worker process count | +| `DB_POOL_SIZE` | `20` | App database connection pool size | +| `DB_MAX_OVERFLOW` | `40` | Max overflow connections above pool | +| `DB_ADMIN_POOL_SIZE` | `10` | Admin database connection pool size | +| `DB_ADMIN_MAX_OVERFLOW` | `20` | Admin max overflow connections | +| `POLL_INTERVAL_SECONDS` | `60` | Device polling interval | +| `CONNECTION_TIMEOUT_SECONDS` | `10` | RouterOS connection timeout | +| `COMMAND_TIMEOUT_SECONDS` | `30` | RouterOS per-command timeout | +| `CIRCUIT_BREAKER_MAX_FAILURES` | `5` | Consecutive failures before backoff | +| `CIRCUIT_BREAKER_BASE_BACKOFF_SECONDS` | `30` | Initial backoff duration | +| `CIRCUIT_BREAKER_MAX_BACKOFF_SECONDS` | `900` | Maximum backoff (15 min) | +| `LOG_LEVEL` | `info` | Logging verbosity (`debug`/`info`/`warn`/`error`) | +| `CORS_ORIGINS` | `http://localhost:3000` | Comma-separated CORS origins | + +### Security Notes + +- **Never use default secrets in production.** The application refuses to start if it detects known insecure defaults (like the dev JWT secret) in non-dev environments. +- **Credential encryption key** is used to encrypt RouterOS device passwords at rest. Losing this key means re-entering all device credentials. +- **CORS_ORIGINS** should be set to your actual domain in production. +- **RLS enforcement**: The app_user database role enforces row-level security. Tenants cannot access each other's data even with a compromised JWT. + +## Storage Configuration + +Docker volumes mount to the host filesystem. Default locations are configured in `docker-compose.yml`: + +- **PostgreSQL data**: `./docker-data/postgres` +- **Redis data**: `./docker-data/redis` +- **NATS data**: `./docker-data/nats` +- **Git store (config backups)**: `./docker-data/git-store` + +To change storage locations, edit the volume mounts in `docker-compose.yml`. + +## Resource Limits + +Container memory limits are enforced in `docker-compose.prod.yml` to prevent OOM crashes: + +| Service | Memory Limit | +|---------|-------------| +| PostgreSQL | 512MB | +| Redis | 128MB | +| NATS | 128MB | +| API | 512MB | +| Poller | 256MB | +| Frontend | 64MB | + +Adjust under `deploy.resources.limits.memory` in `docker-compose.prod.yml`. + +## API Documentation + +The backend serves interactive API documentation at: + +- **Swagger UI**: `http://localhost:8000/docs` +- **ReDoc**: `http://localhost:8000/redoc` + +All endpoints include descriptions, request/response schemas, and authentication requirements. + +## Monitoring (Optional) + +Enable Prometheus and Grafana monitoring with the observability compose overlay: + +```bash +docker compose \ + -f docker-compose.yml \ + -f docker-compose.prod.yml \ + -f docker-compose.observability.yml \ + --env-file .env.prod up -d +``` + +- **Prometheus**: `http://localhost:9090` +- **Grafana**: `http://localhost:3001` (default: admin/admin) + +### Exported Metrics + +The API and poller export Prometheus metrics: + +| Metric | Source | Description | +|--------|--------|-------------| +| `http_requests_total` | API | HTTP request count by method, path, status | +| `http_request_duration_seconds` | API | Request latency histogram | +| `mikrotik_poll_total` | Poller | Poll cycles by status (success/error/skipped) | +| `mikrotik_poll_duration_seconds` | Poller | Poll cycle duration histogram | +| `mikrotik_devices_active` | Poller | Number of devices being polled | +| `mikrotik_circuit_breaker_skips_total` | Poller | Polls skipped due to backoff | +| `mikrotik_nats_publish_total` | Poller | NATS publishes by subject and status | + +## Maintenance + +### Backup Strategy + +- **Database**: Use `pg_dump` or configure PostgreSQL streaming replication +- **Config backups**: Git repositories in the git-store volume (automatic nightly backups) +- **Encryption key**: Store `CREDENTIAL_ENCRYPTION_KEY` securely -- required to decrypt device credentials + +### Updating + +```bash +git pull +docker compose -f docker-compose.yml -f docker-compose.prod.yml build api +docker compose -f docker-compose.yml -f docker-compose.prod.yml build poller +docker compose -f docker-compose.yml -f docker-compose.prod.yml build frontend +docker compose -f docker-compose.yml -f docker-compose.prod.yml --env-file .env.prod up -d +``` + +Database migrations run automatically on API startup via Alembic. + +### Logs + +```bash +# All services +docker compose logs -f + +# Specific service +docker compose logs -f api + +# Filter structured JSON logs with jq +docker compose logs api --no-log-prefix 2>&1 | jq 'select(.event != null)' + +# View audit logs (config editor operations) +docker compose logs api --no-log-prefix 2>&1 | jq 'select(.event | startswith("routeros_"))' +``` + +### Graceful Shutdown + +All services handle SIGTERM for graceful shutdown: + +- **API (gunicorn)**: Finishes in-flight requests within `GUNICORN_GRACEFUL_TIMEOUT` (default 30s), then disposes database connection pools +- **Poller (Go)**: Cancels all device polling goroutines via context propagation, waits for in-flight polls to complete +- **Frontend (nginx)**: Stops accepting new connections and finishes serving active requests + +```bash +# Graceful stop (sends SIGTERM, waits 30s) +docker compose stop + +# Restart a single service +docker compose restart api +``` + +## Troubleshooting + +| Issue | Solution | +|-------|----------| +| API won't start with secret error | Generate production secrets (see step 2 above) | +| Build crashes with OOM | Build images one at a time (see step 3 above) | +| Device shows offline | Check network access to device API port (8728/8729) | +| Health check fails | Check `docker compose logs api` for startup errors | +| Rate limited (429) | Wait 60 seconds or check Redis connectivity | +| Migration fails | Check `docker compose logs api` for Alembic errors | +| NATS subscriber won't start | Non-fatal -- API runs without NATS; check NATS container health | +| Poller circuit breaker active | Device unreachable; check `CIRCUIT_BREAKER_*` env vars to tune backoff | diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..caa8674 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,203 @@ +# The Other Dude + +**Fleet management for MikroTik RouterOS devices.** Built for MSPs who manage hundreds of routers across multiple tenants. Think "UniFi Controller, but for MikroTik." + +The Other Dude is a self-hosted, multi-tenant platform that gives you centralized visibility, configuration management, real-time monitoring, and zero-knowledge security across your entire MikroTik fleet -- from a single pane of glass. + +--- + +## Features + +### Fleet + +- **Dashboard** -- At-a-glance fleet health with device counts, uptime sparklines, and status breakdowns per organization. +- **Device Management** -- Detailed device pages with system info, interfaces, routes, firewall rules, DHCP leases, and real-time resource metrics. +- **Fleet Table** -- Virtual-scrolled table (TanStack Virtual) that handles hundreds of devices without breaking a sweat. +- **Device Map** -- Geographic view of device locations. +- **Subnet Scanner** -- Discover new RouterOS devices on your network and onboard them in clicks. + +### Configuration + +- **Config Editor** -- Browse and edit RouterOS configuration sections with a structured command interface. Two-phase config push with automatic panic-revert ensures you never brick a remote device. +- **Batch Config** -- Apply configuration changes across multiple devices simultaneously with template support. +- **Bulk Commands** -- Execute arbitrary RouterOS commands across device groups. +- **Templates** -- Reusable configuration templates with variable substitution. +- **Simple Config** -- A Linksys/Ubiquiti-style simplified interface covering Internet, LAN/DHCP, WiFi, Port Forwarding, Firewall, DNS, and System settings. No RouterOS CLI knowledge required. +- **Config Backup & Diff** -- Git-backed configuration storage with full version history and side-by-side diffs. Restore any previous configuration with one click. + +### Monitoring + +- **Network Topology** -- Interactive topology map (ReactFlow + Dagre layout) showing device interconnections and shared subnets. +- **Real-Time Metrics** -- Live CPU, memory, disk, and interface traffic via Server-Sent Events (SSE) backed by NATS JetStream. +- **Alert Rules** -- Configurable threshold-based alerts for any metric (CPU > 90%, interface down, uptime reset, etc.). +- **Notification Channels** -- Route alerts to email, webhooks, or Slack. +- **Audit Trail** -- Immutable log of every action taken in the portal, with user attribution and exportable records. +- **Transparency Dashboard** -- KMS access event monitoring for tenant admins (who accessed what encryption keys, when). +- **Reports** -- Generate PDF reports (fleet summary, device detail, security audit, performance) with Jinja2 + WeasyPrint. + +### Security + +- **Zero-Knowledge Architecture** -- 1Password-style hybrid design. SRP-6a authentication means the server never sees your password. Two-Secret Key Derivation (2SKD) with PBKDF2 (650K iterations) + HKDF + XOR. +- **Secret Key** -- 128-bit `A3-XXXXXX` format key stored in IndexedDB with Emergency Kit PDF export. +- **OpenBao KMS** -- Per-tenant envelope encryption via Transit secret engine. Go poller uses LRU cache (1024 keys / 5-min TTL) for performance. +- **Internal Certificate Authority** -- Issue and deploy TLS certificates to RouterOS devices via SFTP. Three-tier TLS fallback: CA-verified, InsecureSkipVerify, plain API. +- **WireGuard VPN** -- Manage WireGuard tunnels for secure device access across NAT boundaries. +- **Credential Encryption** -- AES-256-GCM (Fernet) encryption of all stored device credentials at rest. +- **RBAC** -- Four roles: `super_admin`, `admin`, `operator`, `viewer`. PostgreSQL Row-Level Security enforces tenant isolation at the database layer. + +### Administration + +- **Multi-Tenancy** -- Full organization isolation with PostgreSQL RLS. Super admins manage all tenants; tenant admins see only their own devices and users. +- **User Management** -- Per-tenant user administration with role assignment. +- **API Keys** -- Generate `mktp_`-prefixed API keys with SHA-256 hash storage and operator-level RBAC for automation and integrations. +- **Firmware Management** -- Track RouterOS versions across your fleet, plan upgrades, and push firmware updates. +- **Maintenance Windows** -- Schedule maintenance periods with automatic alert suppression. +- **Setup Wizard** -- Guided 3-step onboarding for first-time deployment. + +### UX + +- **Command Palette** -- `Cmd+K` / `Ctrl+K` quick navigation (cmdk). +- **Keyboard Shortcuts** -- Vim-style sequence shortcuts (`g d` for dashboard, `g t` for topology, `[` to toggle sidebar). +- **Dark / Light Mode** -- Class-based theming with flicker-free initialization. +- **Page Transitions** -- Smooth route transitions with Framer Motion. +- **Skeleton Loaders** -- Shimmer-gradient loading states throughout the UI. + +--- + +## Architecture + +``` + +-----------+ + | Frontend | + | React/nginx| + +-----+-----+ + | + /api/ proxy + | + +-----v-----+ + | API | + | FastAPI | + +--+--+--+--+ + | | | + +-------------+ | +--------------+ + | | | + +-----v------+ +-----v-----+ +-------v-------+ + | PostgreSQL | | Redis | | NATS | + | TimescaleDB | | (locks, | | JetStream | + | (RLS) | | caching) | | (pub/sub) | + +-----^------+ +-----^-----+ +-------^-------+ + | | | + +-----+-------+-------+---------+-------+ + | Poller (Go) | + | Polls RouterOS devices via binary API | + | port 8729 TLS | + +----------------------------------------+ + | + +--------v---------+ + | RouterOS Fleet | + | (your devices) | + +-------------------+ +``` + +- **Frontend** serves the React SPA via nginx and proxies `/api/` to the backend. +- **API** handles all business logic, authentication, and database access with RLS-enforced tenant isolation. +- **Poller** is a Go microservice that polls RouterOS devices on a configurable interval using the RouterOS binary API, publishing results to NATS and persisting to PostgreSQL. +- **PostgreSQL + TimescaleDB** stores all relational data with hypertables for time-series metrics. +- **Redis** provides distributed locks (one poller per device) and rate limiting. +- **NATS JetStream** delivers real-time events from the poller to the API (and onward to the frontend via SSE). +- **OpenBao** provides Transit secret engine for per-tenant envelope encryption (zero-knowledge key management). + +--- + +## Tech Stack + +| Layer | Technology | +|-------|-----------| +| Frontend | React 19, TanStack Router + Query, Tailwind CSS 3.4, Vite, Framer Motion | +| Backend | Python 3.12, FastAPI 0.115, SQLAlchemy 2.0 async, asyncpg, Pydantic v2 | +| Poller | Go 1.24, go-routeros/v3, pgx/v5, nats.go | +| Database | PostgreSQL 17 + TimescaleDB 2.17, Row-Level Security | +| Cache | Redis 7 | +| Message Bus | NATS with JetStream | +| KMS | OpenBao 2.1 (Transit secret engine) | +| VPN | WireGuard | +| Auth | SRP-6a (zero-knowledge), JWT (15m access / 7d refresh) | +| Reports | Jinja2 + WeasyPrint (PDF generation) | +| Containerization | Docker Compose (dev, staging, production profiles) | + +--- + +## Quick Start + +See the full [Quick Start Guide](../QUICKSTART.md) for detailed instructions. + +```bash +# Clone and configure +cp .env.example .env + +# Start infrastructure +docker compose up -d + +# Build app images (one at a time to avoid OOM) +docker compose build api +docker compose build poller +docker compose build frontend + +# Start the full stack +docker compose up -d + +# Verify +curl http://localhost:8001/health +open http://localhost:3000 +``` + +Three environment profiles are available: + +| Environment | Frontend | API | Notes | +|-------------|----------|-----|-------| +| Dev | `localhost:3000` | `localhost:8001` | Hot-reload, volume-mounted source | +| Staging | `localhost:3080` | `localhost:8081` | Built images, staging secrets | +| Production | `localhost` (port 80) | Internal (proxied) | Gunicorn workers, log rotation | + +--- + +## Documentation + +| Document | Description | +|----------|-------------| +| [Quick Start](../QUICKSTART.md) | Get running in minutes | +| [Deployment Guide](DEPLOYMENT.md) | Production deployment, TLS, backups | +| [Architecture](ARCHITECTURE.md) | System design, data flows, multi-tenancy | +| [Security Model](SECURITY.md) | Zero-knowledge auth, encryption, RLS, RBAC | +| [User Guide](USER-GUIDE.md) | End-user guide for all features | +| [API Reference](API.md) | REST API endpoints and authentication | +| [Configuration](CONFIGURATION.md) | Environment variables and tuning | + +--- + +## Screenshots + +See the [documentation site](https://theotherdude.net) for screenshots. + +--- + +## Project Structure + +``` +backend/ Python FastAPI backend +frontend/ React TypeScript frontend +poller/ Go microservice for device polling +infrastructure/ Helm charts, Dockerfiles, OpenBao init +docs/ Documentation +docker-compose.yml Base compose (infrastructure services) +docker-compose.override.yml Dev overrides (hot-reload) +docker-compose.staging.yml Staging profile +docker-compose.prod.yml Production profile +docker-compose.observability.yml Prometheus + Grafana +``` + +--- + +## License + +Open-source. Self-hosted. Your data stays on your infrastructure. diff --git a/docs/SECURITY.md b/docs/SECURITY.md new file mode 100644 index 0000000..14769ff --- /dev/null +++ b/docs/SECURITY.md @@ -0,0 +1,149 @@ +# Security Model + +## Overview + +TOD (The Other Dude) implements a 1Password-inspired zero-knowledge security architecture. The server never stores or sees user passwords. All data is stored on infrastructure you own and control — no external telemetry, analytics, or third-party data transmission. + +## Authentication: SRP-6a Zero-Knowledge Proof + +TOD uses the Secure Remote Password (SRP-6a) protocol for authentication, ensuring the server never receives, transmits, or stores user passwords. + +- **SRP-6a protocol:** Password is verified via a zero-knowledge proof — only a cryptographic verifier derived from the password is stored on the server, never the password itself. +- **Two-Secret Key Derivation (2SKD):** Combines the user password with a 128-bit Secret Key using a multi-step derivation process, ensuring that compromise of either factor alone is insufficient. +- **Key derivation pipeline:** PBKDF2 with 650,000 iterations + HKDF expansion + XOR combination of both factors. +- **Secret Key format:** `A3-XXXXXX` (128-bit), stored exclusively in the browser's IndexedDB. The server never sees or stores the Secret Key. +- **Emergency Kit:** Downloadable PDF containing the Secret Key for account recovery. Generated client-side. +- **Session management:** JWT tokens with 15-minute access token lifetime and 7-day refresh token lifetime, delivered via httpOnly cookies. +- **SRP session state:** Ephemeral SRP handshake data stored in Redis with automatic expiration. + +### Authentication Flow + +``` +Client Server + | | + | POST /auth/srp/init {email} | + |------------------------------------>| + | {salt, server_ephemeral_B} | + |<------------------------------------| + | | + | [Client derives session key from | + | password + Secret Key + salt + B] | + | | + | POST /auth/srp/verify {A, M1} | + |------------------------------------>| + | [Server verifies M1 proof] | + | {M2, access_token, refresh_token} | + |<------------------------------------| +``` + +## Credential Encryption + +Device credentials (RouterOS usernames and passwords) are encrypted at rest using envelope encryption: + +- **Encryption algorithm:** AES-256-GCM (via Fernet symmetric encryption). +- **Key management:** OpenBao Transit secrets engine provides the master encryption keys. +- **Per-tenant isolation:** Each tenant has its own encryption key in OpenBao Transit. +- **Envelope encryption:** Data is encrypted with a data encryption key (DEK), which is itself encrypted by the tenant's Transit key. +- **Go poller decryption:** The poller service decrypts credentials at runtime via the Transit API, with an LRU cache (1,024 entries, 5-minute TTL) to reduce KMS round-trips. +- **CA private keys:** Encrypted with AES-256-GCM before database storage. PEM key material is never logged. + +## Tenant Isolation + +Multi-tenancy is enforced at the database level, making cross-tenant data access structurally impossible: + +- **PostgreSQL Row-Level Security (RLS):** All data tables have RLS policies that filter rows by `tenant_id`. +- **`app_user` database role:** All application queries run through a non-superuser role that enforces RLS. Even a SQL injection attack cannot cross tenant boundaries. +- **Session context:** `tenant_id` is set via PostgreSQL session variables (`SET app.current_tenant`) on every request, derived from the authenticated user's JWT. +- **`super_admin` role:** Users with NULL `tenant_id` can access all tenants for platform administration. Represented as `'super_admin'` in the RLS context. +- **`poller_user` role:** Bypasses RLS by design — the polling service needs cross-tenant device access to poll all devices. This is an intentional security trade-off documented in the architecture. + +## Role-Based Access Control (RBAC) + +| Role | Scope | Capabilities | +|------|-------|-------------| +| `super_admin` | Global | Full system access, tenant management, user management across all tenants | +| `admin` | Tenant | Manage devices, users, settings, certificates within their tenant | +| `operator` | Tenant | Device operations, configuration changes, monitoring | +| `viewer` | Tenant | Read-only access to devices, metrics, and dashboards | + +- RBAC is enforced at both the API middleware layer and database level. +- API keys inherit the `operator` permission level and are scoped to a single tenant. +- API key tokens use the `mktp_` prefix and are stored as SHA-256 hashes (the plaintext token is shown once at creation and never stored). + +## Internal Certificate Authority + +TOD includes a per-tenant Internal Certificate Authority for managing TLS certificates on RouterOS devices: + +- **Per-tenant CA:** Each tenant can generate its own self-signed Certificate Authority. +- **Device certificate lifecycle:** Certificates follow a state machine: `issued` -> `deploying` -> `deployed` -> `expiring`/`revoked`/`superseded`. +- **Deployment:** Certificates are deployed to devices via SFTP. +- **Three-tier TLS fallback:** The Go poller attempts connections in order: + 1. CA-verified TLS (using the tenant's CA certificate) + 2. InsecureSkipVerify TLS (for self-signed RouterOS certs) + 3. Plain API connection (fallback) +- **Key protection:** CA private keys are encrypted with AES-256-GCM before database storage. PEM key material is never logged or exposed via API responses. +- **Certificate rotation and revocation:** Supported via the certificate lifecycle state machine. + +## Network Security + +- **RouterOS communication:** All device communication uses the RouterOS binary API over TLS (port 8729). InsecureSkipVerify is enabled by default because RouterOS devices typically use self-signed certificates. +- **CORS enforcement:** Strict CORS policy in production, configured via `CORS_ORIGINS` environment variable. +- **Rate limiting:** Authentication endpoints are rate-limited to 5 requests per minute per IP to prevent brute-force attacks. +- **Cookie security:** httpOnly cookies prevent JavaScript access to session tokens. The `Secure` flag is auto-detected based on whether CORS origins use HTTPS. + +## Data Protection + +- **Config backups:** Encrypted at rest via OpenBao Transit envelope encryption before database storage. +- **Audit logs:** Encrypted at rest via Transit encryption — audit log content is protected even from database administrators. +- **Subresource Integrity (SRI):** SHA-384 hashes on JavaScript bundles prevent tampering with frontend code. +- **Content Security Policy (CSP):** Strict CSP headers prevent XSS, code injection, and unauthorized resource loading. +- **No external dependencies:** Fully self-hosted with no external analytics, telemetry, CDNs, or third-party services. The only outbound connections are: + - RouterOS firmware update checks (no device data sent) + - SMTP for email notifications (if configured) + - Webhooks for alerts (if configured) + +## Security Headers + +The following security headers are enforced on all responses: + +| Header | Value | Purpose | +|--------|-------|---------| +| `Strict-Transport-Security` | `max-age=31536000; includeSubDomains` | Force HTTPS connections | +| `X-Content-Type-Options` | `nosniff` | Prevent MIME-type sniffing | +| `X-Frame-Options` | `DENY` | Prevent clickjacking via iframes | +| `Content-Security-Policy` | Strict policy | Prevent XSS and code injection | +| `Referrer-Policy` | `strict-origin-when-cross-origin` | Limit referrer information leakage | + +## Audit Trail + +- **Immutable audit log:** All significant actions are recorded in the `audit_logs` table — logins, configuration changes, device operations, admin actions. +- **Fire-and-forget logging:** The `log_action()` function records audit events asynchronously without blocking the main request. +- **Per-tenant access:** Tenants can only view their own audit logs (enforced by RLS). +- **Encryption at rest:** Audit log content is encrypted via OpenBao Transit. +- **CSV export:** Audit logs can be exported in CSV format for compliance and reporting. +- **Account deletion:** When a user deletes their account, audit log entries are anonymized (PII removed) but the action records are retained for security compliance. + +## Data Retention + +| Data Type | Retention | Notes | +|-----------|-----------|-------| +| User accounts | Until deleted | Users can self-delete from Settings | +| Device metrics | 90 days | Purged by TimescaleDB retention policy | +| Configuration backups | Indefinite | Stored in git repositories on your server | +| Audit logs | Indefinite | Anonymized on account deletion | +| API keys | Until revoked | Cascade-deleted with user account | +| Encrypted key material | Until user deleted | Cascade-deleted with user account | +| Session data (Redis) | 15 min / 7 days | Auto-expiring access/refresh tokens | +| Password reset tokens | 30 minutes | Auto-expire | +| SRP session state | Short-lived | Auto-expire in Redis | + +## GDPR Compliance + +TOD provides built-in tools for GDPR compliance: + +- **Right of Access (Art. 15):** Users can view their account information on the Settings page. +- **Right to Data Portability (Art. 20):** Users can export all personal data in JSON format from Settings. +- **Right to Erasure (Art. 17):** Users can permanently delete their account and all associated data. Audit logs are anonymized (PII removed) with a deletion receipt generated for compliance verification. +- **Right to Rectification (Art. 16):** Account information can be updated by the tenant administrator. + +As a self-hosted application, the deployment operator is the data controller and is responsible for compliance with applicable data protection laws. diff --git a/docs/USER-GUIDE.md b/docs/USER-GUIDE.md new file mode 100644 index 0000000..3865011 --- /dev/null +++ b/docs/USER-GUIDE.md @@ -0,0 +1,246 @@ +# TOD - The Other Dude: User Guide + +MSP fleet management platform for MikroTik RouterOS devices. + +--- + +## Getting Started + +### First Login + +1. Navigate to the portal URL provided by your administrator. +2. Log in with the admin credentials created during initial deployment. +3. Complete **SRP security enrollment** -- the portal uses zero-knowledge authentication (SRP-6a), so a unique Secret Key is generated for your account. +4. **Save your Emergency Kit PDF immediately.** This PDF contains your Secret Key, which you will need to log in from any new browser or device. Without it, you cannot recover access. +5. Complete the **Setup Wizard** to create your first organization and add your first device. + +### Setup Wizard + +The Setup Wizard launches automatically for first-time super_admin users. It walks through three steps: + +- **Step 1 -- Create Organization**: Enter a name for your tenant (organization). This is the top-level container for all your devices, users, and configuration. +- **Step 2 -- Add Device**: Enter the IP address, API port (default 8729 for TLS), and RouterOS credentials for your first device. The portal will attempt to connect and verify the device. +- **Step 3 -- Verify & Complete**: The portal polls the device to confirm connectivity. Once verified, you are taken to the dashboard. + +You can always add more organizations and devices later from the sidebar. + +--- + +## Navigation + +TOD uses a collapsible sidebar with four sections. Press `[` to toggle the sidebar between expanded (240px) and collapsed (48px) views. On mobile, the sidebar opens as an overlay. + +### Fleet + +| Item | Description | +|------|-------------| +| **Dashboard** | Overview of your fleet with device status cards, active alerts, and metrics sparklines. The landing page after login. | +| **Devices** | Fleet table with search, sort, and filter. Click any device row to open its detail page. | +| **Map** | Geographic map view of device locations. | + +### Manage + +| Item | Description | +|------|-------------| +| **Config Editor** | Browse and edit RouterOS configuration paths in real-time. Select a device from the header dropdown. | +| **Batch Config** | Apply configuration changes across multiple devices simultaneously using templates. | +| **Bulk Commands** | Execute RouterOS CLI commands across selected devices in bulk. | +| **Templates** | Create and manage reusable configuration templates. | +| **Firmware** | Check for RouterOS updates and schedule firmware upgrades across your fleet. | +| **Maintenance** | Schedule maintenance windows to suppress alerts during planned work. | +| **VPN** | WireGuard VPN tunnel management -- create, deploy, and monitor tunnels between devices. | +| **Certificates** | Internal Certificate Authority management -- generate, deploy, and rotate TLS certificates for your devices. | +### Monitor + +| Item | Description | +|------|-------------| +| **Topology** | Interactive network map showing device connections and shared subnets, rendered with ReactFlow and Dagre layout. | +| **Alerts** | Live alert feed with filtering by severity (info, warning, critical) and acknowledgment actions. | +| **Alert Rules** | Define threshold-based alert rules on device metrics with configurable severity and notification channels. | +| **Audit Trail** | Immutable, append-only log of all operations -- configuration changes, logins, user management, and admin actions. | +| **Transparency** | KMS access event dashboard showing encryption key usage across your organization (admin only). | +| **Reports** | Generate and export PDF reports: fleet summary, device health, compliance, and SLA. | + +### Admin + +| Item | Description | +|------|-------------| +| **Users** | User management with role-based access control (RBAC). Assign roles: super_admin, admin, operator, viewer. | +| **Organizations** | Create and manage tenants for multi-tenant MSP operation. Each tenant has isolated data via PostgreSQL row-level security. | +| **API Keys** | Generate and manage programmatic access tokens (prefixed `mktp_`) with operator-level permissions. | +| **Settings** | System configuration, theme toggle (dark/light), and profile settings. | +| **About** | Platform version, feature summary, and project information. | + +--- + +## Keyboard Shortcuts + +| Shortcut | Action | +|----------|--------| +| `Cmd+K` / `Ctrl+K` | Open command palette for quick navigation and actions | +| `[` | Toggle sidebar collapsed/expanded | +| `?` | Show keyboard shortcut help dialog | +| `g d` | Go to Dashboard | +| `g f` | Go to Firmware | +| `g t` | Go to Topology | +| `g a` | Go to Alerts | + +The command palette (`Cmd+K`) provides fuzzy search across all pages, devices, and common actions. It is accessible in both dark and light themes. + +--- + +## Device Management + +### Adding Devices + +There are several ways to add devices to your fleet: + +1. **Setup Wizard** -- automatically offered on first login. +2. **Fleet Table** -- click the "Add Device" button from the Devices page. +3. **Subnet Scanner** -- enter a CIDR range (e.g., `192.168.1.0/24`) to auto-discover MikroTik devices on the network. + +When adding a device, provide: + +- **IP Address** -- the management IP of the RouterOS device. +- **API Port** -- default is 8729 (TLS). The portal connects via the RouterOS binary API protocol. +- **Credentials** -- username and password for the device. Credentials are encrypted at rest with AES-256-GCM. + +### Device Detail Page + +Click any device in the fleet table to open its detail page. Tabs include: + +| Tab | Description | +|-----|-------------| +| **Overview** | System info, uptime, hardware model, RouterOS version, resource usage, and interface status summary. | +| **Interfaces** | Real-time traffic graphs for each network interface. | +| **Config** | Browse the full device configuration tree by RouterOS path. | +| **Firewall** | View and manage firewall filter rules, NAT rules, and address lists. | +| **DHCP** | Active DHCP leases, server configuration, and address pools. | +| **Backups** | Configuration backup timeline with side-by-side diff viewer to compare changes over time. | +| **Clients** | Connected clients and wireless registrations. | + +### Config Editor + +The Config Editor provides direct access to RouterOS configuration paths (e.g., `/ip/address`, `/ip/firewall/filter`, `/interface/bridge`). + +- Select a device from the header dropdown. +- Navigate the configuration tree to browse, add, edit, or delete entries. +- Two apply modes are available: + - **Standard Apply** -- changes are applied immediately. + - **Safe Apply** -- two-phase commit with automatic panic-revert. Changes are applied, and you have a confirmation window to accept them. If the confirmation times out (device becomes unreachable), changes automatically revert to prevent lockouts. + +Safe Apply is strongly recommended for firewall rules and routing changes on remote devices. + +### Simple Config + +Simple Config provides a consumer-router-style interface modeled after Linksys and Ubiquiti UIs. It is designed for operators who prefer guided configuration over raw RouterOS paths. + +Seven category tabs: + +1. **Internet** -- WAN connection type, PPPoE, DHCP client settings. +2. **LAN / DHCP** -- LAN addressing, DHCP server and pool configuration. +3. **WiFi** -- Wireless SSID, security, and channel settings. +4. **Port Forwarding** -- NAT destination rules for inbound services. +5. **Firewall** -- Simplified firewall rule management. +6. **DNS** -- DNS server and static DNS entries. +7. **System** -- Device identity, timezone, NTP, admin password. + +Toggle between **Simple** (guided) and **Standard** (full config editor) modes at any time. Per-device settings are stored in browser localStorage. + +--- + +## Monitoring & Alerts + +### Alert Rules + +Create threshold-based rules that fire when device metrics cross defined boundaries: + +- Select the metric to monitor (CPU, memory, disk, interface traffic, uptime, etc.). +- Set the threshold value and comparison operator. +- Choose severity: **info**, **warning**, or **critical**. +- Assign one or more notification channels. + +### Notification Channels + +Alerts can be delivered through multiple channels: + +| Channel | Description | +|---------|-------------| +| **Email** | SMTP-based email notifications. Configure server, port, and recipients. | +| **Webhook** | HTTP POST to any URL with a JSON payload containing alert details. | +| **Slack** | Slack incoming webhook with Block Kit formatting for rich alert messages. | + +### Maintenance Windows + +Schedule maintenance periods to suppress alerts during planned work: + +- Define start and end times. +- Apply to specific devices or fleet-wide. +- Alerts generated during the window are recorded but do not trigger notifications. +- Maintenance windows can be recurring or one-time. + +--- + +## Reports + +Generate PDF reports from the Reports page. Four report types are available: + +| Report | Content | +|--------|---------| +| **Fleet Summary** | Overall fleet health, device counts by status, top alerts, and aggregate statistics. | +| **Device Health** | Per-device detailed report with hardware info, resource trends, and recent events. | +| **Compliance** | Security posture audit -- firmware versions, default credentials, firewall policy checks. | +| **SLA** | Uptime and availability metrics over a selected period with percentage calculations. | + +Reports are generated as downloadable PDFs using server-side rendering. + +--- + +## Security + +### Zero-Knowledge Architecture + +TOD uses a 1Password-style hybrid zero-knowledge model: + +- **SRP-6a authentication** -- your password never leaves the browser. The server verifies a cryptographic proof without knowing the password. +- **Secret Key** -- a 128-bit key in `A3-XXXXXX` format, generated during enrollment. Combined with your password for two-secret key derivation (2SKD). +- **Emergency Kit** -- a downloadable PDF containing your Secret Key. Store it securely offline; you need it to log in from new browsers. +- **Envelope encryption** -- configuration backups and audit logs are encrypted at rest using per-tenant keys managed by the KMS (OpenBao Transit). + +### Roles and Permissions + +| Role | Capabilities | +|------|-------------| +| **super_admin** | Full platform access across all tenants. Can create organizations, manage all users, and access system settings. | +| **admin** | Full access within their tenant. Can manage users, devices, and configuration for their organization. | +| **operator** | Can view devices, apply configurations, and acknowledge alerts. Cannot manage users or organization settings. | +| **viewer** | Read-only access to devices, dashboards, and reports within their tenant. | + +### Credential Storage + +Device credentials (RouterOS username/password) are encrypted at rest with AES-256-GCM (Fernet) and only decrypted in memory by the poller when connecting to devices. + +--- + +## Theme + +TOD supports dark and light modes: + +- **Dark mode** (default) uses the Midnight Slate palette. +- **Light mode** provides a clean, high-contrast alternative. +- Toggle in **Settings** or let the portal follow your system preference. +- The command palette and all UI components adapt to the active theme. + +--- + +## Tips + +- Use the **command palette** (`Cmd+K`) for the fastest way to navigate. It searches pages, devices, and actions. +- The **Audit Trail** is immutable -- every configuration change, login, and admin action is recorded and cannot be deleted. +- **Safe Apply** is your safety net for remote devices. If a firewall change locks you out, the automatic revert restores access. +- **API Keys** (prefixed `mktp_`) provide programmatic access at operator-level permissions for automation and scripting. +- The **Topology** view uses automatic Dagre layout. Toggle shared subnet edges to reduce visual clutter on complex networks. + +--- + +*TOD -- The Other Dude is not affiliated with or endorsed by MikroTik (SIA Mikrotikls).* diff --git a/docs/website/CNAME b/docs/website/CNAME new file mode 100644 index 0000000..f335ab6 --- /dev/null +++ b/docs/website/CNAME @@ -0,0 +1 @@ +theotherdude.net \ No newline at end of file diff --git a/docs/website/assets/alerts.png b/docs/website/assets/alerts.png new file mode 100644 index 0000000..1eecba2 Binary files /dev/null and b/docs/website/assets/alerts.png differ diff --git a/docs/website/assets/config-editor.png b/docs/website/assets/config-editor.png new file mode 100644 index 0000000..8b488e2 Binary files /dev/null and b/docs/website/assets/config-editor.png differ diff --git a/docs/website/assets/dashboard-lebowski-lanes.png b/docs/website/assets/dashboard-lebowski-lanes.png new file mode 100644 index 0000000..d873861 Binary files /dev/null and b/docs/website/assets/dashboard-lebowski-lanes.png differ diff --git a/docs/website/assets/dashboard-strangers-ranch.png b/docs/website/assets/dashboard-strangers-ranch.png new file mode 100644 index 0000000..bac9fd4 Binary files /dev/null and b/docs/website/assets/dashboard-strangers-ranch.png differ diff --git a/docs/website/assets/device-detail.png b/docs/website/assets/device-detail.png new file mode 100644 index 0000000..f1b6471 Binary files /dev/null and b/docs/website/assets/device-detail.png differ diff --git a/docs/website/assets/device-list.png b/docs/website/assets/device-list.png new file mode 100644 index 0000000..460a981 Binary files /dev/null and b/docs/website/assets/device-list.png differ diff --git a/docs/website/assets/login.png b/docs/website/assets/login.png new file mode 100644 index 0000000..a9a84ec Binary files /dev/null and b/docs/website/assets/login.png differ diff --git a/docs/website/assets/topology.png b/docs/website/assets/topology.png new file mode 100644 index 0000000..7632bf6 Binary files /dev/null and b/docs/website/assets/topology.png differ diff --git a/docs/website/docs.html b/docs/website/docs.html new file mode 100644 index 0000000..07fb893 --- /dev/null +++ b/docs/website/docs.html @@ -0,0 +1,1412 @@ + + + + + + Documentation — The Other Dude | MikroTik Fleet Management Setup, API & Architecture Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + +
+

TOD — The Other Dude

+

Fleet management for MikroTik RouterOS devices. Built for MSPs who manage hundreds of routers across multiple tenants. Think “UniFi Controller, but for MikroTik.”

+

TOD is a self-hosted, multi-tenant platform that gives you centralized visibility, configuration management, real-time monitoring, and zero-knowledge security across your entire MikroTik fleet.

+ +

Features

+
    +
  • Fleet — Dashboard with at-a-glance fleet health, virtual-scrolled device table, geographic map, and subnet scanner for device discovery.
  • +
  • Configuration — Config Editor with two-phase safe apply, batch configuration across devices, bulk CLI commands, reusable templates, Simple Config (Linksys/Ubiquiti-style UI), and git-backed config backup with diff viewer.
  • +
  • Monitoring — Interactive network topology (ReactFlow + Dagre), real-time metrics via SSE/NATS, configurable alert rules, notification channels (email, webhook, Slack), audit trail, KMS transparency dashboard, and PDF reports.
  • +
  • Security — 1Password-style zero-knowledge architecture with SRP-6a auth, 2SKD key derivation, Secret Key with Emergency Kit, OpenBao KMS for per-tenant envelope encryption, Internal CA with SFTP cert deployment, WireGuard VPN, and AES-256-GCM credential encryption.
  • +
  • Administration — Full multi-tenancy with PostgreSQL RLS, user management with RBAC, API keys (mktp_ prefix), firmware management, maintenance windows, and setup wizard.
  • +
  • UX — Command palette (Cmd+K), Vim-style keyboard shortcuts, dark/light mode, Framer Motion page transitions, and shimmer skeleton loaders.
  • +
+ +

Tech Stack

+ + + + + + + + + + + + + + +
LayerTechnology
FrontendReact 19, TanStack Router + Query, Tailwind CSS 3.4, Vite
BackendPython 3.12, FastAPI 0.115, SQLAlchemy 2.0, asyncpg
PollerGo 1.24, go-routeros/v3, pgx/v5, nats.go
DatabasePostgreSQL 17 + TimescaleDB, Row-Level Security
CacheRedis 7
Message BusNATS with JetStream
KMSOpenBao 2.1 (Transit)
AuthSRP-6a (zero-knowledge), JWT
+
+ + +
+

Quick Start

+
# Clone and configure
+cp .env.example .env
+
+# Start infrastructure
+docker compose up -d
+
+# Build app images (one at a time to avoid OOM)
+docker compose build api
+docker compose build poller
+docker compose build frontend
+
+# Start the full stack
+docker compose up -d
+
+# Verify
+curl http://localhost:8001/health
+open http://localhost:3000
+ +

Environment Profiles

+ + + + + + + + + +
EnvironmentFrontendAPINotes
Devlocalhost:3000localhost:8001Hot-reload, volume-mounted source
Staginglocalhost:3080localhost:8081Built images, staging secrets
Productionlocalhost (port 80)Internal (proxied)Gunicorn workers, log rotation
+
+ + +
+

Deployment

+ +

Prerequisites

+
    +
  • Docker Engine 24+ with Docker Compose v2
  • +
  • At least 4 GB RAM (2 GB absolute minimum — builds are memory-intensive)
  • +
  • External SSD or fast storage recommended for Docker volumes
  • +
  • Network access to RouterOS devices on ports 8728 (API) and 8729 (API-SSL)
  • +
+ +

1. Clone and Configure

+
git clone <repository-url> tod
+cd tod
+
+# Copy environment template
+cp .env.example .env.prod
+ +

2. Generate Secrets

+
# Generate JWT secret
+python3 -c "import secrets; print(secrets.token_urlsafe(64))"
+
+# Generate credential encryption key (32 bytes, base64-encoded)
+python3 -c "import secrets, base64; print(base64.b64encode(secrets.token_bytes(32)).decode())"
+

Edit .env.prod with the generated values:

+
ENVIRONMENT=production
+JWT_SECRET_KEY=<generated-jwt-secret>
+CREDENTIAL_ENCRYPTION_KEY=<generated-encryption-key>
+POSTGRES_PASSWORD=<strong-password>
+
+# First admin user (created on first startup)
+FIRST_ADMIN_EMAIL=admin@example.com
+FIRST_ADMIN_PASSWORD=<strong-password>
+ +

3. Build Images

+

Build images one at a time to avoid out-of-memory crashes on constrained hosts:

+
docker compose -f docker-compose.yml -f docker-compose.prod.yml build api
+docker compose -f docker-compose.yml -f docker-compose.prod.yml build poller
+docker compose -f docker-compose.yml -f docker-compose.prod.yml build frontend
+ +

4. Start the Stack

+
docker compose -f docker-compose.yml -f docker-compose.prod.yml --env-file .env.prod up -d
+ +

5. Verify

+
# Check all services are running
+docker compose ps
+
+# Check API health (liveness)
+curl http://localhost:8000/health
+
+# Check readiness (PostgreSQL, Redis, NATS connected)
+curl http://localhost:8000/health/ready
+
+# Access the portal
+open http://localhost
+

Log in with the FIRST_ADMIN_EMAIL and FIRST_ADMIN_PASSWORD credentials set in step 2.

+ +

Required Environment Variables

+ + + + + + + + + + + + +
VariableDescriptionExample
ENVIRONMENTDeployment environmentproduction
JWT_SECRET_KEYJWT signing secret (min 32 chars)<generated>
CREDENTIAL_ENCRYPTION_KEYAES-256 key for device credentials (base64)<generated>
POSTGRES_PASSWORDPostgreSQL superuser password<strong-password>
FIRST_ADMIN_EMAILInitial admin account emailadmin@example.com
FIRST_ADMIN_PASSWORDInitial admin account password<strong-password>
+ +

Optional Environment Variables

+ + + + + + + + + + + + + + + + + + + +
VariableDefaultDescription
GUNICORN_WORKERS2API worker process count
DB_POOL_SIZE20App database connection pool size
DB_MAX_OVERFLOW40Max overflow connections above pool
DB_ADMIN_POOL_SIZE10Admin database connection pool size
DB_ADMIN_MAX_OVERFLOW20Admin max overflow connections
POLL_INTERVAL_SECONDS60Device polling interval
CONNECTION_TIMEOUT_SECONDS10RouterOS connection timeout
COMMAND_TIMEOUT_SECONDS30RouterOS per-command timeout
CIRCUIT_BREAKER_MAX_FAILURES5Consecutive failures before backoff
CIRCUIT_BREAKER_BASE_BACKOFF_SECONDS30Initial backoff duration
CIRCUIT_BREAKER_MAX_BACKOFF_SECONDS900Maximum backoff (15 min)
LOG_LEVELinfoLogging verbosity (debug/info/warn/error)
CORS_ORIGINShttp://localhost:3000Comma-separated CORS origins
+ +

Storage Configuration

+

Docker volumes mount to the host filesystem. Default locations:

+
    +
  • PostgreSQL data: ./docker-data/postgres
  • +
  • Redis data: ./docker-data/redis
  • +
  • NATS data: ./docker-data/nats
  • +
  • Git store (config backups): ./docker-data/git-store
  • +
+

To change storage locations, edit the volume mounts in docker-compose.yml.

+ +

Resource Limits

+

Container memory limits are enforced in docker-compose.prod.yml to prevent OOM crashes:

+ + + + + + + + + + + + +
ServiceMemory Limit
PostgreSQL512 MB
Redis128 MB
NATS128 MB
API512 MB
Poller256 MB
Frontend64 MB
+

Adjust under deploy.resources.limits.memory in docker-compose.prod.yml.

+ +

Monitoring (Optional)

+

Enable Prometheus and Grafana monitoring with the observability compose overlay:

+
docker compose \
+  -f docker-compose.yml \
+  -f docker-compose.prod.yml \
+  -f docker-compose.observability.yml \
+  --env-file .env.prod up -d
+
    +
  • Prometheus: http://localhost:9090
  • +
  • Grafana: http://localhost:3001 (default: admin/admin)
  • +
+ +

Exported Metrics

+ + + + + + + + + + + + + +
MetricSourceDescription
http_requests_totalAPIHTTP request count by method, path, status
http_request_duration_secondsAPIRequest latency histogram
mikrotik_poll_totalPollerPoll cycles by status (success/error/skipped)
mikrotik_poll_duration_secondsPollerPoll cycle duration histogram
mikrotik_devices_activePollerNumber of devices being polled
mikrotik_circuit_breaker_skips_totalPollerPolls skipped due to backoff
mikrotik_nats_publish_totalPollerNATS publishes by subject and status
+ +

Troubleshooting

+ + + + + + + + + + + + + + +
IssueSolution
API won’t start with secret errorGenerate production secrets (see step 2 above)
Build crashes with OOMBuild images one at a time (see step 3 above)
Device shows offlineCheck network access to device API port (8728/8729)
Health check failsCheck docker compose logs api for startup errors
Rate limited (429)Wait 60 seconds or check Redis connectivity
Migration failsCheck docker compose logs api for Alembic errors
NATS subscriber won’t startNon-fatal — API runs without NATS; check NATS container health
Poller circuit breaker activeDevice unreachable; check CIRCUIT_BREAKER_* env vars to tune backoff
+
+ + + + + + +
+

System Overview

+

TOD is a containerized MSP fleet management platform for MikroTik RouterOS devices. It uses a three-service architecture: a React frontend, a Python FastAPI backend, and a Go poller. All services communicate through PostgreSQL, Redis, and NATS JetStream. Multi-tenancy is enforced at the database level via PostgreSQL Row-Level Security (RLS).

+ +

Architecture Diagram

+
+--------------+     +------------------+     +---------------+
+|   Frontend   |---->|   Backend API    |<--->|   Go Poller   |
+|  React/nginx |     |    FastAPI       |     |  go-routeros  |
++--------------+     +--------+---------+     +-------+-------+
+                              |                       |
+               +--------------+-------------------+---+
+               |              |                   |
+      +--------v---+   +-----v-------+   +-------v-------+
+      |   Redis    |   | PostgreSQL  |   |    NATS       |
+      |  locks,    |   | 17+Timescale|   |  JetStream    |
+      |  cache     |   | DB + RLS    |   |  pub/sub      |
+      +------------+   +-------------+   +-------+-------+
+                                                 |
+                                          +------v-------+
+                                          |   OpenBao    |
+                                          | Transit KMS  |
+                                          +--------------+
+ +

Services

+ +

Frontend (React / nginx)

+
    +
  • Stack: React 19, TypeScript, TanStack Router (file-based routing), TanStack Query (data fetching), Tailwind CSS 3.4, Vite
  • +
  • Production: Static build served by nginx on port 80 (exposed as port 3000)
  • +
  • Development: Vite dev server with hot module replacement
  • +
  • Design system: Geist Sans + Geist Mono fonts, HSL color tokens via CSS custom properties, class-based dark/light mode
  • +
  • Real-time: Server-Sent Events (SSE) for live device status updates, alerts, and operation progress
  • +
  • Client-side encryption: SRP-6a authentication flow with 2SKD key derivation; Emergency Kit PDF generation
  • +
  • UX features: Command palette (Cmd+K), Framer Motion page transitions, collapsible sidebar, skeleton loaders
  • +
  • Memory limit: 64 MB
  • +
+ +

Backend API (FastAPI)

+
    +
  • Stack: Python 3.12+, FastAPI 0.115+, SQLAlchemy 2.0 async, asyncpg, Gunicorn
  • +
  • Two database engines: +
      +
    • admin_engine (superuser) — used only for auth/bootstrap and NATS subscribers that need cross-tenant access
    • +
    • app_engine (non-superuser app_user role) — used for all device/data routes, enforces RLS
    • +
    +
  • +
  • Authentication: JWT tokens (15min access, 7d refresh), SRP-6a zero-knowledge proof, RBAC (super_admin, admin, operator, viewer)
  • +
  • NATS subscribers: Three independent subscribers for device status, metrics, and firmware events. Non-fatal startup — API serves requests even if NATS is unavailable
  • +
  • Background services: APScheduler for nightly config backups and daily firmware version checks
  • +
  • Middleware stack (LIFO): RequestID → SecurityHeaders → RateLimiting → CORS → Route handler
  • +
  • Health endpoints: /health (liveness), /health/ready (readiness — checks PostgreSQL, Redis, NATS)
  • +
  • Memory limit: 512 MB
  • +
+ +

API Routers

+

The backend exposes route groups under the /api prefix:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RouterPurpose
authLogin (SRP-6a + legacy), token refresh, registration
tenantsTenant CRUD (super_admin only)
usersUser management, RBAC
devicesDevice CRUD, status, commands
device_groupsLogical device grouping
device_tagsTagging and filtering
metricsTime-series metrics (TimescaleDB)
config_backupsConfiguration backup history
config_editorLive RouterOS config editing
firmwareFirmware version tracking and upgrades
alertsAlert rules and active alerts
eventsDevice event log
device_logsRouterOS system logs
templatesConfiguration templates
clientsConnected client devices
topologyNetwork topology (ReactFlow data)
sseServer-Sent Events streams
audit_logsImmutable audit trail
reportsPDF report generation (Jinja2 + WeasyPrint)
api_keysAPI key management (mktp_ prefix)
maintenance_windowsScheduled maintenance with alert suppression
vpnWireGuard VPN management
certificatesInternal CA and device TLS certificates
transparencyKMS access event dashboard
+ +

Go Poller

+
    +
  • Stack: Go 1.23, go-routeros/v3, pgx/v5, nats.go
  • +
  • Polling model: Synchronous per-device polling on a configurable interval (default 60s)
  • +
  • Device communication: RouterOS binary API over TLS (port 8729), InsecureSkipVerify for self-signed certs
  • +
  • TLS fallback: Three-tier strategy — CA-verified → InsecureSkipVerify → plain API
  • +
  • Distributed locking: Redis locks prevent concurrent polling of the same device (safe for multi-instance deployment)
  • +
  • Circuit breaker: Backs off from unreachable devices to avoid wasting poll cycles
  • +
  • Credential decryption: OpenBao Transit with LRU cache (1024 entries, 5min TTL) to minimize KMS calls
  • +
  • Output: Publishes poll results to NATS JetStream; the API’s NATS subscribers process and persist them
  • +
  • Database access: Uses poller_user role which bypasses RLS (needs cross-tenant device access)
  • +
  • Memory limit: 256 MB
  • +
+ +

Infrastructure Services

+ +

PostgreSQL 17 + TimescaleDB

+
    +
  • Image: timescale/timescaledb:2.17.2-pg17
  • +
  • Row-Level Security (RLS): Enforces tenant isolation at the database level. All data tables have a tenant_id column; RLS policies filter by current_setting('app.tenant_id')
  • +
  • Database roles: +
      +
    • postgres (superuser) — admin engine, auth/bootstrap, migrations
    • +
    • app_user (non-superuser) — RLS-enforced, used by API for data routes
    • +
    • poller_user — bypasses RLS, used by Go poller for cross-tenant device access
    • +
    +
  • +
  • TimescaleDB hypertables: Time-series storage for device metrics (CPU, memory, interface traffic, etc.)
  • +
  • Migrations: Alembic, run automatically on API startup
  • +
  • Memory limit: 512 MB
  • +
+ +

Redis

+
    +
  • Image: redis:7-alpine
  • +
  • Distributed locking for the Go poller (prevents concurrent polling of the same device)
  • +
  • Rate limiting on auth endpoints (5 requests/min)
  • +
  • Credential cache for OpenBao Transit responses
  • +
  • Memory limit: 128 MB
  • +
+ +

NATS JetStream

+
    +
  • Image: nats:2-alpine
  • +
  • Role: Message bus between the Go poller and the Python API
  • +
  • Streams: DEVICE_EVENTS (poll results, status changes), ALERT_EVENTS (SSE delivery), OPERATION_EVENTS (SSE delivery)
  • +
  • Durable consumers: Ensure no message loss during API restarts
  • +
  • Memory limit: 128 MB
  • +
+ +

OpenBao (HashiCorp Vault fork)

+
    +
  • Image: openbao/openbao:2.1
  • +
  • Transit secrets engine: Provides envelope encryption for device credentials at rest
  • +
  • Per-tenant keys: Each tenant gets a dedicated Transit encryption key
  • +
  • Memory limit: 256 MB
  • +
+ +

WireGuard

+
    +
  • Image: lscr.io/linuxserver/wireguard
  • +
  • Role: VPN gateway for reaching RouterOS devices on remote networks
  • +
  • Port: 51820/UDP
  • +
  • Memory limit: 128 MB
  • +
+ +

Container Memory Limits

+ + + + + + + + + + + + + + +
ServiceLimit
PostgreSQL512 MB
API512 MB
Go Poller256 MB
OpenBao256 MB
Redis128 MB
NATS128 MB
WireGuard128 MB
Frontend (nginx)64 MB
+ +

Network Ports

+ + + + + + + + + + + + + + +
ServiceInternal PortExternal PortProtocol
Frontend803000HTTP
API80008001HTTP
PostgreSQL54325432TCP
Redis63796379TCP
NATS42224222TCP
NATS Monitor82228222HTTP
OpenBao82008200HTTP
WireGuard5182051820UDP
+
+ + +
+

Data Flow

+ +

Device Polling Cycle

+
Go Poller        Redis      OpenBao    RouterOS     NATS        API        PostgreSQL
+   |               |           |           |           |           |            |
+   +--query list-->|           |           |           |           |            |
+   |<--------------+           |           |           |           |            |
+   +--acquire lock->|          |           |           |           |            |
+   |<--lock granted-+          |           |           |           |            |
+   +--decrypt creds (miss)---->|           |           |           |            |
+   |<--plaintext creds--------+           |           |           |            |
+   +--binary API (8729 TLS)--------------->|           |           |            |
+   |<--system info, interfaces, metrics---+           |           |            |
+   +--publish poll result--------------------------------->|       |            |
+   |               |           |           |           |  subscribe>|           |
+   |               |           |           |           |           +--upsert--->|
+   +--release lock->|          |           |           |           |            |
+
    +
  1. Poller queries PostgreSQL for the list of active devices
  2. +
  3. Acquires a Redis distributed lock per device (prevents duplicate polling)
  4. +
  5. Decrypts device credentials via OpenBao Transit (LRU cache avoids repeated KMS calls)
  6. +
  7. Connects to the RouterOS binary API on port 8729 over TLS
  8. +
  9. Collects system info, interface stats, routing tables, and metrics
  10. +
  11. Publishes results to NATS JetStream
  12. +
  13. API NATS subscriber processes results and upserts into PostgreSQL
  14. +
  15. Releases Redis lock
  16. +
+ +

Config Push (Two-Phase with Panic Revert)

+
Frontend        API           RouterOS
+   |              |               |
+   +--push config->|              |
+   |              +--apply config->|
+   |              +--set revert--->|
+   |              |<--ack---------+
+   |<--pending----+               |
+   |              |               |  (timer counting down)
+   +--confirm----->|              |
+   |              +--cancel timer->|
+   |              |<--ack---------+
+   |<--confirmed--+               |
+
    +
  1. Frontend sends config commands to the API
  2. +
  3. API connects to the device and applies the configuration
  4. +
  5. Sets a revert timer on the device (RouterOS safe mode / scheduler)
  6. +
  7. Returns pending status to the frontend
  8. +
  9. User confirms the change works (e.g., connectivity still up)
  10. +
  11. If confirmed: API cancels the revert timer, config is permanent
  12. +
  13. If timeout or rejected: device automatically reverts to the previous configuration
  14. +
+

This pattern prevents lockouts from misconfigured firewall rules or IP changes.

+ +

SRP-6a Authentication Flow

+
Browser                     API                   PostgreSQL
+   |                          |                       |
+   +--register---------------->|                      |
+   |  (email, salt, verifier) +--store verifier------>|
+   |                          |                       |
+   +--login step 1------------>|                      |
+   |  (email, client_public)  +--lookup verifier----->|
+   |<--(salt, server_public)--+<----------------------+
+   |                          |                       |
+   +--login step 2------------>|                      |
+   |  (client_proof)          +--verify proof---------+
+   |<--(server_proof, JWT)----+                       |
+
    +
  1. Registration: Client derives a verifier from password + secret_key using PBKDF2 (650K iterations) + HKDF + XOR (2SKD). Only the salt and verifier are sent to the server — never the password.
  2. +
  3. Login step 1: Client sends email and ephemeral public value; server responds with stored salt and its own ephemeral public value.
  4. +
  5. Login step 2: Client computes a proof from the shared session key; server validates the proof without ever seeing the password.
  6. +
  7. Token issuance: On successful proof, server issues JWT (15min access + 7d refresh).
  8. +
  9. Emergency Kit: A downloadable PDF containing the user’s secret key for account recovery.
  10. +
+
+ + +
+

Multi-Tenancy

+

TOD enforces tenant isolation at the database level using PostgreSQL Row-Level Security (RLS), making cross-tenant data access structurally impossible.

+ +

How It Works

+
    +
  • Every data table includes a tenant_id column.
  • +
  • PostgreSQL RLS policies filter rows by current_setting('app.tenant_id').
  • +
  • The API sets tenant context (SET app.tenant_id = ...) on each database session, derived from the authenticated user’s JWT.
  • +
  • super_admin role has NULL tenant_id and can access all tenants.
  • +
  • poller_user bypasses RLS intentionally (needs cross-tenant device access for polling).
  • +
  • Tenant isolation is enforced at the database level, not the application level — even a compromised API cannot leak cross-tenant data through app_user connections.
  • +
+ +

Database Roles

+ + + + + + + + + +
RoleRLSPurpose
postgresBypasses (superuser)Admin engine, auth/bootstrap, migrations
app_userEnforcedAll device/data routes in the API
poller_userBypassesCross-tenant device access for Go poller
+ +

Security Layers

+ + + + + + + + + + + + + + + +
LayerMechanismPurpose
AuthenticationSRP-6aZero-knowledge proof — password never transmitted or stored
Key Derivation2SKD (PBKDF2 650K + HKDF + XOR)Two-secret key derivation from password + secret key
Encryption at RestOpenBao TransitEnvelope encryption for device credentials
Tenant IsolationPostgreSQL RLSDatabase-level row filtering by tenant_id
Access ControlJWT + RBACRole-based permissions (super_admin, admin, operator, viewer)
Rate LimitingRedis-backedAuth endpoints limited to 5 requests/min
TLS CertificatesInternal CACertificate management and deployment to RouterOS devices
Security HeadersMiddlewareCSP, SRI hashes on JS bundles, X-Frame-Options, etc.
Secret ValidationStartup checkRejects known-insecure defaults in non-dev environments
+
+ + + + + + +
+

First Login

+
    +
  1. Navigate to the portal URL provided by your administrator.
  2. +
  3. Log in with the admin credentials created during initial deployment.
  4. +
  5. Complete SRP security enrollment — the portal uses zero-knowledge authentication (SRP-6a), so a unique Secret Key is generated for your account.
  6. +
  7. Save your Emergency Kit PDF immediately. This PDF contains your Secret Key, which you will need to log in from any new browser or device. Without it, you cannot recover access.
  8. +
  9. Complete the Setup Wizard to create your first organization and add your first device.
  10. +
+ +

Setup Wizard

+

The Setup Wizard launches automatically for first-time super_admin users. It walks through three steps:

+
    +
  • Step 1 — Create Organization: Enter a name for your tenant (organization). This is the top-level container for all your devices, users, and configuration.
  • +
  • Step 2 — Add Device: Enter the IP address, API port (default 8729 for TLS), and RouterOS credentials for your first device. The portal will attempt to connect and verify the device.
  • +
  • Step 3 — Verify & Complete: The portal polls the device to confirm connectivity. Once verified, you are taken to the dashboard.
  • +
+

You can always add more organizations and devices later from the sidebar.

+
+ + + + + +
+

Device Management

+ +

Adding Devices

+

There are three ways to add devices to your fleet:

+
    +
  1. Setup Wizard — automatically offered on first login.
  2. +
  3. Fleet Table — click the “Add Device” button from the Devices page.
  4. +
  5. Subnet Scanner — enter a CIDR range (e.g., 192.168.1.0/24) to auto-discover MikroTik devices on the network.
  6. +
+

When adding a device, provide:

+
    +
  • IP Address — the management IP of the RouterOS device.
  • +
  • API Port — default is 8729 (TLS). The portal connects via the RouterOS binary API protocol.
  • +
  • Credentials — username and password for the device. Credentials are encrypted at rest with AES-256-GCM.
  • +
+ +

Device Detail Tabs

+ + + + + + + + + + + + + +
TabDescription
OverviewSystem info, uptime, hardware model, RouterOS version, resource usage, and interface status summary.
InterfacesReal-time traffic graphs for each network interface.
ConfigBrowse the full device configuration tree by RouterOS path.
FirewallView and manage firewall filter rules, NAT rules, and address lists.
DHCPActive DHCP leases, server configuration, and address pools.
BackupsConfiguration backup timeline with side-by-side diff viewer to compare changes over time.
ClientsConnected clients and wireless registrations.
+ +

Simple Config

+

Simple Config provides a consumer-router-style interface modeled after Linksys and Ubiquiti UIs. It is designed for operators who prefer guided configuration over raw RouterOS paths.

+

Seven category tabs:

+
    +
  1. Internet — WAN connection type, PPPoE, DHCP client settings.
  2. +
  3. LAN / DHCP — LAN addressing, DHCP server and pool configuration.
  4. +
  5. WiFi — Wireless SSID, security, and channel settings.
  6. +
  7. Port Forwarding — NAT destination rules for inbound services.
  8. +
  9. Firewall — Simplified firewall rule management.
  10. +
  11. DNS — DNS server and static DNS entries.
  12. +
  13. System — Device identity, timezone, NTP, admin password.
  14. +
+

Toggle between Simple (guided) and Standard (full config editor) modes at any time. Per-device settings are stored in browser localStorage.

+
+ + +
+

Config Editor

+

The Config Editor provides direct access to RouterOS configuration paths (e.g., /ip/address, /ip/firewall/filter, /interface/bridge).

+
    +
  • Select a device from the header dropdown.
  • +
  • Navigate the configuration tree to browse, add, edit, or delete entries.
  • +
+ +

Apply Modes

+
    +
  • Standard Apply — changes are applied immediately.
  • +
  • Safe Apply — two-phase commit with automatic panic-revert. Changes are applied, and you have a confirmation window to accept them. If the confirmation times out (device becomes unreachable), changes automatically revert to prevent lockouts.
  • +
+

Safe Apply is strongly recommended for firewall rules and routing changes on remote devices.

+
+ + +
+

Monitoring & Alerts

+ +

Alert Rules

+

Create threshold-based rules that fire when device metrics cross defined boundaries:

+
    +
  • Select the metric to monitor (CPU, memory, disk, interface traffic, uptime, etc.).
  • +
  • Set the threshold value and comparison operator.
  • +
  • Choose severity: info, warning, or critical.
  • +
  • Assign one or more notification channels.
  • +
+ +

Notification Channels

+ + + + + + + + + +
ChannelDescription
EmailSMTP-based email notifications. Configure server, port, and recipients.
WebhookHTTP POST to any URL with a JSON payload containing alert details.
SlackSlack incoming webhook with Block Kit formatting for rich alert messages.
+ +

Maintenance Windows

+
    +
  • Define start and end times.
  • +
  • Apply to specific devices or fleet-wide.
  • +
  • Alerts generated during the window are recorded but do not trigger notifications.
  • +
  • Maintenance windows can be recurring or one-time.
  • +
+
+ + +
+

Reports

+

Generate PDF reports from the Reports page. Four report types are available:

+ + + + + + + + + + +
ReportContent
Fleet SummaryOverall fleet health, device counts by status, top alerts, and aggregate statistics.
Device HealthPer-device detailed report with hardware info, resource trends, and recent events.
ComplianceSecurity posture audit — firmware versions, default credentials, firewall policy checks.
SLAUptime and availability metrics over a selected period with percentage calculations.
+

Reports are generated as downloadable PDFs using server-side rendering (Jinja2 + WeasyPrint).

+
+ + + + + + +
+

Security Model

+

TOD implements a 1Password-inspired zero-knowledge security architecture. The server never stores or sees user passwords. All data is stored on infrastructure you own and control — no external telemetry, analytics, or third-party data transmission.

+ +

Data Protection

+
    +
  • Config backups: Encrypted at rest via OpenBao Transit envelope encryption before database storage.
  • +
  • Audit logs: Encrypted at rest via Transit encryption — audit log content is protected even from database administrators.
  • +
  • Subresource Integrity (SRI): SHA-384 hashes on JavaScript bundles prevent tampering with frontend code.
  • +
  • Content Security Policy (CSP): Strict CSP headers prevent XSS, code injection, and unauthorized resource loading.
  • +
  • No external dependencies: Fully self-hosted with no external analytics, telemetry, CDNs, or third-party services. The only outbound connections are: +
      +
    • RouterOS firmware update checks (no device data sent)
    • +
    • SMTP for email notifications (if configured)
    • +
    • Webhooks for alerts (if configured)
    • +
    +
  • +
+ +

Security Headers

+ + + + + + + + + + + +
HeaderValuePurpose
Strict-Transport-Securitymax-age=31536000; includeSubDomainsForce HTTPS connections
X-Content-Type-OptionsnosniffPrevent MIME-type sniffing
X-Frame-OptionsDENYPrevent clickjacking via iframes
Content-Security-PolicyStrict policyPrevent XSS and code injection
Referrer-Policystrict-origin-when-cross-originLimit referrer information leakage
+ +

Audit Trail

+
    +
  • Immutable audit log: All significant actions are recorded — logins, configuration changes, device operations, admin actions.
  • +
  • Fire-and-forget logging: The log_action() function records audit events asynchronously without blocking the main request.
  • +
  • Per-tenant access: Tenants can only view their own audit logs (enforced by RLS).
  • +
  • Encryption at rest: Audit log content is encrypted via OpenBao Transit.
  • +
  • CSV export: Audit logs can be exported in CSV format for compliance and reporting.
  • +
  • Account deletion: When a user deletes their account, audit log entries are anonymized (PII removed) but the action records are retained for security compliance.
  • +
+ +

Data Retention

+ + + + + + + + + + + + + + + +
Data TypeRetentionNotes
User accountsUntil deletedUsers can self-delete from Settings
Device metrics90 daysPurged by TimescaleDB retention policy
Configuration backupsIndefiniteStored in git repositories on your server
Audit logsIndefiniteAnonymized on account deletion
API keysUntil revokedCascade-deleted with user account
Encrypted key materialUntil user deletedCascade-deleted with user account
Session data (Redis)15 min / 7 daysAuto-expiring access/refresh tokens
Password reset tokens30 minutesAuto-expire
SRP session stateShort-livedAuto-expire in Redis
+ +

GDPR Compliance

+
    +
  • Right of Access (Art. 15): Users can view their account information on the Settings page.
  • +
  • Right to Data Portability (Art. 20): Users can export all personal data in JSON format from Settings.
  • +
  • Right to Erasure (Art. 17): Users can permanently delete their account and all associated data. Audit logs are anonymized (PII removed) with a deletion receipt generated for compliance verification.
  • +
  • Right to Rectification (Art. 16): Account information can be updated by the tenant administrator.
  • +
+

As a self-hosted application, the deployment operator is the data controller and is responsible for compliance with applicable data protection laws.

+
+ + +
+

Authentication

+ +

SRP-6a Zero-Knowledge Proof

+

TOD uses the Secure Remote Password (SRP-6a) protocol for authentication, ensuring the server never receives, transmits, or stores user passwords.

+
    +
  • SRP-6a protocol: Password is verified via a zero-knowledge proof — only a cryptographic verifier derived from the password is stored on the server, never the password itself.
  • +
  • Session management: JWT tokens with 15-minute access token lifetime and 7-day refresh token lifetime, delivered via httpOnly cookies.
  • +
  • SRP session state: Ephemeral SRP handshake data stored in Redis with automatic expiration.
  • +
+ +

Authentication Flow

+
Client                                Server
+  |                                     |
+  |  POST /auth/srp/init {email}        |
+  |------------------------------------>|
+  |  {salt, server_ephemeral_B}         |
+  |<------------------------------------|
+  |                                     |
+  |  [Client derives session key from   |
+  |   password + Secret Key + salt + B] |
+  |                                     |
+  |  POST /auth/srp/verify {A, M1}      |
+  |------------------------------------>|
+  |  [Server verifies M1 proof]         |
+  |  {M2, access_token, refresh_token}  |
+  |<------------------------------------|
+ +

Two-Secret Key Derivation (2SKD)

+

Combines the user password with a 128-bit Secret Key using a multi-step derivation process, ensuring that compromise of either factor alone is insufficient:

+
    +
  • PBKDF2 with 650,000 iterations stretches the password.
  • +
  • HKDF expansion derives the final key material.
  • +
  • XOR combination of both factors produces the verifier input.
  • +
+ +

Secret Key & Emergency Kit

+
    +
  • Secret Key format: A3-XXXXXX (128-bit), stored exclusively in the browser’s IndexedDB. The server never sees or stores the Secret Key.
  • +
  • Emergency Kit: Downloadable PDF containing the Secret Key for account recovery. Generated client-side.
  • +
+
+ + +
+

Encryption

+ +

Credential Encryption

+

Device credentials (RouterOS usernames and passwords) are encrypted at rest using envelope encryption:

+
    +
  • Encryption algorithm: AES-256-GCM (via Fernet symmetric encryption).
  • +
  • Key management: OpenBao Transit secrets engine provides the master encryption keys.
  • +
  • Per-tenant isolation: Each tenant has its own encryption key in OpenBao Transit.
  • +
  • Envelope encryption: Data is encrypted with a data encryption key (DEK), which is itself encrypted by the tenant’s Transit key.
  • +
+ +

Go Poller LRU Cache

+

The Go poller decrypts credentials at runtime via the Transit API, with an LRU cache (1,024 entries, 5-minute TTL) to reduce KMS round-trips. Cache hits avoid OpenBao API calls entirely.

+ +

Additional Encryption

+
    +
  • CA private keys: Encrypted with AES-256-GCM before database storage. PEM key material is never logged.
  • +
  • Config backups: Encrypted at rest via OpenBao Transit before database storage.
  • +
  • Audit logs: Content encrypted via Transit — protected even from database administrators.
  • +
+
+ + +
+

RBAC & Tenants

+ +

Role-Based Access Control

+ + + + + + + + + + +
RoleScopeCapabilities
super_adminGlobalFull system access, tenant management, user management across all tenants
adminTenantManage devices, users, settings, certificates within their tenant
operatorTenantDevice operations, configuration changes, monitoring
viewerTenantRead-only access to devices, metrics, and dashboards
+
    +
  • RBAC is enforced at both the API middleware layer and database level.
  • +
  • API keys inherit the operator permission level and are scoped to a single tenant.
  • +
  • API key tokens use the mktp_ prefix and are stored as SHA-256 hashes (the plaintext token is shown once at creation and never stored).
  • +
+ +

Tenant Isolation via RLS

+

Multi-tenancy is enforced at the database level via PostgreSQL Row-Level Security (RLS). The app_user database role automatically filters all queries by the authenticated user’s tenant_id. Super admins operate outside tenant scope.

+ +

Internal CA & TLS Fallback

+

TOD includes a per-tenant Internal Certificate Authority for managing TLS certificates on RouterOS devices:

+
    +
  • Per-tenant CA: Each tenant can generate its own self-signed Certificate Authority.
  • +
  • Deployment: Certificates are deployed to devices via SFTP.
  • +
  • Three-tier TLS fallback: The Go poller attempts connections in order: +
      +
    1. CA-verified TLS (using the tenant’s CA certificate)
    2. +
    3. InsecureSkipVerify TLS (for self-signed RouterOS certs)
    4. +
    5. Plain API connection (fallback)
    6. +
    +
  • +
  • Key protection: CA private keys are encrypted with AES-256-GCM before database storage.
  • +
+
+ + + + + + +
+

API Endpoints

+ +

Overview

+

TOD exposes a REST API built with FastAPI. Interactive documentation is available at:

+
    +
  • Swagger UI: http://<host>:<port>/docs (dev environment only)
  • +
  • ReDoc: http://<host>:<port>/redoc (dev environment only)
  • +
+

Both Swagger and ReDoc are disabled in staging/production environments.

+ +

Endpoint Groups

+

All API routes are mounted under the /api prefix.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
GroupPrefixDescription
Auth/api/auth/*Login, register, SRP exchange, password reset, token refresh
Tenants/api/tenants/*Tenant/organization CRUD
Users/api/users/*User management, RBAC role assignment
Devices/api/devices/*Device CRUD, scanning, status
Device Groups/api/device-groups/*Logical device grouping
Device Tags/api/device-tags/*Tag-based device labeling
Metrics/api/metrics/*TimescaleDB device metrics (CPU, memory, traffic)
Config Backups/api/config-backups/*Automated RouterOS config backup history
Config Editor/api/config-editor/*Live RouterOS config browsing and editing
Firmware/api/firmware/*RouterOS firmware version management and upgrades
Alerts/api/alerts/*Alert rule CRUD, alert history
Events/api/events/*Device event log
Device Logs/api/device-logs/*RouterOS syslog entries
Templates/api/templates/*Config templates for batch operations
Clients/api/clients/*Connected client (DHCP lease) data
Topology/api/topology/*Network topology map data
SSE/api/sse/*Server-Sent Events for real-time updates
Audit Logs/api/audit-logs/*Immutable audit trail
Reports/api/reports/*PDF report generation (Jinja2 + WeasyPrint)
API Keys/api/api-keys/*API key CRUD
Maintenance Windows/api/maintenance-windows/*Scheduled maintenance window management
VPN/api/vpn/*WireGuard VPN tunnel management
Certificates/api/certificates/*Internal CA and device certificate management
Transparency/api/transparency/*KMS access event dashboard
+ +

Health Checks

+ + + + + + + + + +
EndpointTypeDescription
GET /healthLivenessAlways returns 200 if the API process is alive. Response includes version.
GET /health/readyReadinessReturns 200 only when PostgreSQL, Redis, and NATS are all healthy. Returns 503 otherwise.
GET /api/healthLivenessBackward-compatible alias under /api prefix.
+
+ + +
+

API Authentication

+ +

SRP-6a Login

+
    +
  • POST /api/auth/login — SRP-6a authentication (returns JWT access + refresh tokens)
  • +
  • POST /api/auth/refresh — Refresh an expired access token
  • +
  • POST /api/auth/logout — Invalidate the current session
  • +
+

All authenticated endpoints require one of:

+
    +
  • Authorization: Bearer <token> header
  • +
  • httpOnly cookie (set automatically by the login flow)
  • +
+

Access tokens expire after 15 minutes. Refresh tokens are valid for 7 days.

+ +

API Key Authentication

+
    +
  • Create API keys in Admin > API Keys
  • +
  • Use header: X-API-Key: mktp_<key>
  • +
  • Keys have operator-level RBAC permissions
  • +
  • Prefix: mktp_, stored as SHA-256 hash
  • +
+ +

Rate Limiting

+
    +
  • Auth endpoints: 5 requests/minute per IP
  • +
  • General endpoints: no global rate limit (per-route limits may apply)
  • +
+

Rate limit violations return HTTP 429 with a JSON error body.

+ +

RBAC Roles

+ + + + + + + + + + +
RoleScopeDescription
super_adminGlobal (no tenant)Full platform access, tenant management
adminTenantFull access within their tenant
operatorTenantDevice operations, config changes
viewerTenantRead-only access
+
+ + +
+

Error Handling

+ +

Error Format

+

All error responses use a standard JSON format:

+
{
+  "detail": "Human-readable error message"
+}
+ +

Status Codes

+ + + + + + + + + + + + + + + +
CodeMeaning
400Bad request / validation error
401Unauthorized (missing or expired token)
403Forbidden (insufficient RBAC permissions)
404Resource not found
409Conflict (duplicate resource)
422Unprocessable entity (Pydantic validation)
429Rate limit exceeded
500Internal server error
503Service unavailable (readiness check failed)
+
+ + + + + + +
+

Environment Variables

+

TOD uses Pydantic Settings for configuration. All values can be set via environment variables or a .env file in the backend working directory.

+ +

Application

+ + + + + + + + + + + + +
VariableDefaultDescription
APP_NAMETOD - The Other DudeApplication display name
APP_VERSION0.1.0Semantic version string
ENVIRONMENTdevRuntime environment: dev, staging, or production
DEBUGfalseEnable debug mode
CORS_ORIGINShttp://localhost:3000,...Comma-separated list of allowed CORS origins
APP_BASE_URLhttp://localhost:5173Frontend base URL (used in password reset emails)
+ +

Authentication & JWT

+ + + + + + + + + + + +
VariableDefaultDescription
JWT_SECRET_KEY(insecure dev default)HMAC signing key for JWTs. Must be changed in production.
JWT_ALGORITHMHS256JWT signing algorithm
JWT_ACCESS_TOKEN_EXPIRE_MINUTES15Access token lifetime in minutes
JWT_REFRESH_TOKEN_EXPIRE_DAYS7Refresh token lifetime in days
PASSWORD_RESET_TOKEN_EXPIRE_MINUTES30Password reset link validity in minutes
+ +

Database

+ + + + + + + + + + + + + +
VariableDefaultDescription
DATABASE_URLpostgresql+asyncpg://postgres:postgres@localhost:5432/mikrotikAdmin (superuser) async database URL. Used for migrations and bootstrap.
SYNC_DATABASE_URLpostgresql+psycopg2://postgres:postgres@localhost:5432/mikrotikSynchronous URL used by Alembic migrations only.
APP_USER_DATABASE_URLpostgresql+asyncpg://app_user:app_password@localhost:5432/mikrotikNon-superuser async URL. Enforces PostgreSQL RLS for tenant isolation.
DB_POOL_SIZE20App user connection pool size
DB_MAX_OVERFLOW40App user pool max overflow connections
DB_ADMIN_POOL_SIZE10Admin connection pool size
DB_ADMIN_MAX_OVERFLOW20Admin pool max overflow connections
+ +

Security

+ + + + + + + +
VariableDefaultDescription
CREDENTIAL_ENCRYPTION_KEY(insecure dev default)AES-256-GCM encryption key for device credentials at rest. Must be exactly 32 bytes, base64-encoded. Must be changed in production.
+ +

OpenBao / Vault (KMS)

+ + + + + + + + +
VariableDefaultDescription
OPENBAO_ADDRhttp://localhost:8200OpenBao Transit server address for per-tenant envelope encryption
OPENBAO_TOKEN(insecure dev default)OpenBao authentication token. Must be changed in production.
+ +

NATS

+ + + + + + + +
VariableDefaultDescription
NATS_URLnats://localhost:4222NATS JetStream server URL for pub/sub between Go poller and Python API
+ +

Redis

+ + + + + + + +
VariableDefaultDescription
REDIS_URLredis://localhost:6379/0Redis URL for caching, distributed locks, and rate limiting
+ +

SMTP (Notifications)

+ + + + + + + + + + + + +
VariableDefaultDescription
SMTP_HOSTlocalhostSMTP server hostname
SMTP_PORT587SMTP server port
SMTP_USER(none)SMTP authentication username
SMTP_PASSWORD(none)SMTP authentication password
SMTP_USE_TLSfalseEnable STARTTLS for SMTP connections
SMTP_FROM_ADDRESSnoreply@mikrotik-portal.localSender address for outbound emails
+ +

Firmware

+ + + + + + + + +
VariableDefaultDescription
FIRMWARE_CACHE_DIR/data/firmware-cachePath to firmware download cache (PVC mount in production)
FIRMWARE_CHECK_INTERVAL_HOURS24Hours between automatic RouterOS version checks
+ +

Storage Paths

+ + + + + + + + +
VariableDefaultDescription
GIT_STORE_PATH./git-storePath to bare git repos for config backup history. In production: /data/git-store on a ReadWriteMany PVC.
WIREGUARD_CONFIG_PATH/data/wireguardShared volume path for WireGuard configuration files
+ +

Bootstrap

+ + + + + + + + +
VariableDefaultDescription
FIRST_ADMIN_EMAIL(none)Email for the initial super_admin user. Only used if no users exist in the database.
FIRST_ADMIN_PASSWORD(none)Password for the initial super_admin user. The user is created with must_upgrade_auth=True, triggering SRP registration on first login.
+ +

Production Safety

+

TOD refuses to start in staging or production environments if any of these variables still have their insecure dev defaults:

+
    +
  • JWT_SECRET_KEY
  • +
  • CREDENTIAL_ENCRYPTION_KEY
  • +
  • OPENBAO_TOKEN
  • +
+

The process exits with code 1 and a clear error message indicating which variable needs to be rotated.

+
+ + +
+

Docker Compose

+ +

Profiles

+ + + + + + + + +
ProfileCommandServices
(default)docker compose up -dInfrastructure only: PostgreSQL, Redis, NATS, OpenBao
fulldocker compose --profile full up -dAll services: infrastructure + API, Poller, Frontend
+ +

Container Memory Limits

+

All containers have enforced memory limits to prevent OOM on the host:

+ + + + + + + + + + + + +
ServiceMemory Limit
PostgreSQL512 MB
Redis128 MB
NATS128 MB
API512 MB
Poller256 MB
Frontend64 MB
+

Build Docker images sequentially (not in parallel) to avoid OOM during builds.

+
+ +
+
+ + + + + + diff --git a/docs/website/index.html b/docs/website/index.html new file mode 100644 index 0000000..139c087 --- /dev/null +++ b/docs/website/index.html @@ -0,0 +1,520 @@ + + + + + + The Other Dude — Fleet Management for MikroTik RouterOS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+
+
+
+ +
+ Fleet Management for MikroTik +

MikroTik Fleet Management for MSPs

+

Manage hundreds of MikroTik routers from a single pane of glass. Zero-knowledge security, real-time monitoring, and configuration management — built for MSPs who demand more than WinBox.

+ +
+
+ + + + +
+
+ +

Everything you need to manage your fleet

+

From device discovery to firmware upgrades, The Other Dude gives you complete control over your MikroTik infrastructure.

+ +
+ + +
+
+ + + + + + + + + +
+

Fleet Management

+

Dashboard with real-time status, virtual-scrolled fleet table, subnet scanning, and per-device detail pages with live metrics.

+
+ + +
+
+ + + + +
+

Configuration

+

Browse and edit RouterOS config in real-time. Two-phase push with panic-revert ensures you never brick a remote device. Batch templates for fleet-wide changes.

+
+ + +
+
+ + + +
+

Monitoring

+

Real-time CPU, memory, and traffic via SSE. Threshold-based alerts with email, webhook, Slack, and webhook push notifications. Interactive topology map.

+
+ + +
+
+ + + + + +
+

Zero-Knowledge Security

+

1Password-style SRP-6a auth — the server never sees your password. Per-tenant envelope encryption via OpenBao Transit. Internal CA for device TLS.

+
+ + +
+
+ + + + + + + + +
+

Multi-Tenant

+

PostgreSQL Row-Level Security isolates tenants at the database layer. RBAC with four roles. API keys for automation.

+
+ + +
+
+ + + +
+

Operations

+

Firmware management, PDF reports, audit trail, maintenance windows, config backup with git-backed version history and diff.

+
+ +
+
+
+ + + + +
+
+ +

Built for reliability at scale

+ +
+ +
+
+
+ +
+
Frontend
+
React 19 · nginx
+
+
+
/api/ proxy
+ + +
+
+
+ +
+
Backend API
+
FastAPI · Python 3.12
+
+
+
+ + +
+
+
+ +
+
PostgreSQL
+
TimescaleDB · RLS
+
+
+
+ +
+
Redis
+
Locks · Cache
+
+
+
+ +
+
NATS
+
JetStream pub/sub
+
+
+
+ + +
+
+
+ +
+
Go Poller
+
RouterOS binary API · port 8729
+
+
+
+ + +
+
+
+ +
+
RouterOS Fleet
+
Your MikroTik devices
+
+
+
+ +
    +
  • Three-service stack: React frontend, Python API, Go poller — each independently scalable
  • +
  • PostgreSQL RLS enforces tenant isolation at the database layer, not the application layer
  • +
  • NATS JetStream delivers real-time events from poller to frontend via SSE
  • +
  • OpenBao Transit provides per-tenant envelope encryption for zero-knowledge credential storage
  • +
+
+
+ + + + +
+
+ +

Modern tools, battle-tested foundations

+
+ React 19 + TypeScript + FastAPI + Python 3.12 + Go 1.24 + PostgreSQL 17 + TimescaleDB + Redis + NATS + Docker + OpenBao + WireGuard + Tailwind CSS + Vite +
+
+
+ + + + +
+
+ +

See it in action

+
+
+
+ +
+ The Other Dude zero-knowledge SRP-6a login page +
Zero-Knowledge SRP-6a Login
+
+ +
+ Fleet dashboard showing Lebowski Lanes network overview +
Fleet Dashboard — Lebowski Lanes
+
+ +
+ Device fleet list with status monitoring across tenants +
Device Fleet List
+
+ +
+ Device detail view for The Dude core router +
Device Detail View
+
+ +
+ Network topology map with automatic device discovery +
Network Topology Map
+
+ +
+ RouterOS configuration editor with diff preview +
Configuration Editor
+
+ +
+ Alert rules and notification channel management +
Alert Rules & Notifications
+
+ +
+ Multi-tenant view showing The Stranger's Ranch network +
Multi-Tenant — The Stranger’s Ranch
+
+ +
+
+
+ + + + +
+
+ +

Up and running in minutes

+ +
+
+ + + + Terminal +
+
# Clone and configure
+cp .env.example .env
+
+# Start infrastructure
+docker compose up -d
+
+# Build app images
+docker compose build api && docker compose build poller && docker compose build frontend
+
+# Launch
+docker compose up -d
+
+# Open TOD
+open http://localhost:3000
+
+
+
+ + + + +
+
+

Ready to manage your fleet?

+

Get started in minutes. Self-hosted, open-source, and built for the MikroTik community.

+ +
+
+ + + + +
+ + + + + + + + + + diff --git a/docs/website/robots.txt b/docs/website/robots.txt new file mode 100644 index 0000000..afe2dd7 --- /dev/null +++ b/docs/website/robots.txt @@ -0,0 +1,4 @@ +User-agent: * +Allow: / + +Sitemap: https://theotherdude.net/sitemap.xml diff --git a/docs/website/script.js b/docs/website/script.js new file mode 100644 index 0000000..867814d --- /dev/null +++ b/docs/website/script.js @@ -0,0 +1,241 @@ +/* TOD Documentation Website — Shared JavaScript */ + +(function () { + 'use strict'; + + /* -------------------------------------------------- */ + /* 1. Scroll Spy (docs page) */ + /* -------------------------------------------------- */ + function initScrollSpy() { + const sidebar = document.querySelector('.sidebar-nav'); + if (!sidebar) return; + + const links = Array.from(document.querySelectorAll('.sidebar-link')); + const sections = links + .map(function (link) { + var id = link.getAttribute('data-section'); + return id ? document.getElementById(id) : null; + }) + .filter(Boolean); + + if (!sections.length) return; + + var current = null; + + var observer = new IntersectionObserver( + function (entries) { + entries.forEach(function (entry) { + if (entry.isIntersecting) { + var id = entry.target.id; + if (id !== current) { + current = id; + links.forEach(function (l) { + l.classList.toggle( + 'sidebar-link--active', + l.getAttribute('data-section') === id + ); + }); + + /* keep active link visible in sidebar */ + var active = sidebar.querySelector('.sidebar-link--active'); + if (active) { + active.scrollIntoView({ block: 'nearest', behavior: 'smooth' }); + } + } + } + }); + }, + { rootMargin: '-80px 0px -60% 0px', threshold: 0 } + ); + + sections.forEach(function (s) { + observer.observe(s); + }); + } + + /* -------------------------------------------------- */ + /* 2. Docs Search */ + /* -------------------------------------------------- */ + function initDocsSearch() { + var input = document.getElementById('docs-search-input'); + if (!input) return; + + var content = document.getElementById('docs-content'); + if (!content) return; + + var sections = Array.from(content.querySelectorAll('section[id]')); + var links = Array.from(document.querySelectorAll('.sidebar-link')); + + input.addEventListener('input', function () { + var q = input.value.trim().toLowerCase(); + + if (!q) { + sections.forEach(function (s) { s.style.display = ''; }); + links.forEach(function (l) { l.style.display = ''; }); + return; + } + + sections.forEach(function (s) { + var text = s.textContent.toLowerCase(); + var match = text.indexOf(q) !== -1; + s.style.display = match ? '' : 'none'; + }); + + links.forEach(function (l) { + var sectionId = l.getAttribute('data-section'); + var section = sectionId ? document.getElementById(sectionId) : null; + if (section) { + l.style.display = section.style.display; + } + }); + }); + } + + /* -------------------------------------------------- */ + /* 3. Back to Top */ + /* -------------------------------------------------- */ + function initBackToTop() { + var btn = document.getElementById('back-to-top'); + if (!btn) return; + + window.addEventListener('scroll', function () { + btn.classList.toggle('back-to-top--visible', window.scrollY > 400); + }, { passive: true }); + } + + window.scrollToTop = function () { + window.scrollTo({ top: 0, behavior: 'smooth' }); + }; + + /* -------------------------------------------------- */ + /* 4. Sidebar Toggle (mobile) */ + /* -------------------------------------------------- */ + window.toggleSidebar = function () { + var sidebar = document.getElementById('docs-sidebar'); + if (!sidebar) return; + sidebar.classList.toggle('docs-sidebar--open'); + }; + + function initSidebarClose() { + var sidebar = document.getElementById('docs-sidebar'); + if (!sidebar) return; + + /* close on outside click */ + document.addEventListener('click', function (e) { + if ( + sidebar.classList.contains('docs-sidebar--open') && + !sidebar.contains(e.target) && + !e.target.closest('.docs-hamburger') + ) { + sidebar.classList.remove('docs-sidebar--open'); + } + }); + + /* close on link click (mobile) */ + sidebar.addEventListener('click', function (e) { + if (e.target.closest('.sidebar-link')) { + sidebar.classList.remove('docs-sidebar--open'); + } + }); + } + + /* -------------------------------------------------- */ + /* 5. Reveal Animation (landing page) */ + /* -------------------------------------------------- */ + function initReveal() { + var els = document.querySelectorAll('.reveal'); + if (!els.length) return; + + var observer = new IntersectionObserver( + function (entries) { + entries.forEach(function (entry) { + if (entry.isIntersecting) { + entry.target.classList.add('reveal--visible'); + observer.unobserve(entry.target); + } + }); + }, + { threshold: 0.1 } + ); + + els.forEach(function (el) { + observer.observe(el); + }); + } + + /* -------------------------------------------------- */ + /* 6. Smooth scroll for anchor links */ + /* -------------------------------------------------- */ + function initSmoothScroll() { + document.addEventListener('click', function (e) { + var link = e.target.closest('a[href^="#"]'); + if (!link) return; + + var id = link.getAttribute('href').slice(1); + var target = document.getElementById(id); + if (!target) return; + + e.preventDefault(); + + var offset = 80; + var top = target.getBoundingClientRect().top + window.pageYOffset - offset; + window.scrollTo({ top: top, behavior: 'smooth' }); + + /* update URL without jump */ + history.pushState(null, '', '#' + id); + }); + } + + /* -------------------------------------------------- */ + /* 7. Active nav link (landing page) */ + /* -------------------------------------------------- */ + function initActiveNav() { + var navLinks = document.querySelectorAll('.nav-link[href^="index.html#"]'); + if (!navLinks.length) return; + + /* only run on landing page */ + if (document.body.classList.contains('docs-page')) return; + + var sectionIds = []; + navLinks.forEach(function (l) { + var hash = l.getAttribute('href').split('#')[1]; + if (hash) sectionIds.push({ id: hash, link: l }); + }); + + if (!sectionIds.length) return; + + var observer = new IntersectionObserver( + function (entries) { + entries.forEach(function (entry) { + if (entry.isIntersecting) { + sectionIds.forEach(function (item) { + item.link.classList.toggle( + 'nav-link--active', + item.id === entry.target.id + ); + }); + } + }); + }, + { rootMargin: '-80px 0px -60% 0px', threshold: 0 } + ); + + sectionIds.forEach(function (item) { + var el = document.getElementById(item.id); + if (el) observer.observe(el); + }); + } + + /* -------------------------------------------------- */ + /* Init on DOMContentLoaded */ + /* -------------------------------------------------- */ + document.addEventListener('DOMContentLoaded', function () { + initScrollSpy(); + initDocsSearch(); + initBackToTop(); + initSidebarClose(); + initReveal(); + initSmoothScroll(); + initActiveNav(); + }); +})(); diff --git a/docs/website/sitemap.xml b/docs/website/sitemap.xml new file mode 100644 index 0000000..2454086 --- /dev/null +++ b/docs/website/sitemap.xml @@ -0,0 +1,15 @@ + + + + https://theotherdude.net/ + 2026-03-07 + weekly + 1.0 + + + https://theotherdude.net/docs.html + 2026-03-07 + weekly + 0.8 + + diff --git a/docs/website/style.css b/docs/website/style.css new file mode 100644 index 0000000..1dc5e8e --- /dev/null +++ b/docs/website/style.css @@ -0,0 +1,1868 @@ +/* ========================================================================== + TOD - The Other Dude + Fleet Management Platform for MikroTik RouterOS + + Premium stylesheet — dark landing + light docs + ========================================================================== */ + +/* -------------------------------------------------------------------------- + 0. CSS Custom Properties + -------------------------------------------------------------------------- */ + +:root { + /* Landing page (dark) */ + --bg-deep: #040810; + --bg-primary: #0A1628; + --bg-surface: #111B2E; + --bg-elevated: #182438; + --text-primary: #F1F5F9; + --text-secondary: #94A3B8; + --text-muted: #64748B; + --accent: #2A9D8F; + --accent-hover: #3DB8A9; + --accent-glow: rgba(42, 157, 143, 0.12); + --accent-secondary: #8B1A1A; + --border: rgba(148, 163, 184, 0.08); + --border-accent: rgba(42, 157, 143, 0.2); + + /* Docs page (light) — applied contextually under .docs-page */ + --docs-bg: #FAFBFC; + --docs-surface: #FFFFFF; + --docs-text: #1E293B; + --docs-text-secondary: #475569; + --docs-text-muted: #94A3B8; + --docs-border: #E2E8F0; + --docs-accent: #1F7A6F; + --docs-sidebar-bg: #F8FAFC; + --docs-code-bg: #F1F5F9; + --docs-code-border: #E2E8F0; +} + +/* -------------------------------------------------------------------------- + 1. Reset & Base + -------------------------------------------------------------------------- */ + +*, +*::before, +*::after { + box-sizing: border-box; + margin: 0; + padding: 0; +} + +html { + scroll-behavior: smooth; + -webkit-text-size-adjust: 100%; + text-size-adjust: 100%; +} + +body { + font-family: "DM Sans", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif; + font-size: 16px; + line-height: 1.6; + color: var(--text-primary); + background: var(--bg-deep); + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + text-rendering: optimizeLegibility; + overflow-x: hidden; +} + +h1, h2, h3, h4, h5, h6 { + font-family: "Outfit", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif; + line-height: 1.2; +} + +code, pre, kbd, samp { + font-family: "Fira Code", "SF Mono", "Cascadia Code", "Consolas", monospace; +} + +img { + max-width: 100%; + height: auto; + display: block; +} + +a { + color: inherit; + text-decoration: none; +} + +button { + font-family: inherit; + cursor: pointer; + border: none; + background: none; +} + +ul, ol { + list-style: none; +} + +::selection { + background: rgba(42, 157, 143, 0.25); + color: var(--text-primary); +} + +/* -------------------------------------------------------------------------- + 2. Container + -------------------------------------------------------------------------- */ + +.container { + max-width: 1200px; + margin: 0 auto; + padding: 0 24px; +} + +/* -------------------------------------------------------------------------- + 3. Navigation — .site-nav + -------------------------------------------------------------------------- */ + +.site-nav { + height: 64px; + position: sticky; + top: 0; + z-index: 100; + display: flex; + align-items: center; + transition: background-color 0.3s ease, border-color 0.3s ease; +} + +.site-nav--dark { + background: rgba(4, 8, 16, 0.85); + backdrop-filter: blur(12px) saturate(180%); + -webkit-backdrop-filter: blur(12px) saturate(180%); + border-bottom: 1px solid var(--border); +} + +.site-nav--light { + background: rgba(255, 255, 255, 0.92); + backdrop-filter: blur(12px) saturate(180%); + -webkit-backdrop-filter: blur(12px) saturate(180%); + border-bottom: 1px solid var(--docs-border); +} + +.nav-inner { + display: flex; + justify-content: space-between; + align-items: center; + max-width: 1200px; + margin: 0 auto; + padding: 0 24px; + width: 100%; +} + +.nav-logo { + font-family: "Outfit", sans-serif; + font-weight: 700; + font-size: 20px; + display: flex; + align-items: center; + gap: 12px; + text-decoration: none; + color: inherit; +} + +.nav-logo-mark { + width: 32px; + height: 32px; + flex-shrink: 0; +} + +.nav-links { + display: flex; + align-items: center; + gap: 32px; +} + +.nav-link { + font-size: 14px; + font-weight: 500; + opacity: 0.7; + transition: opacity 0.2s ease, color 0.2s ease; + text-decoration: none; +} + +.nav-link:hover { + opacity: 1; +} + +.nav-link--active { + opacity: 1; + color: var(--accent); +} + +.site-nav--light .nav-link { + color: var(--docs-text-secondary); +} + +.site-nav--light .nav-link:hover { + color: var(--docs-text); +} + +.site-nav--light .nav-link--active { + color: var(--docs-accent); +} + +.nav-cta { + display: inline-flex; + align-items: center; + background: var(--accent); + color: #0A1628; + font-family: "Outfit", sans-serif; + font-weight: 600; + font-size: 14px; + padding: 8px 20px; + border-radius: 9999px; + transition: filter 0.2s ease, transform 0.15s ease; + text-decoration: none; + letter-spacing: 0.01em; +} + +.nav-cta:hover { + filter: brightness(1.1); + transform: translateY(-1px); +} + +.nav-cta:active { + transform: translateY(0); +} + +/* -------------------------------------------------------------------------- + 4. Hero — .hero + -------------------------------------------------------------------------- */ + +.hero { + min-height: calc(100vh - 64px); + display: flex; + align-items: center; + justify-content: center; + position: relative; + overflow: hidden; +} + +.hero-bg { + position: absolute; + inset: 0; + z-index: 0; + overflow: hidden; +} + +/* Animated gradient mesh */ +.hero-bg::before { + content: ""; + position: absolute; + top: -40%; + left: -20%; + width: 80%; + height: 80%; + border-radius: 50%; + background: radial-gradient(ellipse at center, rgba(42, 157, 143, 0.18) 0%, transparent 70%); + animation: meshFloat 20s ease-in-out infinite; + will-change: transform; +} + +.hero-bg::after { + content: ""; + position: absolute; + bottom: -30%; + right: -10%; + width: 70%; + height: 70%; + border-radius: 50%; + background: radial-gradient(ellipse at center, rgba(139, 26, 26, 0.12) 0%, transparent 70%); + animation: meshFloat 24s ease-in-out infinite reverse; + will-change: transform; +} + +/* Grid overlay */ +.hero-bg-grid { + position: absolute; + inset: 0; + background-image: + repeating-linear-gradient( + 0deg, + rgba(148, 163, 184, 0.03) 0px, + rgba(148, 163, 184, 0.03) 0.5px, + transparent 0.5px, + transparent 80px + ), + repeating-linear-gradient( + 90deg, + rgba(148, 163, 184, 0.03) 0px, + rgba(148, 163, 184, 0.03) 0.5px, + transparent 0.5px, + transparent 80px + ); + mask-image: radial-gradient(ellipse 70% 60% at 50% 40%, black 20%, transparent 100%); + -webkit-mask-image: radial-gradient(ellipse 70% 60% at 50% 40%, black 20%, transparent 100%); +} + +.hero-content { + position: relative; + z-index: 10; + text-align: center; + max-width: 800px; + padding: 0 24px; +} + +.hero-rosette { + margin-bottom: 32px; + animation: fadeInUp 0.6s ease both; +} + +.hero-rosette svg { + filter: drop-shadow(0 0 40px rgba(42, 157, 143, 0.3)) drop-shadow(0 0 80px rgba(139, 26, 26, 0.15)); +} + +.hero-badge { + display: inline-flex; + align-items: center; + gap: 8px; + background: rgba(42, 157, 143, 0.1); + color: var(--accent); + border: 1px solid rgba(42, 157, 143, 0.2); + border-radius: 9999px; + padding: 6px 16px; + font-family: "Outfit", sans-serif; + font-size: 13px; + font-weight: 500; + letter-spacing: 0.05em; + text-transform: uppercase; + margin-bottom: 24px; +} + +.hero-title { + font-family: "Outfit", sans-serif; + font-weight: 800; + font-size: clamp(3rem, 6vw, 5rem); + line-height: 1.05; + letter-spacing: -0.03em; + margin-bottom: 24px; + color: var(--text-primary); +} + +.hero-title .gradient-text { + background: linear-gradient(135deg, var(--accent) 0%, var(--accent-secondary) 100%); + -webkit-background-clip: text; + background-clip: text; + -webkit-text-fill-color: transparent; +} + +.hero-subtitle { + font-size: clamp(1.1rem, 2vw, 1.35rem); + color: var(--text-secondary); + max-width: 600px; + margin: 0 auto 48px; + line-height: 1.7; +} + +.hero-actions { + display: flex; + gap: 16px; + justify-content: center; + flex-wrap: wrap; +} + +.btn-primary { + display: inline-flex; + align-items: center; + gap: 8px; + background: var(--accent); + color: #0A1628; + font-family: "Outfit", sans-serif; + font-weight: 600; + font-size: 15px; + padding: 14px 28px; + border-radius: 12px; + border: none; + cursor: pointer; + transition: transform 0.2s ease, box-shadow 0.2s ease, filter 0.2s ease; + letter-spacing: 0.01em; + text-decoration: none; +} + +.btn-primary:hover { + transform: scale(1.02) translateY(-1px); + box-shadow: 0 8px 30px rgba(42, 157, 143, 0.25), 0 0 60px rgba(42, 157, 143, 0.08); + filter: brightness(1.05); +} + +.btn-primary:active { + transform: scale(0.99); +} + +.btn-secondary { + display: inline-flex; + align-items: center; + gap: 8px; + background: transparent; + color: var(--text-primary); + font-family: "Outfit", sans-serif; + font-weight: 600; + font-size: 15px; + padding: 14px 28px; + border-radius: 12px; + border: 1px solid var(--border); + cursor: pointer; + transition: background-color 0.2s ease, border-color 0.2s ease, transform 0.15s ease; + letter-spacing: 0.01em; + text-decoration: none; +} + +.btn-secondary:hover { + background: var(--bg-elevated); + border-color: rgba(148, 163, 184, 0.15); + transform: translateY(-1px); +} + +.btn-secondary:active { + transform: translateY(0); +} + +/* -------------------------------------------------------------------------- + 5. Features — .features-section + -------------------------------------------------------------------------- */ + +.features-section { + padding: 120px 0; +} + +.section-label { + color: var(--accent); + font-family: "Outfit", sans-serif; + text-transform: uppercase; + font-size: 13px; + font-weight: 600; + letter-spacing: 0.1em; + margin-bottom: 16px; +} + +.section-title { + font-family: "Outfit", sans-serif; + font-weight: 700; + font-size: 2.5rem; + color: var(--text-primary); + margin-bottom: 16px; + letter-spacing: -0.02em; +} + +.section-desc { + color: var(--text-secondary); + max-width: 600px; + margin-bottom: 64px; + font-size: 1.1rem; + line-height: 1.7; +} + +.features-grid { + display: grid; + grid-template-columns: repeat(3, 1fr); + gap: 24px; +} + +.feature-card { + background: var(--bg-surface); + border: 1px solid var(--border); + border-radius: 16px; + padding: 32px; + transition: transform 0.25s ease, border-color 0.25s ease, box-shadow 0.25s ease; +} + +.feature-card:hover { + border-color: var(--border-accent); + box-shadow: 0 8px 40px rgba(42, 157, 143, 0.06), 0 0 0 1px rgba(42, 157, 143, 0.08); + transform: translateY(-4px); +} + +.feature-icon { + width: 48px; + height: 48px; + border-radius: 12px; + background: var(--accent-glow); + display: flex; + align-items: center; + justify-content: center; + margin-bottom: 20px; + font-size: 22px; + color: var(--accent); +} + +.feature-icon svg { + width: 24px; + height: 24px; + stroke: var(--accent); + stroke-width: 1.5; + fill: none; +} + +.feature-title { + font-family: "Outfit", sans-serif; + font-weight: 600; + font-size: 1.15rem; + margin-bottom: 10px; + color: var(--text-primary); +} + +.feature-desc { + color: var(--text-secondary); + font-size: 0.95rem; + line-height: 1.7; +} + +/* -------------------------------------------------------------------------- + 6. Architecture — .arch-section + -------------------------------------------------------------------------- */ + +.arch-section { + padding: 120px 0; + background: var(--bg-surface); +} + +.arch-diagram { + background: var(--bg-elevated); + border: 1px solid var(--border); + border-radius: 16px; + padding: 48px; + overflow-x: auto; +} + +.arch-visual { + display: flex; + flex-direction: column; + align-items: center; + gap: 0; + padding: 48px 24px; + background: var(--bg-elevated); + border: 1px solid var(--border); + border-radius: 16px; +} + +.arch-row { + display: flex; + justify-content: center; + gap: 20px; + width: 100%; + max-width: 700px; +} + +.arch-row--triple { + max-width: 700px; +} + +.arch-node { + display: flex; + flex-direction: column; + align-items: center; + gap: 6px; + padding: 20px 28px; + border-radius: 12px; + border: 1px solid var(--border-accent); + background: rgba(42, 157, 143, 0.04); + min-width: 160px; + text-align: center; + transition: border-color 0.3s, box-shadow 0.3s; +} + +.arch-node:hover { + border-color: var(--accent); + box-shadow: 0 0 24px var(--accent-glow); +} + +.arch-node--infra { + border-color: rgba(139, 26, 26, 0.25); + background: rgba(139, 26, 26, 0.04); + flex: 1; + min-width: 0; +} + +.arch-node--infra:hover { + border-color: var(--accent-secondary); + box-shadow: 0 0 24px rgba(139, 26, 26, 0.12); +} + +.arch-node--device { + border-color: rgba(148, 163, 184, 0.2); + background: rgba(148, 163, 184, 0.04); +} + +.arch-node-icon { + color: var(--accent); + display: flex; + align-items: center; + justify-content: center; + width: 40px; + height: 40px; + border-radius: 10px; + background: var(--accent-glow); +} + +.arch-node--infra .arch-node-icon { + color: var(--accent-secondary); + background: rgba(139, 26, 26, 0.1); +} + +.arch-node--device .arch-node-icon { + color: var(--text-muted); + background: rgba(148, 163, 184, 0.1); +} + +.arch-node-label { + font-family: 'Outfit', sans-serif; + font-weight: 600; + font-size: 15px; + color: var(--text-primary); +} + +.arch-node-tech { + font-size: 12px; + color: var(--text-muted); + font-family: 'Fira Code', monospace; +} + +.arch-connector { + display: flex; + flex-direction: column; + align-items: center; + padding: 4px 0; +} + +.arch-connector-line { + width: 2px; + height: 32px; + background: linear-gradient(to bottom, var(--accent), rgba(42, 157, 143, 0.2)); + border-radius: 1px; +} + +.arch-connector-line--triple { + height: 24px; + background: linear-gradient(to bottom, rgba(139, 26, 26, 0.5), rgba(139, 26, 26, 0.15)); +} + +.arch-connector-label { + font-size: 11px; + color: var(--text-muted); + font-family: 'Fira Code', monospace; + margin-top: 4px; +} + +/* -------------------------------------------------------------------------- + 7. Tech Stack — .tech-section + -------------------------------------------------------------------------- */ + +.tech-section { + padding: 80px 0; +} + +.tech-grid { + display: flex; + flex-wrap: wrap; + gap: 12px; + justify-content: center; +} + +.tech-badge { + background: var(--bg-surface); + border: 1px solid var(--border); + border-radius: 8px; + padding: 8px 16px; + font-family: "Outfit", sans-serif; + font-size: 13px; + font-weight: 500; + color: var(--text-secondary); + display: flex; + align-items: center; + gap: 8px; + transition: border-color 0.2s ease, color 0.2s ease, transform 0.15s ease; +} + +.tech-badge:hover { + border-color: var(--border-accent); + color: var(--text-primary); + transform: translateY(-2px); +} + +.tech-badge svg, +.tech-badge img { + width: 18px; + height: 18px; + flex-shrink: 0; +} + +/* -------------------------------------------------------------------------- + 8. Screenshots — .screenshots-section + -------------------------------------------------------------------------- */ + +.screenshots-section { + padding: 120px 0; +} + +.screenshots-scroll { + max-width: 1200px; + margin: 0 auto; + padding: 0 24px; +} + +.screenshots-track, +.screenshot-gallery { + display: grid; + grid-template-columns: repeat(4, 1fr); + gap: 24px; + padding-bottom: 24px; +} + +.screenshot-card { + margin: 0; +} + +.screenshot-card img { + width: 100%; + border-radius: 12px; + border: 1px solid var(--border); + box-shadow: 0 8px 32px rgba(0, 0, 0, 0.3); + transition: transform 0.3s ease, box-shadow 0.3s ease; + display: block; + cursor: zoom-in; +} + +.screenshot-card img:hover { + transform: scale(1.03); + box-shadow: 0 16px 48px rgba(42, 157, 143, 0.15); +} + +/* Lightbox overlay */ +.lightbox { + position: fixed; + inset: 0; + z-index: 9999; + background: rgba(0, 0, 0, 0.92); + display: flex; + align-items: center; + justify-content: center; + cursor: zoom-out; + opacity: 0; + visibility: hidden; + transition: opacity 0.25s ease, visibility 0.25s ease; + padding: 24px; +} + +.lightbox.active { + opacity: 1; + visibility: visible; +} + +.lightbox img { + max-width: 95vw; + max-height: 92vh; + border-radius: 8px; + box-shadow: 0 24px 80px rgba(0, 0, 0, 0.6); + transform: scale(0.95); + transition: transform 0.25s ease; +} + +.lightbox.active img { + transform: scale(1); +} + +.lightbox-caption { + position: absolute; + bottom: 20px; + left: 50%; + transform: translateX(-50%); + color: rgba(255, 255, 255, 0.8); + font-family: 'Outfit', sans-serif; + font-size: 15px; + font-weight: 500; + background: rgba(0, 0, 0, 0.5); + padding: 6px 16px; + border-radius: 6px; +} + +.screenshot-card figcaption { + text-align: center; + margin-top: 12px; + color: var(--text-secondary); + font-size: 14px; + font-family: 'Outfit', sans-serif; + font-weight: 500; +} + +/* -------------------------------------------------------------------------- + 9. Quick Start — .quickstart-section + -------------------------------------------------------------------------- */ + +.quickstart-section { + padding: 120px 0; +} + +.code-window { + background: #0D1117; + border-radius: 16px; + overflow: hidden; + border: 1px solid rgba(148, 163, 184, 0.08); + max-width: 700px; + margin: 0 auto; + box-shadow: 0 16px 64px rgba(0, 0, 0, 0.4); +} + +.code-header { + background: #161B22; + padding: 12px 20px; + display: flex; + align-items: center; + gap: 8px; + border-bottom: 1px solid rgba(148, 163, 184, 0.06); +} + +.code-dot { + width: 12px; + height: 12px; + border-radius: 50%; + flex-shrink: 0; +} + +.code-dot:nth-child(1) { background: #FF5F56; } +.code-dot:nth-child(2) { background: #FFBD2E; } +.code-dot:nth-child(3) { background: #27C93F; } + +.code-title { + margin-left: auto; + color: var(--text-muted); + font-family: "Fira Code", monospace; + font-size: 12px; +} + +.code-body { + padding: 24px; + font-family: "Fira Code", monospace; + font-size: 14px; + line-height: 1.8; + overflow-x: auto; + color: var(--text-primary); +} + +.code-comment { + color: var(--text-muted); + font-style: italic; +} + +.code-command { + color: var(--text-primary); +} + +.code-output { + color: var(--accent); +} + +.code-prompt { + color: var(--accent); + user-select: none; +} + +.code-string { + color: #7DD3FC; +} + +.code-flag { + color: var(--accent-secondary); +} + +/* -------------------------------------------------------------------------- + 10. CTA Section — .cta-section + -------------------------------------------------------------------------- */ + +.cta-section { + padding: 120px 0; + text-align: center; + position: relative; +} + +.cta-section::before { + content: ""; + position: absolute; + top: 0; + left: 50%; + transform: translateX(-50%); + width: 80%; + height: 1px; + background: linear-gradient(90deg, transparent 0%, var(--accent) 50%, transparent 100%); + opacity: 0.3; +} + +.cta-title { + font-family: "Outfit", sans-serif; + font-weight: 700; + font-size: 2.5rem; + color: var(--text-primary); + margin-bottom: 16px; + letter-spacing: -0.02em; +} + +.cta-subtitle { + color: var(--text-secondary); + font-size: 1.1rem; + max-width: 500px; + margin: 0 auto 40px; + line-height: 1.7; +} + +.cta-actions { + display: flex; + gap: 16px; + justify-content: center; + flex-wrap: wrap; +} + +/* -------------------------------------------------------------------------- + 11. Footer — .site-footer + -------------------------------------------------------------------------- */ + +.site-footer { + background: var(--bg-surface); + border-top: 1px solid var(--border); + padding: 48px 0; +} + +.footer-inner { + display: flex; + justify-content: space-between; + align-items: center; + max-width: 1200px; + margin: 0 auto; + padding: 0 24px; + flex-wrap: wrap; + gap: 16px; +} + +.footer-links { + display: flex; + gap: 24px; +} + +.footer-link { + color: var(--text-muted); + font-size: 14px; + transition: color 0.2s ease; + text-decoration: none; +} + +.footer-link:hover { + color: var(--text-secondary); +} + +.footer-copy { + color: var(--text-muted); + font-size: 13px; +} + +/* -------------------------------------------------------------------------- + 12. DOCS PAGE — scoped under .docs-page + -------------------------------------------------------------------------- */ + +.docs-page { + background: var(--docs-bg); + color: var(--docs-text); + min-height: 100vh; +} + +/* Layout */ +.docs-layout { + display: flex; + min-height: calc(100vh - 64px); +} + +/* Sidebar */ +.docs-sidebar { + width: 280px; + flex-shrink: 0; + background: var(--docs-sidebar-bg); + border-right: 1px solid var(--docs-border); + padding: 32px 24px; + position: sticky; + top: 64px; + height: calc(100vh - 64px); + overflow-y: auto; + scrollbar-width: thin; + scrollbar-color: var(--docs-text-muted) transparent; +} + +.docs-sidebar::-webkit-scrollbar { + width: 5px; +} + +.docs-sidebar::-webkit-scrollbar-track { + background: transparent; +} + +.docs-sidebar::-webkit-scrollbar-thumb { + background: var(--docs-text-muted); + border-radius: 3px; + opacity: 0; + transition: opacity 0.2s; +} + +.docs-sidebar:hover::-webkit-scrollbar-thumb { + opacity: 1; +} + +/* Sidebar navigation */ +.sidebar-nav { + display: flex; + flex-direction: column; +} + +.sidebar-section { + margin-bottom: 24px; +} + +.sidebar-section-title { + color: var(--docs-text); + font-family: "Outfit", sans-serif; + font-weight: 600; + font-size: 13px; + text-transform: uppercase; + letter-spacing: 0.08em; + margin-bottom: 8px; + padding: 0 12px; + opacity: 0.5; +} + +.sidebar-link { + display: block; + padding: 6px 12px; + font-size: 14px; + color: var(--docs-text-secondary); + border-radius: 6px; + transition: background-color 0.15s ease, color 0.15s ease; + text-decoration: none; + line-height: 1.5; +} + +.sidebar-link:hover { + background: rgba(226, 232, 240, 0.5); + color: var(--docs-text); +} + +.sidebar-link--active { + background: rgba(2, 132, 199, 0.08); + color: var(--docs-accent); + font-weight: 500; +} + +.sidebar-link--active:hover { + background: rgba(2, 132, 199, 0.12); + color: var(--docs-accent); +} + +/* Docs search */ +.docs-search { + margin-bottom: 24px; +} + +.docs-search input { + width: 100%; + background: var(--docs-surface); + border: 1px solid var(--docs-border); + border-radius: 8px; + padding: 8px 12px; + font-family: "DM Sans", sans-serif; + font-size: 14px; + color: var(--docs-text); + transition: border-color 0.2s ease, box-shadow 0.2s ease; + outline: none; +} + +.docs-search input::placeholder { + color: var(--docs-text-muted); +} + +.docs-search input:focus { + border-color: var(--docs-accent); + box-shadow: 0 0 0 3px rgba(2, 132, 199, 0.1); +} + +/* Docs content area */ +.docs-content { + flex: 1; + padding: 48px 64px; + max-width: 860px; + overflow-x: hidden; + min-width: 0; +} + +/* Content typography */ +.docs-content h1 { + font-family: "Outfit", sans-serif; + font-weight: 700; + font-size: 2rem; + color: var(--docs-text); + margin-bottom: 8px; + letter-spacing: -0.02em; +} + +.docs-content .docs-subtitle { + color: var(--docs-text-secondary); + font-size: 1.1rem; + margin-bottom: 40px; + line-height: 1.7; +} + +.docs-content h2 { + font-family: "Outfit", sans-serif; + font-weight: 600; + font-size: 1.5rem; + color: var(--docs-text); + margin-top: 56px; + margin-bottom: 16px; + padding-top: 32px; + border-top: 1px solid var(--docs-border); + letter-spacing: -0.01em; +} + +.docs-content h2:first-of-type { + margin-top: 0; + padding-top: 0; + border-top: none; +} + +.docs-content h3 { + font-family: "Outfit", sans-serif; + font-weight: 600; + font-size: 1.15rem; + color: var(--docs-text); + margin-top: 40px; + margin-bottom: 12px; +} + +.docs-content p { + color: var(--docs-text-secondary); + margin-bottom: 16px; + line-height: 1.8; +} + +.docs-content a { + color: var(--docs-accent); + text-decoration: underline; + text-underline-offset: 2px; + text-decoration-color: rgba(2, 132, 199, 0.3); + transition: text-decoration-color 0.2s ease, opacity 0.2s ease; +} + +.docs-content a:hover { + opacity: 0.8; + text-decoration-color: var(--docs-accent); +} + +/* Inline code */ +.docs-content code { + background: var(--docs-code-bg); + padding: 2px 6px; + border-radius: 4px; + font-family: "Fira Code", monospace; + font-size: 0.875em; + color: var(--docs-text); + border: 1px solid var(--docs-code-border); +} + +/* Code blocks */ +.docs-content pre { + background: #1E293B; + border-radius: 12px; + padding: 24px; + overflow-x: auto; + margin-bottom: 24px; + border: 1px solid rgba(255, 255, 255, 0.05); + scrollbar-width: thin; + scrollbar-color: rgba(148, 163, 184, 0.3) transparent; +} + +.docs-content pre::-webkit-scrollbar { + height: 5px; +} + +.docs-content pre::-webkit-scrollbar-track { + background: transparent; +} + +.docs-content pre::-webkit-scrollbar-thumb { + background: rgba(148, 163, 184, 0.3); + border-radius: 3px; +} + +.docs-content pre code { + background: transparent; + color: #E2E8F0; + padding: 0; + border: none; + font-size: 14px; + line-height: 1.7; + border-radius: 0; +} + +/* Tables */ +.docs-content table { + width: 100%; + border-collapse: collapse; + margin-bottom: 24px; + font-size: 14px; +} + +.docs-content th { + background: var(--docs-code-bg); + text-align: left; + padding: 12px 16px; + font-family: "Outfit", sans-serif; + font-weight: 600; + font-size: 13px; + border-bottom: 2px solid var(--docs-border); + color: var(--docs-text); + white-space: nowrap; +} + +.docs-content td { + padding: 10px 16px; + border-bottom: 1px solid var(--docs-border); + color: var(--docs-text-secondary); + vertical-align: top; +} + +.docs-content tr:last-child td { + border-bottom: none; +} + +/* Lists */ +.docs-content ul, +.docs-content ol { + margin-bottom: 16px; + padding-left: 24px; + list-style: none; +} + +.docs-content ul li { + position: relative; + margin-bottom: 8px; + color: var(--docs-text-secondary); + line-height: 1.7; + padding-left: 4px; +} + +.docs-content ul li::before { + content: ""; + position: absolute; + left: -16px; + top: 10px; + width: 5px; + height: 5px; + border-radius: 50%; + background: var(--docs-accent); + opacity: 0.5; +} + +.docs-content ol { + list-style: decimal; +} + +.docs-content ol li { + margin-bottom: 8px; + color: var(--docs-text-secondary); + line-height: 1.7; +} + +.docs-content strong { + color: var(--docs-text); + font-weight: 600; +} + +/* Blockquote */ +.docs-content blockquote { + border-left: 3px solid var(--docs-accent); + padding-left: 20px; + margin-left: 0; + margin-bottom: 16px; + color: var(--docs-text-muted); + font-style: italic; +} + +.docs-content blockquote p { + color: inherit; +} + +/* Horizontal rule */ +.docs-content hr { + border: none; + height: 1px; + background: var(--docs-border); + margin: 40px 0; +} + +/* -------------------------------------------------------------------------- + 13. Back to Top — .back-to-top + -------------------------------------------------------------------------- */ + +.back-to-top { + position: fixed; + bottom: 32px; + right: 32px; + width: 44px; + height: 44px; + border-radius: 50%; + background: var(--docs-accent); + color: white; + display: flex; + align-items: center; + justify-content: center; + box-shadow: 0 4px 16px rgba(2, 132, 199, 0.3); + opacity: 0; + pointer-events: none; + transition: opacity 0.3s ease, transform 0.3s ease; + border: none; + cursor: pointer; + z-index: 50; + transform: translateY(8px); +} + +.back-to-top:hover { + transform: translateY(-2px); + box-shadow: 0 6px 24px rgba(2, 132, 199, 0.4); +} + +.back-to-top--visible { + opacity: 1; + pointer-events: auto; + transform: translateY(0); +} + +.back-to-top svg { + width: 20px; + height: 20px; + stroke: currentColor; + stroke-width: 2; + fill: none; +} + +/* -------------------------------------------------------------------------- + 14. Mobile Hamburger — .docs-hamburger + -------------------------------------------------------------------------- */ + +.docs-hamburger { + display: none; + width: 40px; + height: 40px; + border-radius: 8px; + background: transparent; + border: none; + align-items: center; + justify-content: center; + cursor: pointer; + padding: 0; + color: var(--docs-text); +} + +.docs-hamburger svg { + width: 22px; + height: 22px; + stroke: currentColor; + stroke-width: 2; + fill: none; +} + +/* Mobile sidebar overlay */ +.docs-sidebar-overlay { + display: none; + position: fixed; + inset: 0; + background: rgba(0, 0, 0, 0.5); + z-index: 90; + opacity: 0; + transition: opacity 0.3s ease; +} + +.docs-sidebar-overlay--visible { + opacity: 1; +} + +/* -------------------------------------------------------------------------- + 15. Animations + -------------------------------------------------------------------------- */ + +@keyframes meshFloat { + 0%, 100% { + transform: translate(0, 0) scale(1); + } + 25% { + transform: translate(5%, 3%) scale(1.05); + } + 50% { + transform: translate(-3%, 6%) scale(0.97); + } + 75% { + transform: translate(4%, -2%) scale(1.03); + } +} + +@keyframes fadeInUp { + from { + opacity: 0; + transform: translateY(20px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +@keyframes shimmerSlide { + 0% { + background-position: -200% 0; + } + 100% { + background-position: 200% 0; + } +} + +/* Scroll reveal */ +.reveal { + opacity: 0; + transform: translateY(20px); + transition: opacity 0.6s ease, transform 0.6s ease; +} + +.reveal--visible { + opacity: 1; + transform: none; +} + +/* Staggered children */ +.reveal-stagger > * { + opacity: 0; + transform: translateY(16px); + transition: opacity 0.5s ease, transform 0.5s ease; +} + +.reveal-stagger--visible > *:nth-child(1) { transition-delay: 0ms; opacity: 1; transform: none; } +.reveal-stagger--visible > *:nth-child(2) { transition-delay: 80ms; opacity: 1; transform: none; } +.reveal-stagger--visible > *:nth-child(3) { transition-delay: 160ms; opacity: 1; transform: none; } +.reveal-stagger--visible > *:nth-child(4) { transition-delay: 240ms; opacity: 1; transform: none; } +.reveal-stagger--visible > *:nth-child(5) { transition-delay: 320ms; opacity: 1; transform: none; } +.reveal-stagger--visible > *:nth-child(6) { transition-delay: 400ms; opacity: 1; transform: none; } + +/* Hero entrance */ +.hero-content .hero-badge { animation: fadeInUp 0.6s ease 0.1s both; } +.hero-content .hero-title { animation: fadeInUp 0.6s ease 0.2s both; } +.hero-content .hero-subtitle { animation: fadeInUp 0.6s ease 0.35s both; } +.hero-content .hero-actions { animation: fadeInUp 0.6s ease 0.5s both; } + +/* -------------------------------------------------------------------------- + 16. Scrollbar (docs page) + -------------------------------------------------------------------------- */ + +.docs-page { + scrollbar-width: thin; + scrollbar-color: var(--docs-text-muted) var(--docs-border); +} + +.docs-page::-webkit-scrollbar { + width: 8px; +} + +.docs-page::-webkit-scrollbar-track { + background: var(--docs-border); +} + +.docs-page::-webkit-scrollbar-thumb { + background: var(--docs-text-muted); + border-radius: 4px; +} + +.docs-page::-webkit-scrollbar-thumb:hover { + background: var(--docs-text-secondary); +} + +/* -------------------------------------------------------------------------- + 17. Utility Classes + -------------------------------------------------------------------------- */ + +.text-center { text-align: center; } +.text-left { text-align: left; } + +.mx-auto { margin-left: auto; margin-right: auto; } + +.mt-0 { margin-top: 0; } +.mb-0 { margin-bottom: 0; } +.mb-8 { margin-bottom: 8px; } +.mb-16 { margin-bottom: 16px; } +.mb-24 { margin-bottom: 24px; } +.mb-32 { margin-bottom: 32px; } +.mb-48 { margin-bottom: 48px; } +.mb-64 { margin-bottom: 64px; } + +.sr-only { + position: absolute; + width: 1px; + height: 1px; + padding: 0; + margin: -1px; + overflow: hidden; + clip: rect(0, 0, 0, 0); + white-space: nowrap; + border-width: 0; +} + +/* -------------------------------------------------------------------------- + 18. Responsive — Tablet (max-width: 768px) + -------------------------------------------------------------------------- */ + +@media (max-width: 768px) { + /* Features grid */ + .features-grid { + grid-template-columns: repeat(2, 1fr); + gap: 16px; + } + + /* Section spacing */ + .features-section, + .arch-section, + .screenshots-section, + .quickstart-section, + .cta-section { + padding: 80px 0; + } + + .section-title, + .cta-title { + font-size: 2rem; + } + + /* Architecture diagram */ + .arch-diagram { + padding: 24px; + } + + .arch-diagram pre { + font-size: 12px; + } + + /* Screenshots */ + .screenshots-track, + .screenshot-gallery { + grid-template-columns: repeat(2, 1fr); + } + + /* Docs layout */ + .docs-sidebar { + display: none; + position: fixed; + top: 64px; + left: 0; + z-index: 95; + width: 280px; + height: calc(100vh - 64px); + box-shadow: 4px 0 24px rgba(0, 0, 0, 0.1); + } + + .docs-sidebar--open { + display: block; + } + + .docs-sidebar-overlay--visible { + display: block; + opacity: 1; + } + + .docs-hamburger { + display: flex; + } + + .docs-content { + padding: 24px; + } + + /* Nav links */ + .nav-links { + gap: 16px; + } + + .nav-link { + font-size: 13px; + } + + /* Footer */ + .footer-inner { + flex-direction: column; + text-align: center; + gap: 24px; + } +} + +/* -------------------------------------------------------------------------- + 19. Responsive — Mobile (max-width: 480px) + -------------------------------------------------------------------------- */ + +@media (max-width: 480px) { + /* Features grid */ + .features-grid { + grid-template-columns: 1fr; + } + + /* Hero adjustments */ + .hero-actions { + flex-direction: column; + align-items: center; + } + + .btn-primary, + .btn-secondary { + width: 100%; + justify-content: center; + } + + /* Section spacing */ + .features-section, + .arch-section, + .screenshots-section, + .quickstart-section, + .cta-section { + padding: 60px 0; + } + + .section-title, + .cta-title { + font-size: 1.65rem; + } + + .section-desc { + margin-bottom: 40px; + } + + /* Feature cards */ + .feature-card { + padding: 24px; + } + + /* Code window */ + .code-body { + padding: 16px; + font-size: 13px; + } + + /* Screenshots */ + .screenshots-track, + .screenshot-gallery { + grid-template-columns: 1fr; + } + + /* Docs content */ + .docs-content h1 { + font-size: 1.65rem; + } + + .docs-content h2 { + font-size: 1.3rem; + margin-top: 40px; + padding-top: 24px; + } + + /* Nav hide on very small */ + .nav-links .nav-link:not(.nav-link--active):not(:first-child):not(:last-child) { + display: none; + } + + .nav-links { + gap: 12px; + } + + .nav-cta { + padding: 6px 14px; + font-size: 13px; + } +} + +/* -------------------------------------------------------------------------- + 20. Print Styles + -------------------------------------------------------------------------- */ + +@media print { + .site-nav, + .docs-sidebar, + .back-to-top, + .docs-hamburger, + .docs-sidebar-overlay, + .hero-bg, + .cta-section, + .site-footer { + display: none !important; + } + + body { + background: white; + color: black; + font-size: 12pt; + } + + .docs-content { + max-width: 100%; + padding: 0; + } + + .docs-content pre { + background: #f5f5f5; + border: 1px solid #ddd; + color: #333; + } + + .docs-content pre code { + color: #333; + } + + .docs-content a { + color: #000; + text-decoration: underline; + } + + .docs-content a::after { + content: " (" attr(href) ")"; + font-size: 0.85em; + color: #666; + } +} + +/* -------------------------------------------------------------------------- + 21. Focus Styles (accessibility) + -------------------------------------------------------------------------- */ + +:focus-visible { + outline: 2px solid var(--accent); + outline-offset: 2px; +} + +.docs-page :focus-visible { + outline-color: var(--docs-accent); +} + +.btn-primary:focus-visible, +.btn-secondary:focus-visible, +.nav-cta:focus-visible { + outline-offset: 3px; +} + +/* -------------------------------------------------------------------------- + 22. Dark docs-page code syntax highlighting helpers + -------------------------------------------------------------------------- */ + +.docs-content pre .token-keyword { color: #C084FC; } +.docs-content pre .token-string { color: #7DD3FC; } +.docs-content pre .token-comment { color: #64748B; font-style: italic; } +.docs-content pre .token-function { color: #2A9D8F; } +.docs-content pre .token-number { color: #FB923C; } +.docs-content pre .token-operator { color: #94A3B8; } +.docs-content pre .token-type { color: #8B1A1A; } +.docs-content pre .token-variable { color: #F1F5F9; } + +/* -------------------------------------------------------------------------- + 23. Additional Section Variants + -------------------------------------------------------------------------- */ + +/* Alternating background for visual rhythm */ +.section--alt { + background: var(--bg-surface); +} + +/* Stats / metrics row */ +.stats-row { + display: flex; + gap: 48px; + justify-content: center; + padding: 40px 0; + flex-wrap: wrap; +} + +.stat-item { + text-align: center; +} + +.stat-value { + font-family: "Outfit", sans-serif; + font-weight: 800; + font-size: 2.5rem; + color: var(--accent); + letter-spacing: -0.02em; + line-height: 1; + margin-bottom: 4px; +} + +.stat-label { + font-size: 14px; + color: var(--text-muted); + font-weight: 500; +} + +/* Testimonial / quote card */ +.quote-card { + background: var(--bg-surface); + border: 1px solid var(--border); + border-radius: 16px; + padding: 40px; + max-width: 640px; + margin: 0 auto; + position: relative; +} + +.quote-card::before { + content: "\201C"; + font-family: "Outfit", sans-serif; + font-size: 4rem; + color: var(--accent); + opacity: 0.3; + position: absolute; + top: 16px; + left: 28px; + line-height: 1; +} + +.quote-text { + font-size: 1.1rem; + line-height: 1.7; + color: var(--text-secondary); + font-style: italic; + margin-bottom: 16px; +} + +.quote-author { + font-family: "Outfit", sans-serif; + font-weight: 600; + font-size: 14px; + color: var(--text-primary); +} + +.quote-role { + font-size: 13px; + color: var(--text-muted); +} + +/* -------------------------------------------------------------------------- + 24. Reduced Motion + -------------------------------------------------------------------------- */ + +@media (prefers-reduced-motion: reduce) { + *, + *::before, + *::after { + animation-duration: 0.01ms !important; + animation-iteration-count: 1 !important; + transition-duration: 0.01ms !important; + } + + html { + scroll-behavior: auto; + } + + .reveal { + opacity: 1; + transform: none; + } + + .reveal-stagger > * { + opacity: 1; + transform: none; + } +} diff --git a/frontend/.gitignore b/frontend/.gitignore new file mode 100644 index 0000000..9dc3dd1 --- /dev/null +++ b/frontend/.gitignore @@ -0,0 +1,32 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? + +# Playwright +tests/e2e/.auth/ +test-results/ +playwright-report/ + +# Dev HTTPS certs (self-signed, regenerate with openssl) +certs/ diff --git a/frontend/README.md b/frontend/README.md new file mode 100644 index 0000000..d2e7761 --- /dev/null +++ b/frontend/README.md @@ -0,0 +1,73 @@ +# React + TypeScript + Vite + +This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules. + +Currently, two official plugins are available: + +- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Babel](https://babeljs.io/) (or [oxc](https://oxc.rs) when used in [rolldown-vite](https://vite.dev/guide/rolldown)) for Fast Refresh +- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh + +## React Compiler + +The React Compiler is not enabled on this template because of its impact on dev & build performances. To add it, see [this documentation](https://react.dev/learn/react-compiler/installation). + +## Expanding the ESLint configuration + +If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules: + +```js +export default defineConfig([ + globalIgnores(['dist']), + { + files: ['**/*.{ts,tsx}'], + extends: [ + // Other configs... + + // Remove tseslint.configs.recommended and replace with this + tseslint.configs.recommendedTypeChecked, + // Alternatively, use this for stricter rules + tseslint.configs.strictTypeChecked, + // Optionally, add this for stylistic rules + tseslint.configs.stylisticTypeChecked, + + // Other configs... + ], + languageOptions: { + parserOptions: { + project: ['./tsconfig.node.json', './tsconfig.app.json'], + tsconfigRootDir: import.meta.dirname, + }, + // other options... + }, + }, +]) +``` + +You can also install [eslint-plugin-react-x](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-x) and [eslint-plugin-react-dom](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-dom) for React-specific lint rules: + +```js +// eslint.config.js +import reactX from 'eslint-plugin-react-x' +import reactDom from 'eslint-plugin-react-dom' + +export default defineConfig([ + globalIgnores(['dist']), + { + files: ['**/*.{ts,tsx}'], + extends: [ + // Other configs... + // Enable lint rules for React + reactX.configs['recommended-typescript'], + // Enable lint rules for React DOM + reactDom.configs.recommended, + ], + languageOptions: { + parserOptions: { + project: ['./tsconfig.node.json', './tsconfig.app.json'], + tsconfigRootDir: import.meta.dirname, + }, + // other options... + }, + }, +]) +``` diff --git a/frontend/eslint.config.js b/frontend/eslint.config.js new file mode 100644 index 0000000..5e6b472 --- /dev/null +++ b/frontend/eslint.config.js @@ -0,0 +1,23 @@ +import js from '@eslint/js' +import globals from 'globals' +import reactHooks from 'eslint-plugin-react-hooks' +import reactRefresh from 'eslint-plugin-react-refresh' +import tseslint from 'typescript-eslint' +import { defineConfig, globalIgnores } from 'eslint/config' + +export default defineConfig([ + globalIgnores(['dist']), + { + files: ['**/*.{ts,tsx}'], + extends: [ + js.configs.recommended, + tseslint.configs.recommended, + reactHooks.configs.flat.recommended, + reactRefresh.configs.vite, + ], + languageOptions: { + ecmaVersion: 2020, + globals: globals.browser, + }, + }, +]) diff --git a/frontend/index.html b/frontend/index.html new file mode 100644 index 0000000..af223c4 --- /dev/null +++ b/frontend/index.html @@ -0,0 +1,14 @@ + + + + + + + + TOD - The Other Dude + + +
+ + + diff --git a/frontend/package-lock.json b/frontend/package-lock.json new file mode 100644 index 0000000..a0de7c1 --- /dev/null +++ b/frontend/package-lock.json @@ -0,0 +1,9521 @@ +{ + "name": "frontend", + "version": "9.0.1", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "frontend", + "version": "9.0.1", + "dependencies": { + "@dagrejs/dagre": "^2.0.4", + "@git-diff-view/lowlight": "^0.0.39", + "@git-diff-view/react": "^0.0.39", + "@radix-ui/react-avatar": "^1.1.11", + "@radix-ui/react-checkbox": "^1.3.3", + "@radix-ui/react-dialog": "^1.1.15", + "@radix-ui/react-dropdown-menu": "^2.1.16", + "@radix-ui/react-label": "^2.1.8", + "@radix-ui/react-popover": "^1.1.15", + "@radix-ui/react-select": "^2.2.6", + "@radix-ui/react-separator": "^1.1.8", + "@radix-ui/react-slot": "^1.2.4", + "@radix-ui/react-tabs": "^1.1.13", + "@radix-ui/react-toast": "^1.2.15", + "@tanstack/react-query": "^5.90.21", + "@tanstack/react-router": "^1.161.3", + "@tanstack/react-router-devtools": "^1.161.3", + "@tanstack/react-virtual": "^3.13.19", + "@tanstack/router-plugin": "^1.161.3", + "@zxcvbn-ts/core": "^3.0.4", + "@zxcvbn-ts/language-common": "^3.0.4", + "@zxcvbn-ts/language-en": "^3.0.2", + "axios": "^1.13.5", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "cmdk": "^1.1.1", + "diff": "^8.0.3", + "framer-motion": "^12.34.3", + "geist": "^1.7.0", + "leaflet": "^1.9.4", + "lucide-react": "^0.575.0", + "react": "^19.2.0", + "react-dom": "^19.2.0", + "react-leaflet": "^5.0.0", + "react-leaflet-cluster": "^4.0.0", + "reactflow": "^11.11.4", + "recharts": "^3.7.0", + "sonner": "^2.0.7", + "tailwind-merge": "^3.5.0", + "tailwindcss": "^3.4.19", + "zod": "^4.3.6", + "zustand": "^5.0.11" + }, + "devDependencies": { + "@eslint/js": "^9.39.1", + "@playwright/test": "^1.58.2", + "@testing-library/jest-dom": "^6.9.1", + "@testing-library/react": "^16.3.2", + "@testing-library/user-event": "^14.6.1", + "@types/diff": "^7.0.2", + "@types/leaflet": "^1.9.21", + "@types/node": "^24.10.1", + "@types/react": "^19.2.7", + "@types/react-dom": "^19.2.3", + "@vitejs/plugin-react": "^5.1.1", + "autoprefixer": "^10.4.24", + "eslint": "^9.39.1", + "eslint-plugin-react-hooks": "^7.0.1", + "eslint-plugin-react-refresh": "^0.4.24", + "globals": "^16.5.0", + "jsdom": "^28.1.0", + "postcss": "^8.5.6", + "typescript": "~5.9.3", + "typescript-eslint": "^8.48.0", + "vite": "^7.3.1", + "vite-plugin-sri3": "^1.3.0", + "vitest": "^4.0.18" + } + }, + "node_modules/@acemir/cssom": { + "version": "0.9.31", + "resolved": "https://registry.npmjs.org/@acemir/cssom/-/cssom-0.9.31.tgz", + "integrity": "sha512-ZnR3GSaH+/vJ0YlHau21FjfLYjMpYVIzTD8M8vIEQvIGxeOXyXdzCI140rrCY862p/C/BbzWsjc1dgnM9mkoTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@adobe/css-tools": { + "version": "4.4.4", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.4.tgz", + "integrity": "sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@asamuzakjp/css-color": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-5.0.1.tgz", + "integrity": "sha512-2SZFvqMyvboVV1d15lMf7XiI3m7SDqXUuKaTymJYLN6dSGadqp+fVojqJlVoMlbZnlTmu3S0TLwLTJpvBMO1Aw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@csstools/css-calc": "^3.1.1", + "@csstools/css-color-parser": "^4.0.2", + "@csstools/css-parser-algorithms": "^4.0.0", + "@csstools/css-tokenizer": "^4.0.0", + "lru-cache": "^11.2.6" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + } + }, + "node_modules/@asamuzakjp/css-color/node_modules/lru-cache": { + "version": "11.2.6", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.6.tgz", + "integrity": "sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@asamuzakjp/dom-selector": { + "version": "6.8.1", + "resolved": "https://registry.npmjs.org/@asamuzakjp/dom-selector/-/dom-selector-6.8.1.tgz", + "integrity": "sha512-MvRz1nCqW0fsy8Qz4dnLIvhOlMzqDVBabZx6lH+YywFDdjXhMY37SmpV1XFX3JzG5GWHn63j6HX6QPr3lZXHvQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/nwsapi": "^2.3.9", + "bidi-js": "^1.0.3", + "css-tree": "^3.1.0", + "is-potential-custom-element-name": "^1.0.1", + "lru-cache": "^11.2.6" + } + }, + "node_modules/@asamuzakjp/dom-selector/node_modules/lru-cache": { + "version": "11.2.6", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.6.tgz", + "integrity": "sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@asamuzakjp/nwsapi": { + "version": "2.3.9", + "resolved": "https://registry.npmjs.org/@asamuzakjp/nwsapi/-/nwsapi-2.3.9.tgz", + "integrity": "sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.1", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz", + "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.6.tgz", + "integrity": "sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw==", + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.0.tgz", + "integrity": "sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.28.6.tgz", + "integrity": "sha512-wgEmr06G6sIpqr8YDwA2dSRTE3bJ+V0IfpzfSY3Lfgd7YWOaAdlykvJi13ZKBt8cZHfgH1IXN+CL656W3uUa4w==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.28.6.tgz", + "integrity": "sha512-+nDNmQye7nlnuuHDboPbGm00Vqg3oO8niRRL27/4LYHUsHYh0zJ1xWOz0uRwNFmM1Avzk8wZbc6rdiYhomzv/A==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.6.tgz", + "integrity": "sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz", + "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bramus/specificity": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/@bramus/specificity/-/specificity-2.4.2.tgz", + "integrity": "sha512-ctxtJ/eA+t+6q2++vj5j7FYX3nRu311q1wfYH3xjlLOsczhlhxAg2FWNUXhpGvAw3BWo1xBcvOV6/YLc2r5FJw==", + "dev": true, + "license": "MIT", + "dependencies": { + "css-tree": "^3.0.0" + }, + "bin": { + "specificity": "bin/cli.js" + } + }, + "node_modules/@csstools/color-helpers": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-6.0.2.tgz", + "integrity": "sha512-LMGQLS9EuADloEFkcTBR3BwV/CGHV7zyDxVRtVDTwdI2Ca4it0CCVTT9wCkxSgokjE5Ho41hEPgb8OEUwoXr6Q==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=20.19.0" + } + }, + "node_modules/@csstools/css-calc": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-3.1.1.tgz", + "integrity": "sha512-HJ26Z/vmsZQqs/o3a6bgKslXGFAungXGbinULZO3eMsOyNJHeBBZfup5FiZInOghgoM4Hwnmw+OgbJCNg1wwUQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=20.19.0" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^4.0.0", + "@csstools/css-tokenizer": "^4.0.0" + } + }, + "node_modules/@csstools/css-color-parser": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-4.0.2.tgz", + "integrity": "sha512-0GEfbBLmTFf0dJlpsNU7zwxRIH0/BGEMuXLTCvFYxuL1tNhqzTbtnFICyJLTNK4a+RechKP75e7w42ClXSnJQw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/color-helpers": "^6.0.2", + "@csstools/css-calc": "^3.1.1" + }, + "engines": { + "node": ">=20.19.0" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^4.0.0", + "@csstools/css-tokenizer": "^4.0.0" + } + }, + "node_modules/@csstools/css-parser-algorithms": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-4.0.0.tgz", + "integrity": "sha512-+B87qS7fIG3L5h3qwJ/IFbjoVoOe/bpOdh9hAjXbvx0o8ImEmUsGXN0inFOnk2ChCFgqkkGFQ+TpM5rbhkKe4w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=20.19.0" + }, + "peerDependencies": { + "@csstools/css-tokenizer": "^4.0.0" + } + }, + "node_modules/@csstools/css-syntax-patches-for-csstree": { + "version": "1.0.28", + "resolved": "https://registry.npmjs.org/@csstools/css-syntax-patches-for-csstree/-/css-syntax-patches-for-csstree-1.0.28.tgz", + "integrity": "sha512-1NRf1CUBjnr3K7hu8BLxjQrKCxEe8FP/xmPTenAxCRZWVLbmGotkFvG9mfNpjA6k7Bw1bw4BilZq9cu19RA5pg==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0" + }, + "node_modules/@csstools/css-tokenizer": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-4.0.0.tgz", + "integrity": "sha512-QxULHAm7cNu72w97JUNCBFODFaXpbDg+dP8b/oWFAZ2MTRppA3U00Y2L1HqaS4J6yBqxwa/Y3nMBaxVKbB/NsA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=20.19.0" + } + }, + "node_modules/@dagrejs/dagre": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@dagrejs/dagre/-/dagre-2.0.4.tgz", + "integrity": "sha512-J6vCWTNpicHF4zFlZG1cS5DkGzMr9941gddYkakjrg3ZNev4bbqEgLHFTWiFrcJm7UCRu7olO3K6IRDd9gSGhA==", + "license": "MIT", + "dependencies": { + "@dagrejs/graphlib": "3.0.4" + } + }, + "node_modules/@dagrejs/graphlib": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@dagrejs/graphlib/-/graphlib-3.0.4.tgz", + "integrity": "sha512-HxZ7fCvAwTLCWCO0WjDkzAFQze8LdC6iOpKbetDKHIuDfIgMlIzYzqZ4nxwLlclQX+3ZVeZ1K2OuaOE2WWcyOg==", + "license": "MIT" + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.3.tgz", + "integrity": "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.3.tgz", + "integrity": "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.3.tgz", + "integrity": "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.3.tgz", + "integrity": "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.3.tgz", + "integrity": "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.3.tgz", + "integrity": "sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.3.tgz", + "integrity": "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.3.tgz", + "integrity": "sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.3.tgz", + "integrity": "sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.3.tgz", + "integrity": "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.3.tgz", + "integrity": "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.3.tgz", + "integrity": "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.3.tgz", + "integrity": "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==", + "cpu": [ + "mips64el" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.3.tgz", + "integrity": "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.3.tgz", + "integrity": "sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.3.tgz", + "integrity": "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==", + "cpu": [ + "s390x" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.3.tgz", + "integrity": "sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.3.tgz", + "integrity": "sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.3.tgz", + "integrity": "sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.3.tgz", + "integrity": "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.3.tgz", + "integrity": "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.3.tgz", + "integrity": "sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.3.tgz", + "integrity": "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.3.tgz", + "integrity": "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.3.tgz", + "integrity": "sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.3.tgz", + "integrity": "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.1", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.1.tgz", + "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/object-schema": "^2.1.7", + "debug": "^4.3.1", + "minimatch": "^3.1.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz", + "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", + "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "3.3.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.4.tgz", + "integrity": "sha512-4h4MVF8pmBsncB60r0wSJiIeUKTSD4m7FmTFThG8RHlsg9ajqckLm9OraguFGZE4vVdpiI1Q4+hFnisopmG6gQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.14.0", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.1", + "minimatch": "^3.1.3", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/js": { + "version": "9.39.3", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.3.tgz", + "integrity": "sha512-1B1VkCq6FuUNlQvlBYb+1jDu/gV297TIs/OeiaSR9l1H27SVW55ONE1e1Vp16NqP683+xEGzxYtv4XCiDPaQiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", + "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz", + "integrity": "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@exodus/bytes": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@exodus/bytes/-/bytes-1.14.1.tgz", + "integrity": "sha512-OhkBFWI6GcRMUroChZiopRiSp2iAMvEBK47NhJooDqz1RERO4QuZIZnjP63TXX8GAiLABkYmX+fuQsdJ1dd2QQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + }, + "peerDependencies": { + "@noble/hashes": "^1.8.0 || ^2.0.0" + }, + "peerDependenciesMeta": { + "@noble/hashes": { + "optional": true + } + } + }, + "node_modules/@floating-ui/core": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.4.tgz", + "integrity": "sha512-C3HlIdsBxszvm5McXlB8PeOEWfBhcGBTZGkGlWc2U0KFY5IwG5OQEuQ8rq52DZmcHDlPLd+YFBK+cZcytwIFWg==", + "license": "MIT", + "dependencies": { + "@floating-ui/utils": "^0.2.10" + } + }, + "node_modules/@floating-ui/dom": { + "version": "1.7.5", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.5.tgz", + "integrity": "sha512-N0bD2kIPInNHUHehXhMke1rBGs1dwqvC9O9KYMyyjK7iXt7GAhnro7UlcuYcGdS/yYOlq0MAVgrow8IbWJwyqg==", + "license": "MIT", + "dependencies": { + "@floating-ui/core": "^1.7.4", + "@floating-ui/utils": "^0.2.10" + } + }, + "node_modules/@floating-ui/react-dom": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.7.tgz", + "integrity": "sha512-0tLRojf/1Go2JgEVm+3Frg9A3IW8bJgKgdO0BN5RkF//ufuz2joZM63Npau2ff3J6lUVYgDSNzNkR+aH3IVfjg==", + "license": "MIT", + "dependencies": { + "@floating-ui/dom": "^1.7.5" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@floating-ui/utils": { + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.10.tgz", + "integrity": "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==", + "license": "MIT" + }, + "node_modules/@git-diff-view/core": { + "version": "0.0.39", + "resolved": "https://registry.npmjs.org/@git-diff-view/core/-/core-0.0.39.tgz", + "integrity": "sha512-GJGsti+R8XV11XFWVziXiSgZ8T26pcb1/7H/e5PLSByG7JKeDU9O9JPvjvSShQokj/5Zp5kXvtNM+tgCtrRrYQ==", + "license": "MIT", + "dependencies": { + "@git-diff-view/lowlight": "^0.0.39", + "fast-diff": "^1.3.0", + "highlight.js": "^11.11.0", + "lowlight": "^3.3.0" + } + }, + "node_modules/@git-diff-view/lowlight": { + "version": "0.0.39", + "resolved": "https://registry.npmjs.org/@git-diff-view/lowlight/-/lowlight-0.0.39.tgz", + "integrity": "sha512-S2hL5YsIl5Ao2JGeV95OswFjDnM3HRUZRlF4etVw/dbTmI27/Qp5Bnymb0cdx50ZLq8dV+BuxaeRu7w7jN8NHg==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "highlight.js": "^11.11.0", + "lowlight": "^3.3.0" + } + }, + "node_modules/@git-diff-view/react": { + "version": "0.0.39", + "resolved": "https://registry.npmjs.org/@git-diff-view/react/-/react-0.0.39.tgz", + "integrity": "sha512-p4MJOn0RMTrcLbzUYU90n5Ddsnf6wH9BM26uyPlULk8EfnCw5wHOoGlGa8zTrgl8L7ArOtmFauuhmch2d78V0w==", + "license": "MIT", + "dependencies": { + "@git-diff-view/core": "^0.0.39", + "@types/hast": "^3.0.0", + "fast-diff": "^1.3.0", + "highlight.js": "^11.11.0", + "lowlight": "^3.3.0", + "reactivity-store": "^0.3.12", + "use-sync-external-store": "^1.6.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@next/env": { + "version": "16.1.6", + "resolved": "https://registry.npmjs.org/@next/env/-/env-16.1.6.tgz", + "integrity": "sha512-N1ySLuZjnAtN3kFnwhAwPvZah8RJxKasD7x1f8shFqhncnWZn4JMfg37diLNuoHsLAlrDfM3g4mawVdtAG8XLQ==", + "license": "MIT", + "peer": true + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@playwright/test": { + "version": "1.58.2", + "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.58.2.tgz", + "integrity": "sha512-akea+6bHYBBfA9uQqSYmlJXn61cTa+jbO87xVLCWbTqbWadRVmhxlXATaOjOgcBaWU4ePo0wB41KMFv3o35IXA==", + "devOptional": true, + "license": "Apache-2.0", + "dependencies": { + "playwright": "1.58.2" + }, + "bin": { + "playwright": "cli.js" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@radix-ui/number": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/number/-/number-1.1.1.tgz", + "integrity": "sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==", + "license": "MIT" + }, + "node_modules/@radix-ui/primitive": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-arrow": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz", + "integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-arrow/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-arrow/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-avatar": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-avatar/-/react-avatar-1.1.11.tgz", + "integrity": "sha512-0Qk603AHGV28BOBO34p7IgD5m+V5Sg/YovfayABkoDDBM5d3NCx0Mp4gGrjzLGes1jV5eNOE1r3itqOR33VC6Q==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-context": "1.1.3", + "@radix-ui/react-primitive": "2.1.4", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-is-hydrated": "0.1.0", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-checkbox/-/react-checkbox-1.3.3.tgz", + "integrity": "sha512-wBbpv+NQftHDdG86Qc0pIyXk5IR3tM8Vd0nWLKDcX8nNn4nXFOFwsKuqw2okA/1D/mpaAkmuyndrPJTYDNZtFw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz", + "integrity": "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-compose-refs": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", + "integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-context": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.3.tgz", + "integrity": "sha512-ieIFACdMpYfMEjF0rEf5KLvfVyIkOz6PDGyNnP+u+4xQ6jny3VCgA4OgXOwNx2aUkxn8zx9fiVcM8CfFYv9Lxw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.15.tgz", + "integrity": "sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-direction": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.1.tgz", + "integrity": "sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", + "integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-escape-keydown": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dropdown-menu": { + "version": "2.1.16", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.1.16.tgz", + "integrity": "sha512-1PLGQEynI/3OX/ftV54COn+3Sud/Mn8vALg2rWnBLnRaGtJDduNW/22XjlGgPdpcIbiQxjKtb7BkcjP00nqfJw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-menu": "2.1.16", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dropdown-menu/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dropdown-menu/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dropdown-menu/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-guards": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz", + "integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-scope": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz", + "integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-scope/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-scope/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-id": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", + "integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-label": { + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-label/-/react-label-2.1.8.tgz", + "integrity": "sha512-FmXs37I6hSBVDlO4y764TNz1rLgKwjJMQ0EGte6F3Cb3f4bIuHB/iLa/8I9VKkmOy+gNHq8rql3j686ACVV21A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu": { + "version": "2.1.16", + "resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.1.16.tgz", + "integrity": "sha512-72F2T+PLlphrqLcAotYPp0uJMr5SjP5SL01wfEspJbru5Zs5vQaSHb4VB3ZMJPimgHHCHG7gMOeOB9H3Hdmtxg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popover/-/react-popover-1.1.15.tgz", + "integrity": "sha512-kr0X2+6Yy/vJzLYJUPCZEc8SfQcf+1COFoAqauJm74umQhta9M7lNJHP7QQS3vkvcGLQUbWpMzwrXYwrYztHKA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz", + "integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==", + "license": "MIT", + "dependencies": { + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-rect": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", + "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-presence": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", + "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-primitive": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", + "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.11.tgz", + "integrity": "sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select": { + "version": "2.2.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.2.6.tgz", + "integrity": "sha512-I30RydO+bnn2PQztvo25tswPH+wFBjehVGtmagkU78yMdwTwVf12wnAOF+AeP8S2N8xD+5UPbGhkUfPyvT+mwQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-separator": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.8.tgz", + "integrity": "sha512-sDvqVY4itsKwwSMEe0jtKgfTh+72Sy3gPmQpjqcQneqQ4PFmr/1I0YA+2/puilhggCe2gJcx5EBAYFkWkdpa5g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.4.tgz", + "integrity": "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tabs": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tabs/-/react-tabs-1.1.13.tgz", + "integrity": "sha512-7xdcatg7/U+7+Udyoj2zodtI9H/IIopqo+YOIcZOq1nJwXWBZ9p8xiu5llXlekDbZkca79a/fozEYQXIA4sW6A==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tabs/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tabs/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tabs/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toast": { + "version": "1.2.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toast/-/react-toast-1.2.15.tgz", + "integrity": "sha512-3OSz3TacUWy4WtOXV38DggwxoqJK4+eDkNMl5Z/MJZaoUPaP4/9lf81xXMe1I2ReTAptverZUpbPY4wWwWyL5g==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toast/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toast/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toast/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", + "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", + "integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-effect-event": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-effect-event/-/react-use-effect-event-0.0.2.tgz", + "integrity": "sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz", + "integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-is-hydrated": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-is-hydrated/-/react-use-is-hydrated-0.1.0.tgz", + "integrity": "sha512-U+UORVEq+cTnRIaostJv9AGdV3G6Y+zbVd+12e18jQ5A3c0xL03IhnHuiU4UV69wolOQp5GfR58NW/EgdQhwOA==", + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.5.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-previous": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-previous/-/react-use-previous-1.1.1.tgz", + "integrity": "sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-rect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.1.tgz", + "integrity": "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==", + "license": "MIT", + "dependencies": { + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-size": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz", + "integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.3.tgz", + "integrity": "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/rect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.1.tgz", + "integrity": "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==", + "license": "MIT" + }, + "node_modules/@react-leaflet/core": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@react-leaflet/core/-/core-3.0.0.tgz", + "integrity": "sha512-3EWmekh4Nz+pGcr+xjf0KNyYfC3U2JjnkWsh0zcqaexYqmmB5ZhH37kz41JXGmKzpaMZCnPofBBm64i+YrEvGQ==", + "license": "Hippocratic-2.1", + "peerDependencies": { + "leaflet": "^1.9.0", + "react": "^19.0.0", + "react-dom": "^19.0.0" + } + }, + "node_modules/@reactflow/background": { + "version": "11.3.14", + "resolved": "https://registry.npmjs.org/@reactflow/background/-/background-11.3.14.tgz", + "integrity": "sha512-Gewd7blEVT5Lh6jqrvOgd4G6Qk17eGKQfsDXgyRSqM+CTwDqRldG2LsWN4sNeno6sbqVIC2fZ+rAUBFA9ZEUDA==", + "license": "MIT", + "dependencies": { + "@reactflow/core": "11.11.4", + "classcat": "^5.0.3", + "zustand": "^4.4.1" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@reactflow/background/node_modules/zustand": { + "version": "4.5.7", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz", + "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==", + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.2.2" + }, + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "@types/react": ">=16.8", + "immer": ">=9.0.6", + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + } + } + }, + "node_modules/@reactflow/controls": { + "version": "11.2.14", + "resolved": "https://registry.npmjs.org/@reactflow/controls/-/controls-11.2.14.tgz", + "integrity": "sha512-MiJp5VldFD7FrqaBNIrQ85dxChrG6ivuZ+dcFhPQUwOK3HfYgX2RHdBua+gx+40p5Vw5It3dVNp/my4Z3jF0dw==", + "license": "MIT", + "dependencies": { + "@reactflow/core": "11.11.4", + "classcat": "^5.0.3", + "zustand": "^4.4.1" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@reactflow/controls/node_modules/zustand": { + "version": "4.5.7", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz", + "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==", + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.2.2" + }, + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "@types/react": ">=16.8", + "immer": ">=9.0.6", + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + } + } + }, + "node_modules/@reactflow/core": { + "version": "11.11.4", + "resolved": "https://registry.npmjs.org/@reactflow/core/-/core-11.11.4.tgz", + "integrity": "sha512-H4vODklsjAq3AMq6Np4LE12i1I4Ta9PrDHuBR9GmL8uzTt2l2jh4CiQbEMpvMDcp7xi4be0hgXj+Ysodde/i7Q==", + "license": "MIT", + "dependencies": { + "@types/d3": "^7.4.0", + "@types/d3-drag": "^3.0.1", + "@types/d3-selection": "^3.0.3", + "@types/d3-zoom": "^3.0.1", + "classcat": "^5.0.3", + "d3-drag": "^3.0.0", + "d3-selection": "^3.0.0", + "d3-zoom": "^3.0.0", + "zustand": "^4.4.1" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@reactflow/core/node_modules/zustand": { + "version": "4.5.7", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz", + "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==", + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.2.2" + }, + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "@types/react": ">=16.8", + "immer": ">=9.0.6", + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + } + } + }, + "node_modules/@reactflow/minimap": { + "version": "11.7.14", + "resolved": "https://registry.npmjs.org/@reactflow/minimap/-/minimap-11.7.14.tgz", + "integrity": "sha512-mpwLKKrEAofgFJdkhwR5UQ1JYWlcAAL/ZU/bctBkuNTT1yqV+y0buoNVImsRehVYhJwffSWeSHaBR5/GJjlCSQ==", + "license": "MIT", + "dependencies": { + "@reactflow/core": "11.11.4", + "@types/d3-selection": "^3.0.3", + "@types/d3-zoom": "^3.0.1", + "classcat": "^5.0.3", + "d3-selection": "^3.0.0", + "d3-zoom": "^3.0.0", + "zustand": "^4.4.1" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@reactflow/minimap/node_modules/zustand": { + "version": "4.5.7", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz", + "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==", + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.2.2" + }, + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "@types/react": ">=16.8", + "immer": ">=9.0.6", + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + } + } + }, + "node_modules/@reactflow/node-resizer": { + "version": "2.2.14", + "resolved": "https://registry.npmjs.org/@reactflow/node-resizer/-/node-resizer-2.2.14.tgz", + "integrity": "sha512-fwqnks83jUlYr6OHcdFEedumWKChTHRGw/kbCxj0oqBd+ekfs+SIp4ddyNU0pdx96JIm5iNFS0oNrmEiJbbSaA==", + "license": "MIT", + "dependencies": { + "@reactflow/core": "11.11.4", + "classcat": "^5.0.4", + "d3-drag": "^3.0.0", + "d3-selection": "^3.0.0", + "zustand": "^4.4.1" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@reactflow/node-resizer/node_modules/zustand": { + "version": "4.5.7", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz", + "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==", + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.2.2" + }, + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "@types/react": ">=16.8", + "immer": ">=9.0.6", + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + } + } + }, + "node_modules/@reactflow/node-toolbar": { + "version": "1.3.14", + "resolved": "https://registry.npmjs.org/@reactflow/node-toolbar/-/node-toolbar-1.3.14.tgz", + "integrity": "sha512-rbynXQnH/xFNu4P9H+hVqlEUafDCkEoCy0Dg9mG22Sg+rY/0ck6KkrAQrYrTgXusd+cEJOMK0uOOFCK2/5rSGQ==", + "license": "MIT", + "dependencies": { + "@reactflow/core": "11.11.4", + "classcat": "^5.0.3", + "zustand": "^4.4.1" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@reactflow/node-toolbar/node_modules/zustand": { + "version": "4.5.7", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz", + "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==", + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.2.2" + }, + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "@types/react": ">=16.8", + "immer": ">=9.0.6", + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + } + } + }, + "node_modules/@reduxjs/toolkit": { + "version": "2.11.2", + "resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-2.11.2.tgz", + "integrity": "sha512-Kd6kAHTA6/nUpp8mySPqj3en3dm0tdMIgbttnQ1xFMVpufoj+ADi8pXLBsd4xzTRHQa7t/Jv8W5UnCuW4kuWMQ==", + "license": "MIT", + "dependencies": { + "@standard-schema/spec": "^1.0.0", + "@standard-schema/utils": "^0.3.0", + "immer": "^11.0.0", + "redux": "^5.0.1", + "redux-thunk": "^3.1.0", + "reselect": "^5.1.0" + }, + "peerDependencies": { + "react": "^16.9.0 || ^17.0.0 || ^18 || ^19", + "react-redux": "^7.2.1 || ^8.1.3 || ^9.0.0" + }, + "peerDependenciesMeta": { + "react": { + "optional": true + }, + "react-redux": { + "optional": true + } + } + }, + "node_modules/@reduxjs/toolkit/node_modules/immer": { + "version": "11.1.4", + "resolved": "https://registry.npmjs.org/immer/-/immer-11.1.4.tgz", + "integrity": "sha512-XREFCPo6ksxVzP4E0ekD5aMdf8WMwmdNaz6vuvxgI40UaEiu6q3p8X52aU6GdyvLY3XXX/8R7JOTXStz/nBbRw==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/immer" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-rc.3", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-rc.3.tgz", + "integrity": "sha512-eybk3TjzzzV97Dlj5c+XrBFW57eTNhzod66y9HrBlzJ6NsCrWCp/2kaPS3K9wJmurBC0Tdw4yPjXKZqlznim3Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.59.0.tgz", + "integrity": "sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.59.0.tgz", + "integrity": "sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.59.0.tgz", + "integrity": "sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.59.0.tgz", + "integrity": "sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.59.0.tgz", + "integrity": "sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.59.0.tgz", + "integrity": "sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.59.0.tgz", + "integrity": "sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.59.0.tgz", + "integrity": "sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.59.0.tgz", + "integrity": "sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.59.0.tgz", + "integrity": "sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.59.0.tgz", + "integrity": "sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.59.0.tgz", + "integrity": "sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.59.0.tgz", + "integrity": "sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.59.0.tgz", + "integrity": "sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.59.0.tgz", + "integrity": "sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.59.0.tgz", + "integrity": "sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.59.0.tgz", + "integrity": "sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.59.0.tgz", + "integrity": "sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.59.0.tgz", + "integrity": "sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.59.0.tgz", + "integrity": "sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.59.0.tgz", + "integrity": "sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.59.0.tgz", + "integrity": "sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.59.0.tgz", + "integrity": "sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.59.0.tgz", + "integrity": "sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.59.0.tgz", + "integrity": "sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@standard-schema/spec": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", + "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", + "license": "MIT" + }, + "node_modules/@standard-schema/utils": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@standard-schema/utils/-/utils-0.3.0.tgz", + "integrity": "sha512-e7Mew686owMaPJVNNLs55PUvgz371nKgwsc4vxE49zsODpJEnxgxRo2y/OKrqueavXgZNMDVj3DdHFlaSAeU8g==", + "license": "MIT" + }, + "node_modules/@swc/helpers": { + "version": "0.5.15", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz", + "integrity": "sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==", + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "tslib": "^2.8.0" + } + }, + "node_modules/@tanstack/history": { + "version": "1.154.14", + "resolved": "https://registry.npmjs.org/@tanstack/history/-/history-1.154.14.tgz", + "integrity": "sha512-xyIfof8eHBuub1CkBnbKNKQXeRZC4dClhmzePHVOEel4G7lk/dW+TQ16da7CFdeNLv6u6Owf5VoBQxoo6DFTSA==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/query-core": { + "version": "5.90.20", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.90.20.tgz", + "integrity": "sha512-OMD2HLpNouXEfZJWcKeVKUgQ5n+n3A2JFmBaScpNDUqSrQSjiveC7dKMe53uJUg1nDG16ttFPz2xfilz6i2uVg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/react-query": { + "version": "5.90.21", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.90.21.tgz", + "integrity": "sha512-0Lu6y5t+tvlTJMTO7oh5NSpJfpg/5D41LlThfepTixPYkJ0sE2Jj0m0f6yYqujBwIXlId87e234+MxG3D3g7kg==", + "license": "MIT", + "dependencies": { + "@tanstack/query-core": "5.90.20" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^18 || ^19" + } + }, + "node_modules/@tanstack/react-router": { + "version": "1.161.3", + "resolved": "https://registry.npmjs.org/@tanstack/react-router/-/react-router-1.161.3.tgz", + "integrity": "sha512-evYPrkuFt4T6E0WVyBGGq83lWHJjsYy3E5SpPpfPY/uRnEgmgwfr6Xl570msRnWYMj7DIkYg8ZWFFwzqKrSlBw==", + "license": "MIT", + "dependencies": { + "@tanstack/history": "1.154.14", + "@tanstack/react-store": "^0.9.1", + "@tanstack/router-core": "1.161.3", + "isbot": "^5.1.22", + "tiny-invariant": "^1.3.3", + "tiny-warning": "^1.0.3" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": ">=18.0.0 || >=19.0.0", + "react-dom": ">=18.0.0 || >=19.0.0" + } + }, + "node_modules/@tanstack/react-router-devtools": { + "version": "1.161.3", + "resolved": "https://registry.npmjs.org/@tanstack/react-router-devtools/-/react-router-devtools-1.161.3.tgz", + "integrity": "sha512-AlJPtaYvhDVuwe/TqZIYt5njmxAGxMEq6l7AXOXQLVu7UP0jysxGoQfrm2LZT+piMeUmJ5opRUTnxktpCphIFQ==", + "license": "MIT", + "dependencies": { + "@tanstack/router-devtools-core": "1.161.3" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "@tanstack/react-router": "^1.161.3", + "@tanstack/router-core": "^1.161.3", + "react": ">=18.0.0 || >=19.0.0", + "react-dom": ">=18.0.0 || >=19.0.0" + }, + "peerDependenciesMeta": { + "@tanstack/router-core": { + "optional": true + } + } + }, + "node_modules/@tanstack/react-store": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/@tanstack/react-store/-/react-store-0.9.1.tgz", + "integrity": "sha512-YzJLnRvy5lIEFTLWBAZmcOjK3+2AepnBv/sr6NZmiqJvq7zTQggyK99Gw8fqYdMdHPQWXjz0epFKJXC+9V2xDA==", + "license": "MIT", + "dependencies": { + "@tanstack/store": "0.9.1", + "use-sync-external-store": "^1.6.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@tanstack/react-virtual": { + "version": "3.13.19", + "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.13.19.tgz", + "integrity": "sha512-KzwmU1IbE0IvCZSm6OXkS+kRdrgW2c2P3Ho3NC+zZXWK6oObv/L+lcV/2VuJ+snVESRlMJ+w/fg4WXI/JzoNGQ==", + "license": "MIT", + "dependencies": { + "@tanstack/virtual-core": "3.13.19" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@tanstack/router-core": { + "version": "1.161.3", + "resolved": "https://registry.npmjs.org/@tanstack/router-core/-/router-core-1.161.3.tgz", + "integrity": "sha512-8EuaGXLUjugQE9Rsb8VrWSy+wImcs/DZ9JORqUJYCmiiWnJzbat8KedQItq/9LCjMJyx4vTLCt8NnZCL+j1Ayg==", + "license": "MIT", + "dependencies": { + "@tanstack/history": "1.154.14", + "@tanstack/store": "^0.9.1", + "cookie-es": "^2.0.0", + "seroval": "^1.4.2", + "seroval-plugins": "^1.4.2", + "tiny-invariant": "^1.3.3", + "tiny-warning": "^1.0.3" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/router-devtools-core": { + "version": "1.161.3", + "resolved": "https://registry.npmjs.org/@tanstack/router-devtools-core/-/router-devtools-core-1.161.3.tgz", + "integrity": "sha512-yLbBH9ovomvxAk4nbTzN+UacPX2C5r3Kq4p+4O8gZVopUjRqiYiQN7ZJ6tN6atQouJQtym2xXwa5pC4EyFlCgQ==", + "license": "MIT", + "dependencies": { + "clsx": "^2.1.1", + "goober": "^2.1.16", + "tiny-invariant": "^1.3.3" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "@tanstack/router-core": "^1.161.3", + "csstype": "^3.0.10" + }, + "peerDependenciesMeta": { + "csstype": { + "optional": true + } + } + }, + "node_modules/@tanstack/router-generator": { + "version": "1.161.3", + "resolved": "https://registry.npmjs.org/@tanstack/router-generator/-/router-generator-1.161.3.tgz", + "integrity": "sha512-GKOrsOu7u5aoK1+lRu6KUUOmbb42mYF2ezfXf27QMiBjMx/yDHXln8wmdR7ZQ+FdSGz2YVubt2Ns3KuFsDsZJg==", + "license": "MIT", + "dependencies": { + "@tanstack/router-core": "1.161.3", + "@tanstack/router-utils": "1.158.0", + "@tanstack/virtual-file-routes": "1.154.7", + "prettier": "^3.5.0", + "recast": "^0.23.11", + "source-map": "^0.7.4", + "tsx": "^4.19.2", + "zod": "^3.24.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/router-generator/node_modules/zod": { + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/@tanstack/router-plugin": { + "version": "1.161.3", + "resolved": "https://registry.npmjs.org/@tanstack/router-plugin/-/router-plugin-1.161.3.tgz", + "integrity": "sha512-3Uy4AxgHNYjmCGf2WYWB8Gy3C6m0YE5DV1SK2p3yUrA/PhCMYRe+xzjyD5pViMUSLUoPHQYGY6bOIM9OOPRI/Q==", + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.5", + "@babel/plugin-syntax-jsx": "^7.27.1", + "@babel/plugin-syntax-typescript": "^7.27.1", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.28.5", + "@babel/types": "^7.28.5", + "@tanstack/router-core": "1.161.3", + "@tanstack/router-generator": "1.161.3", + "@tanstack/router-utils": "1.158.0", + "@tanstack/virtual-file-routes": "1.154.7", + "chokidar": "^3.6.0", + "unplugin": "^2.1.2", + "zod": "^3.24.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "@rsbuild/core": ">=1.0.2", + "@tanstack/react-router": "^1.161.3", + "vite": ">=5.0.0 || >=6.0.0 || >=7.0.0", + "vite-plugin-solid": "^2.11.10", + "webpack": ">=5.92.0" + }, + "peerDependenciesMeta": { + "@rsbuild/core": { + "optional": true + }, + "@tanstack/react-router": { + "optional": true + }, + "vite": { + "optional": true + }, + "vite-plugin-solid": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/@tanstack/router-plugin/node_modules/zod": { + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/@tanstack/router-utils": { + "version": "1.158.0", + "resolved": "https://registry.npmjs.org/@tanstack/router-utils/-/router-utils-1.158.0.tgz", + "integrity": "sha512-qZ76eaLKU6Ae9iI/mc5zizBX149DXXZkBVVO3/QRIll79uKLJZHQlMKR++2ba7JsciBWz1pgpIBcCJPE9S0LVg==", + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.5", + "@babel/generator": "^7.28.5", + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "ansis": "^4.1.0", + "babel-dead-code-elimination": "^1.0.12", + "diff": "^8.0.2", + "pathe": "^2.0.3", + "tinyglobby": "^0.2.15" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/store": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/@tanstack/store/-/store-0.9.1.tgz", + "integrity": "sha512-+qcNkOy0N1qSGsP7omVCW0SDrXtaDcycPqBDE726yryiA5eTDFpjBReaYjghVJwNf1pcPMyzIwTGlYjCSQR0Fg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/virtual-core": { + "version": "3.13.19", + "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.13.19.tgz", + "integrity": "sha512-/BMP7kNhzKOd7wnDeB8NrIRNLwkf5AhCYCvtfZV2GXWbBieFm/el0n6LOAXlTi6ZwHICSNnQcIxRCWHrLzDY+g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/virtual-file-routes": { + "version": "1.154.7", + "resolved": "https://registry.npmjs.org/@tanstack/virtual-file-routes/-/virtual-file-routes-1.154.7.tgz", + "integrity": "sha512-cHHDnewHozgjpI+MIVp9tcib6lYEQK5MyUr0ChHpHFGBl8Xei55rohFK0I0ve/GKoHeioaK42Smd8OixPp6CTg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@testing-library/dom": { + "version": "10.4.1", + "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.1.tgz", + "integrity": "sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@babel/code-frame": "^7.10.4", + "@babel/runtime": "^7.12.5", + "@types/aria-query": "^5.0.1", + "aria-query": "5.3.0", + "dom-accessibility-api": "^0.5.9", + "lz-string": "^1.5.0", + "picocolors": "1.1.1", + "pretty-format": "^27.0.2" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@testing-library/jest-dom": { + "version": "6.9.1", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.9.1.tgz", + "integrity": "sha512-zIcONa+hVtVSSep9UT3jZ5rizo2BsxgyDYU7WFD5eICBE7no3881HGeb/QkGfsJs6JTkY1aQhT7rIPC7e+0nnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@adobe/css-tools": "^4.4.0", + "aria-query": "^5.0.0", + "css.escape": "^1.5.1", + "dom-accessibility-api": "^0.6.3", + "picocolors": "^1.1.1", + "redent": "^3.0.0" + }, + "engines": { + "node": ">=14", + "npm": ">=6", + "yarn": ">=1" + } + }, + "node_modules/@testing-library/jest-dom/node_modules/dom-accessibility-api": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.6.3.tgz", + "integrity": "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@testing-library/react": { + "version": "16.3.2", + "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.2.tgz", + "integrity": "sha512-XU5/SytQM+ykqMnAnvB2umaJNIOsLF3PVv//1Ew4CTcpz0/BRyy/af40qqrt7SjKpDdT1saBMc42CUok5gaw+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.5" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@testing-library/dom": "^10.0.0", + "@types/react": "^18.0.0 || ^19.0.0", + "@types/react-dom": "^18.0.0 || ^19.0.0", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@testing-library/user-event": { + "version": "14.6.1", + "resolved": "https://registry.npmjs.org/@testing-library/user-event/-/user-event-14.6.1.tgz", + "integrity": "sha512-vq7fv0rnt+QTXgPxr5Hjc210p6YKq2kmdziLgnsZGgLJ9e6VAShx1pACLuRjd/AS/sr7phAR58OIIpf0LlmQNw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12", + "npm": ">=6" + }, + "peerDependencies": { + "@testing-library/dom": ">=7.21.4" + } + }, + "node_modules/@types/aria-query": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz", + "integrity": "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/chai": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz", + "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*", + "assertion-error": "^2.0.1" + } + }, + "node_modules/@types/d3": { + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.3.tgz", + "integrity": "sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==", + "license": "MIT", + "dependencies": { + "@types/d3-array": "*", + "@types/d3-axis": "*", + "@types/d3-brush": "*", + "@types/d3-chord": "*", + "@types/d3-color": "*", + "@types/d3-contour": "*", + "@types/d3-delaunay": "*", + "@types/d3-dispatch": "*", + "@types/d3-drag": "*", + "@types/d3-dsv": "*", + "@types/d3-ease": "*", + "@types/d3-fetch": "*", + "@types/d3-force": "*", + "@types/d3-format": "*", + "@types/d3-geo": "*", + "@types/d3-hierarchy": "*", + "@types/d3-interpolate": "*", + "@types/d3-path": "*", + "@types/d3-polygon": "*", + "@types/d3-quadtree": "*", + "@types/d3-random": "*", + "@types/d3-scale": "*", + "@types/d3-scale-chromatic": "*", + "@types/d3-selection": "*", + "@types/d3-shape": "*", + "@types/d3-time": "*", + "@types/d3-time-format": "*", + "@types/d3-timer": "*", + "@types/d3-transition": "*", + "@types/d3-zoom": "*" + } + }, + "node_modules/@types/d3-array": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz", + "integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==", + "license": "MIT" + }, + "node_modules/@types/d3-axis": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.6.tgz", + "integrity": "sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-brush": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.6.tgz", + "integrity": "sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-chord": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.6.tgz", + "integrity": "sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==", + "license": "MIT" + }, + "node_modules/@types/d3-color": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", + "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==", + "license": "MIT" + }, + "node_modules/@types/d3-contour": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.6.tgz", + "integrity": "sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==", + "license": "MIT", + "dependencies": { + "@types/d3-array": "*", + "@types/geojson": "*" + } + }, + "node_modules/@types/d3-delaunay": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.4.tgz", + "integrity": "sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==", + "license": "MIT" + }, + "node_modules/@types/d3-dispatch": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.7.tgz", + "integrity": "sha512-5o9OIAdKkhN1QItV2oqaE5KMIiXAvDWBDPrD85e58Qlz1c1kI/J0NcqbEG88CoTwJrYe7ntUCVfeUl2UJKbWgA==", + "license": "MIT" + }, + "node_modules/@types/d3-drag": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz", + "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-dsv": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.7.tgz", + "integrity": "sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==", + "license": "MIT" + }, + "node_modules/@types/d3-ease": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz", + "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==", + "license": "MIT" + }, + "node_modules/@types/d3-fetch": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.7.tgz", + "integrity": "sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==", + "license": "MIT", + "dependencies": { + "@types/d3-dsv": "*" + } + }, + "node_modules/@types/d3-force": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.10.tgz", + "integrity": "sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==", + "license": "MIT" + }, + "node_modules/@types/d3-format": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.4.tgz", + "integrity": "sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==", + "license": "MIT" + }, + "node_modules/@types/d3-geo": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.1.0.tgz", + "integrity": "sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==", + "license": "MIT", + "dependencies": { + "@types/geojson": "*" + } + }, + "node_modules/@types/d3-hierarchy": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.7.tgz", + "integrity": "sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==", + "license": "MIT" + }, + "node_modules/@types/d3-interpolate": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz", + "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==", + "license": "MIT", + "dependencies": { + "@types/d3-color": "*" + } + }, + "node_modules/@types/d3-path": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.1.tgz", + "integrity": "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==", + "license": "MIT" + }, + "node_modules/@types/d3-polygon": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.2.tgz", + "integrity": "sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==", + "license": "MIT" + }, + "node_modules/@types/d3-quadtree": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.6.tgz", + "integrity": "sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==", + "license": "MIT" + }, + "node_modules/@types/d3-random": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.3.tgz", + "integrity": "sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==", + "license": "MIT" + }, + "node_modules/@types/d3-scale": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz", + "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==", + "license": "MIT", + "dependencies": { + "@types/d3-time": "*" + } + }, + "node_modules/@types/d3-scale-chromatic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", + "integrity": "sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==", + "license": "MIT" + }, + "node_modules/@types/d3-selection": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz", + "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==", + "license": "MIT" + }, + "node_modules/@types/d3-shape": { + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.8.tgz", + "integrity": "sha512-lae0iWfcDeR7qt7rA88BNiqdvPS5pFVPpo5OfjElwNaT2yyekbM0C9vK+yqBqEmHr6lDkRnYNoTBYlAgJa7a4w==", + "license": "MIT", + "dependencies": { + "@types/d3-path": "*" + } + }, + "node_modules/@types/d3-time": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz", + "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==", + "license": "MIT" + }, + "node_modules/@types/d3-time-format": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.3.tgz", + "integrity": "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==", + "license": "MIT" + }, + "node_modules/@types/d3-timer": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz", + "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==", + "license": "MIT" + }, + "node_modules/@types/d3-transition": { + "version": "3.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz", + "integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-zoom": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz", + "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==", + "license": "MIT", + "dependencies": { + "@types/d3-interpolate": "*", + "@types/d3-selection": "*" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/diff": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/@types/diff/-/diff-7.0.2.tgz", + "integrity": "sha512-JSWRMozjFKsGlEjiiKajUjIJVKuKdE3oVy2DNtK+fUo8q82nhFZ2CPQwicAIkXrofahDXrWJ7mjelvZphMS98Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/@types/geojson": { + "version": "7946.0.16", + "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.16.tgz", + "integrity": "sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg==", + "license": "MIT" + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/leaflet": { + "version": "1.9.21", + "resolved": "https://registry.npmjs.org/@types/leaflet/-/leaflet-1.9.21.tgz", + "integrity": "sha512-TbAd9DaPGSnzp6QvtYngntMZgcRk+igFELwR2N99XZn7RXUdKgsXMR+28bUO0rPsWp8MIu/f47luLIQuSLYv/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/geojson": "*" + } + }, + "node_modules/@types/node": { + "version": "24.10.13", + "resolved": "https://registry.npmjs.org/@types/node/-/node-24.10.13.tgz", + "integrity": "sha512-oH72nZRfDv9lADUBSo104Aq7gPHpQZc4BTx38r9xf9pg5LfP6EzSyH2n7qFmmxRQXh7YlUXODcYsg6PuTDSxGg==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~7.16.0" + } + }, + "node_modules/@types/react": { + "version": "19.2.14", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz", + "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", + "devOptional": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^19.2.0" + } + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/@types/use-sync-external-store": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/@types/use-sync-external-store/-/use-sync-external-store-0.0.6.tgz", + "integrity": "sha512-zFDAD+tlpf2r4asuHEj0XH6pY6i0g5NeAHPn+15wk3BV6JA69eERFXC1gyGThDkVa1zCyKr5jox1+2LbV/AMLg==", + "license": "MIT" + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.56.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.56.1.tgz", + "integrity": "sha512-Jz9ZztpB37dNC+HU2HI28Bs9QXpzCz+y/twHOwhyrIRdbuVDxSytJNDl6z/aAKlaRIwC7y8wJdkBv7FxYGgi0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.12.2", + "@typescript-eslint/scope-manager": "8.56.1", + "@typescript-eslint/type-utils": "8.56.1", + "@typescript-eslint/utils": "8.56.1", + "@typescript-eslint/visitor-keys": "8.56.1", + "ignore": "^7.0.5", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.56.1", + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "8.56.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.56.1.tgz", + "integrity": "sha512-klQbnPAAiGYFyI02+znpBRLyjL4/BrBd0nyWkdC0s/6xFLkXYQ8OoRrSkqacS1ddVxf/LDyODIKbQ5TgKAf/Fg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/scope-manager": "8.56.1", + "@typescript-eslint/types": "8.56.1", + "@typescript-eslint/typescript-estree": "8.56.1", + "@typescript-eslint/visitor-keys": "8.56.1", + "debug": "^4.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/project-service": { + "version": "8.56.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.56.1.tgz", + "integrity": "sha512-TAdqQTzHNNvlVFfR+hu2PDJrURiwKsUvxFn1M0h95BB8ah5jejas08jUWG4dBA68jDMI988IvtfdAI53JzEHOQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.56.1", + "@typescript-eslint/types": "^8.56.1", + "debug": "^4.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.56.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.56.1.tgz", + "integrity": "sha512-YAi4VDKcIZp0O4tz/haYKhmIDZFEUPOreKbfdAN3SzUDMcPhJ8QI99xQXqX+HoUVq8cs85eRKnD+rne2UAnj2w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.56.1", + "@typescript-eslint/visitor-keys": "8.56.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.56.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.56.1.tgz", + "integrity": "sha512-qOtCYzKEeyr3aR9f28mPJqBty7+DBqsdd63eO0yyDwc6vgThj2UjWfJIcsFeSucYydqcuudMOprZ+x1SpF3ZuQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.56.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.56.1.tgz", + "integrity": "sha512-yB/7dxi7MgTtGhZdaHCemf7PuwrHMenHjmzgUW1aJpO+bBU43OycnM3Wn+DdvDO/8zzA9HlhaJ0AUGuvri4oGg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.56.1", + "@typescript-eslint/typescript-estree": "8.56.1", + "@typescript-eslint/utils": "8.56.1", + "debug": "^4.4.3", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "8.56.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.56.1.tgz", + "integrity": "sha512-dbMkdIUkIkchgGDIv7KLUpa0Mda4IYjo4IAMJUZ+3xNoUXxMsk9YtKpTHSChRS85o+H9ftm51gsK1dZReY9CVw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "8.56.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.56.1.tgz", + "integrity": "sha512-qzUL1qgalIvKWAf9C1HpvBjif+Vm6rcT5wZd4VoMb9+Km3iS3Cv9DY6dMRMDtPnwRAFyAi7YXJpTIEXLvdfPxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/project-service": "8.56.1", + "@typescript-eslint/tsconfig-utils": "8.56.1", + "@typescript-eslint/types": "8.56.1", + "@typescript-eslint/visitor-keys": "8.56.1", + "debug": "^4.4.3", + "minimatch": "^10.2.2", + "semver": "^7.7.3", + "tinyglobby": "^0.2.15", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/balanced-match": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz", + "integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.3.tgz", + "integrity": "sha512-fy6KJm2RawA5RcHkLa1z/ScpBeA762UF9KmZQxwIbDtRJrgLzM10depAiEQ+CXYcoiqW1/m96OAAoke2nE9EeA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^4.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.2.tgz", + "integrity": "sha512-+G4CpNBxa5MprY+04MbgOw1v7So6n5JY166pFi9KfYwT78fxScCeSNQSNzp6dpPSW2rONOps6Ocam1wFhCgoVw==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "brace-expansion": "^5.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.56.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.56.1.tgz", + "integrity": "sha512-HPAVNIME3tABJ61siYlHzSWCGtOoeP2RTIaHXFMPqjrQKCGB9OgUVdiNgH7TJS2JNIQ5qQ4RsAUDuGaGme/KOA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.9.1", + "@typescript-eslint/scope-manager": "8.56.1", + "@typescript-eslint/types": "8.56.1", + "@typescript-eslint/typescript-estree": "8.56.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.56.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.56.1.tgz", + "integrity": "sha512-KiROIzYdEV85YygXw6BI/Dx4fnBlFQu6Mq4QE4MOH9fFnhohw6wX/OAvDY2/C+ut0I3RSPKenvZJIVYqJNkhEw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.56.1", + "eslint-visitor-keys": "^5.0.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-5.0.1.tgz", + "integrity": "sha512-tD40eHxA35h0PEIZNeIjkHoDR4YjjJp34biM0mDvplBe//mB+IHCqHDGV7pxF+7MklTvighcCPPZC7ynWyjdTA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^20.19.0 || ^22.13.0 || >=24" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@vitejs/plugin-react": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-5.1.4.tgz", + "integrity": "sha512-VIcFLdRi/VYRU8OL/puL7QXMYafHmqOnwTZY50U1JPlCNj30PxCMx65c494b1K9be9hX83KVt0+gTEwTWLqToA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.29.0", + "@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": "1.0.0-rc.3", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.18.0" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/@vitest/expect": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.18.tgz", + "integrity": "sha512-8sCWUyckXXYvx4opfzVY03EOiYVxyNrHS5QxX3DAIi5dpJAAkyJezHCP77VMX4HKA2LDT/Jpfo8i2r5BE3GnQQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@standard-schema/spec": "^1.0.0", + "@types/chai": "^5.2.2", + "@vitest/spy": "4.0.18", + "@vitest/utils": "4.0.18", + "chai": "^6.2.1", + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.18.tgz", + "integrity": "sha512-HhVd0MDnzzsgevnOWCBj5Otnzobjy5wLBe4EdeeFGv8luMsGcYqDuFRMcttKWZA5vVO8RFjexVovXvAM4JoJDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "4.0.18", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.21" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^6.0.0 || ^7.0.0-0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/pretty-format": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.18.tgz", + "integrity": "sha512-P24GK3GulZWC5tz87ux0m8OADrQIUVDPIjjj65vBXYG17ZeU3qD7r+MNZ1RNv4l8CGU2vtTRqixrOi9fYk/yKw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.18.tgz", + "integrity": "sha512-rpk9y12PGa22Jg6g5M3UVVnTS7+zycIGk9ZNGN+m6tZHKQb7jrP7/77WfZy13Y/EUDd52NDsLRQhYKtv7XfPQw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "4.0.18", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.18.tgz", + "integrity": "sha512-PCiV0rcl7jKQjbgYqjtakly6T1uwv/5BQ9SwBLekVg/EaYeQFPiXcgrC2Y7vDMA8dM1SUEAEV82kgSQIlXNMvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.0.18", + "magic-string": "^0.30.21", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.18.tgz", + "integrity": "sha512-cbQt3PTSD7P2OARdVW3qWER5EGq7PHlvE+QfzSC0lbwO+xnt7+XH06ZzFjFRgzUX//JmpxrCu92VdwvEPlWSNw==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.18.tgz", + "integrity": "sha512-msMRKLMVLWygpK3u2Hybgi4MNjcYJvwTb0Ru09+fOyCXIgT5raYP041DRRdiJiI3k/2U6SEbAETB3YtBrUkCFA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.0.18", + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vue/reactivity": { + "version": "3.5.29", + "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.5.29.tgz", + "integrity": "sha512-zcrANcrRdcLtmGZETBxWqIkoQei8HaFpZWx/GHKxx79JZsiZ8j1du0VUJtu4eJjgFvU/iKL5lRXFXksVmI+5DA==", + "license": "MIT", + "dependencies": { + "@vue/shared": "3.5.29" + } + }, + "node_modules/@vue/shared": { + "version": "3.5.29", + "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.5.29.tgz", + "integrity": "sha512-w7SR0A5zyRByL9XUkCfdLs7t9XOHUyJ67qPGQjOou3p6GvBeBW+AVjUUmlxtZ4PIYaRvE+1LmK44O4uajlZwcg==", + "license": "MIT" + }, + "node_modules/@zxcvbn-ts/core": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@zxcvbn-ts/core/-/core-3.0.4.tgz", + "integrity": "sha512-aQeiT0F09FuJaAqNrxynlAwZ2mW/1MdXakKWNmGM1Qp/VaY6CnB/GfnMS2T8gB2231Esp1/maCWd8vTG4OuShw==", + "license": "MIT", + "dependencies": { + "fastest-levenshtein": "1.0.16" + } + }, + "node_modules/@zxcvbn-ts/language-common": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@zxcvbn-ts/language-common/-/language-common-3.0.4.tgz", + "integrity": "sha512-viSNNnRYtc7ULXzxrQIVUNwHAPSXRtoIwy/Tq4XQQdIknBzw4vz36lQLF6mvhMlTIlpjoN/Z1GFu/fwiAlUSsw==", + "license": "MIT" + }, + "node_modules/@zxcvbn-ts/language-en": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@zxcvbn-ts/language-en/-/language-en-3.0.2.tgz", + "integrity": "sha512-Zp+zL+I6Un2Bj0tRXNs6VUBq3Djt+hwTwUz4dkt2qgsQz47U0/XthZ4ULrT/RxjwJRl5LwiaKOOZeOtmixHnjg==", + "license": "MIT" + }, + "node_modules/acorn": { + "version": "8.16.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz", + "integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==", + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/ajv": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.14.0.tgz", + "integrity": "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/ansis": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/ansis/-/ansis-4.2.0.tgz", + "integrity": "sha512-HqZ5rWlFjGiV0tDm3UxxgNRqsOTniqoKZu0pIAfh7TZQMGuZK+hH0drySty0si0QXj1ieop4+SkSfPZBPPkHig==", + "license": "ISC", + "engines": { + "node": ">=14" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/anymatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "license": "MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/aria-hidden": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.6.tgz", + "integrity": "sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/aria-query": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz", + "integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "dequal": "^2.0.3" + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/ast-types": { + "version": "0.16.1", + "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.16.1.tgz", + "integrity": "sha512-6t10qk83GOG8p0vKmaCr8eiilZwO171AvbROMtvvNiwrTly62t+7XkA8RdIIVbpMhCASAsxgAzdRSwh6nw/5Dg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, + "node_modules/autoprefixer": { + "version": "10.4.24", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.24.tgz", + "integrity": "sha512-uHZg7N9ULTVbutaIsDRoUkoS8/h3bdsmVJYZ5l3wv8Cp/6UIIoRDm90hZ+BwxUj/hGBEzLxdHNSKuFpn8WOyZw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.28.1", + "caniuse-lite": "^1.0.30001766", + "fraction.js": "^5.3.4", + "picocolors": "^1.1.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/axios": { + "version": "1.13.5", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.5.tgz", + "integrity": "sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q==", + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.15.11", + "form-data": "^4.0.5", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/babel-dead-code-elimination": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/babel-dead-code-elimination/-/babel-dead-code-elimination-1.0.12.tgz", + "integrity": "sha512-GERT7L2TiYcYDtYk1IpD+ASAYXjKbLTDPhBtYj7X1NuRMDTMtAx9kyBenub1Ev41lo91OHCKdmP+egTDmfQ7Ig==", + "license": "MIT", + "dependencies": { + "@babel/core": "^7.23.7", + "@babel/parser": "^7.23.6", + "@babel/traverse": "^7.23.7", + "@babel/types": "^7.23.6" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/baseline-browser-mapping": { + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.0.tgz", + "integrity": "sha512-lIyg0szRfYbiy67j9KN8IyeD7q7hcmqnJ1ddWmNt19ItGpNN64mnllmxUNFIOdOm6by97jlL6wfpTTJrmnjWAA==", + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.cjs" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/bidi-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/bidi-js/-/bidi-js-1.0.3.tgz", + "integrity": "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "require-from-string": "^2.0.2" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001774", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001774.tgz", + "integrity": "sha512-DDdwPGz99nmIEv216hKSgLD+D4ikHQHjBC/seF98N9CPqRX4M5mSxT9eTV6oyisnJcuzxtZy4n17yKKQYmYQOA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chai": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz", + "integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/class-variance-authority": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.1.tgz", + "integrity": "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==", + "license": "Apache-2.0", + "dependencies": { + "clsx": "^2.1.1" + }, + "funding": { + "url": "https://polar.sh/cva" + } + }, + "node_modules/classcat": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.5.tgz", + "integrity": "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w==", + "license": "MIT" + }, + "node_modules/client-only": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", + "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", + "license": "MIT", + "peer": true + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/cmdk": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cmdk/-/cmdk-1.1.1.tgz", + "integrity": "sha512-Vsv7kFaXm+ptHDMZ7izaRsP70GgrW9NBNGswt9OZaVBLlE0SNpDq8eu/VGXyF9r7M0azK3Wy7OlYXsuyYLFzHg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "^1.1.1", + "@radix-ui/react-dialog": "^1.1.6", + "@radix-ui/react-id": "^1.1.0", + "@radix-ui/react-primitive": "^2.0.2" + }, + "peerDependencies": { + "react": "^18 || ^19 || ^19.0.0-rc", + "react-dom": "^18 || ^19 || ^19.0.0-rc" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "license": "MIT" + }, + "node_modules/cookie-es": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/cookie-es/-/cookie-es-2.0.0.tgz", + "integrity": "sha512-RAj4E421UYRgqokKUmotqAwuplYw15qtdXfY+hGzgCJ/MBjCVZcSoHK/kH9kocfjRjcDME7IiDWR/1WX1TM2Pg==", + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/css-tree": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-3.1.0.tgz", + "integrity": "sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "mdn-data": "2.12.2", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + "node_modules/css.escape": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz", + "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cssstyle": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-6.1.0.tgz", + "integrity": "sha512-Ml4fP2UT2K3CUBQnVlbdV/8aFDdlY69E+YnwJM+3VUWl08S3J8c8aRuJqCkD9Py8DHZ7zNNvsfKl8psocHZEFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/css-color": "^5.0.0", + "@csstools/css-syntax-patches-for-csstree": "^1.0.28", + "css-tree": "^3.1.0", + "lru-cache": "^11.2.6" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/cssstyle/node_modules/lru-cache": { + "version": "11.2.6", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.6.tgz", + "integrity": "sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "license": "MIT" + }, + "node_modules/d3-array": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz", + "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==", + "license": "ISC", + "dependencies": { + "internmap": "1 - 2" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-color": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dispatch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", + "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-drag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", + "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-selection": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-ease": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", + "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-format": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.2.tgz", + "integrity": "sha512-AJDdYOdnyRDV5b6ArilzCPPwc1ejkHcoyFarqlPqT7zRYjhavcT3uSrqcMvsgh2CgoPbK3RCwyHaVyxYcP2Arg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-interpolate": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-path": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz", + "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-scale": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz", + "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==", + "license": "ISC", + "dependencies": { + "d3-array": "2.10.0 - 3", + "d3-format": "1 - 3", + "d3-interpolate": "1.2.0 - 3", + "d3-time": "2.1.1 - 3", + "d3-time-format": "2 - 4" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-selection": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", + "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-shape": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz", + "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==", + "license": "ISC", + "dependencies": { + "d3-path": "^3.1.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz", + "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==", + "license": "ISC", + "dependencies": { + "d3-array": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time-format": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz", + "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==", + "license": "ISC", + "dependencies": { + "d3-time": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-timer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", + "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-transition": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", + "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3", + "d3-dispatch": "1 - 3", + "d3-ease": "1 - 3", + "d3-interpolate": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "d3-selection": "2 - 3" + } + }, + "node_modules/d3-zoom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", + "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "2 - 3", + "d3-transition": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/data-urls": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-7.0.0.tgz", + "integrity": "sha512-23XHcCF+coGYevirZceTVD7NdJOqVn+49IHyxgszm+JIiHLoB2TkmPtsYkNWT1pvRSGkc35L6NHs0yHkN2SumA==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-mimetype": "^5.0.0", + "whatwg-url": "^16.0.0" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decimal.js": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.6.0.tgz", + "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==", + "dev": true, + "license": "MIT" + }, + "node_modules/decimal.js-light": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/decimal.js-light/-/decimal.js-light-2.5.1.tgz", + "integrity": "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==", + "license": "MIT" + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/detect-node-es": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", + "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==", + "license": "MIT" + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", + "license": "Apache-2.0" + }, + "node_modules/diff": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/diff/-/diff-8.0.3.tgz", + "integrity": "sha512-qejHi7bcSD4hQAZE0tNAawRK1ZtafHDmMTMkrrIGgSLl7hTnQHmKCeB45xAcbfTqK2zowkM3j3bHt/4b/ARbYQ==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "license": "MIT" + }, + "node_modules/dom-accessibility-api": { + "version": "0.5.16", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz", + "integrity": "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.302", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.302.tgz", + "integrity": "sha512-sM6HAN2LyK82IyPBpznDRqlTQAtuSaO+ShzFiWTvoMJLHyZ+Y39r8VMfHzwbU8MVBzQ4Wdn85+wlZl2TLGIlwg==", + "license": "ISC" + }, + "node_modules/entities": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + "license": "MIT" + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-toolkit": { + "version": "1.44.0", + "resolved": "https://registry.npmjs.org/es-toolkit/-/es-toolkit-1.44.0.tgz", + "integrity": "sha512-6penXeZalaV88MM3cGkFZZfOoLGWshWWfdy0tWw/RlVVyhvMaWSBTOvXNeiW3e5FwdS5ePW0LGEu17zT139ktg==", + "license": "MIT", + "workspaces": [ + "docs", + "benchmarks" + ] + }, + "node_modules/esbuild": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.3.tgz", + "integrity": "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==", + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.3", + "@esbuild/android-arm": "0.27.3", + "@esbuild/android-arm64": "0.27.3", + "@esbuild/android-x64": "0.27.3", + "@esbuild/darwin-arm64": "0.27.3", + "@esbuild/darwin-x64": "0.27.3", + "@esbuild/freebsd-arm64": "0.27.3", + "@esbuild/freebsd-x64": "0.27.3", + "@esbuild/linux-arm": "0.27.3", + "@esbuild/linux-arm64": "0.27.3", + "@esbuild/linux-ia32": "0.27.3", + "@esbuild/linux-loong64": "0.27.3", + "@esbuild/linux-mips64el": "0.27.3", + "@esbuild/linux-ppc64": "0.27.3", + "@esbuild/linux-riscv64": "0.27.3", + "@esbuild/linux-s390x": "0.27.3", + "@esbuild/linux-x64": "0.27.3", + "@esbuild/netbsd-arm64": "0.27.3", + "@esbuild/netbsd-x64": "0.27.3", + "@esbuild/openbsd-arm64": "0.27.3", + "@esbuild/openbsd-x64": "0.27.3", + "@esbuild/openharmony-arm64": "0.27.3", + "@esbuild/sunos-x64": "0.27.3", + "@esbuild/win32-arm64": "0.27.3", + "@esbuild/win32-ia32": "0.27.3", + "@esbuild/win32-x64": "0.27.3" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "9.39.3", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.3.tgz", + "integrity": "sha512-VmQ+sifHUbI/IcSopBCF/HO3YiHQx/AVd3UVyYL6weuwW+HvON9VYn5l6Zl1WZzPWXPNZrSQpxwkkZ/VuvJZzg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.1", + "@eslint/config-helpers": "^0.4.2", + "@eslint/core": "^0.17.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.39.3", + "@eslint/plugin-kit": "^0.4.1", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", + "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-7.0.1.tgz", + "integrity": "sha512-O0d0m04evaNzEPoSW+59Mezf8Qt0InfgGIBJnpC0h3NH/WjUAR7BIKUfysC6todmtiZ/A0oUVS8Gce0WhBrHsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.24.4", + "@babel/parser": "^7.24.4", + "hermes-parser": "^0.25.1", + "zod": "^3.25.0 || ^4.0.0", + "zod-validation-error": "^3.5.0 || ^4.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0" + } + }, + "node_modules/eslint-plugin-react-refresh": { + "version": "0.4.26", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.26.tgz", + "integrity": "sha512-1RETEylht2O6FM/MvgnyvT+8K21wLqDNg4qD51Zj3guhjt433XbnnkVttHMyaVyAFD03QSV4LPS5iE3VQmO7XQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "eslint": ">=8.40" + } + }, + "node_modules/eslint-scope": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/espree": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esquery": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eventemitter3": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.4.tgz", + "integrity": "sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==", + "license": "MIT" + }, + "node_modules/expect-type": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-diff": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/fast-diff/-/fast-diff-1.3.0.tgz", + "integrity": "sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==", + "license": "Apache-2.0" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fastest-levenshtein": { + "version": "1.0.16", + "resolved": "https://registry.npmjs.org/fastest-levenshtein/-/fastest-levenshtein-1.0.16.tgz", + "integrity": "sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg==", + "license": "MIT", + "engines": { + "node": ">= 4.9.1" + } + }, + "node_modules/fastq": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/file-entry-cache": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fraction.js": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz", + "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/framer-motion": { + "version": "12.34.3", + "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-12.34.3.tgz", + "integrity": "sha512-v81ecyZKYO/DfpTwHivqkxSUBzvceOpoI+wLfgCgoUIKxlFKEXdg0oR9imxwXumT4SFy8vRk9xzJ5l3/Du/55Q==", + "license": "MIT", + "dependencies": { + "motion-dom": "^12.34.3", + "motion-utils": "^12.29.2", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "@emotion/is-prop-valid": "*", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@emotion/is-prop-valid": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + } + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/geist": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/geist/-/geist-1.7.0.tgz", + "integrity": "sha512-ZaoiZwkSf0DwwB1ncdLKp+ggAldqxl5L1+SXaNIBGkPAqcu+xjVJLxlf3/S8vLt9UHx1xu5fz3lbzKCj5iOVdQ==", + "license": "SIL OPEN FONT LICENSE", + "peerDependencies": { + "next": ">=13.2.0" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-nonce": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", + "integrity": "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-tsconfig": { + "version": "4.13.6", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.13.6.tgz", + "integrity": "sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==", + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/globals": { + "version": "16.5.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-16.5.0.tgz", + "integrity": "sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/goober": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/goober/-/goober-2.1.18.tgz", + "integrity": "sha512-2vFqsaDVIT9Gz7N6kAL++pLpp41l3PfDuusHcjnGLfR6+huZkl6ziX+zgVC3ZxpqWhzH6pyDdGrCeDhMIvwaxw==", + "license": "MIT", + "peerDependencies": { + "csstype": "^3.0.10" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hermes-estree": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/hermes-estree/-/hermes-estree-0.25.1.tgz", + "integrity": "sha512-0wUoCcLp+5Ev5pDW2OriHC2MJCbwLwuRx+gAqMTOkGKJJiBCLjtrvy4PWUGn6MIVefecRpzoOZ/UV6iGdOr+Cw==", + "dev": true, + "license": "MIT" + }, + "node_modules/hermes-parser": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/hermes-parser/-/hermes-parser-0.25.1.tgz", + "integrity": "sha512-6pEjquH3rqaI6cYAXYPcz9MS4rY6R4ngRgrgfDshRptUZIc3lw0MCIJIGDj9++mfySOuPTHB4nrSW99BCvOPIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "hermes-estree": "0.25.1" + } + }, + "node_modules/highlight.js": { + "version": "11.11.1", + "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-11.11.1.tgz", + "integrity": "sha512-Xwwo44whKBVCYoliBQwaPvtd/2tYFkRQtXDWj1nackaV2JPXx3L0+Jvd8/qCJ2p+ML0/XVkJ2q+Mr+UVdpJK5w==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/html-encoding-sniffer": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-6.0.0.tgz", + "integrity": "sha512-CV9TW3Y3f8/wT0BRFc1/KAVQ3TUHiXmaAb6VW9vtiMFf7SLoMd1PdAc4W3KFOFETBJUb90KatHqlsZMWV+R9Gg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@exodus/bytes": "^1.6.0" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + } + }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/immer": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/immer/-/immer-10.2.0.tgz", + "integrity": "sha512-d/+XTN3zfODyjr89gM3mPq1WNX2B8pYsu7eORitdwyA2sBubnTl3laYlBk4sXY5FUa5qTZGBDPJICVbvqzjlbw==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/immer" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/internmap": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", + "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-potential-custom-element-name": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", + "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/isbot": { + "version": "5.1.35", + "resolved": "https://registry.npmjs.org/isbot/-/isbot-5.1.35.tgz", + "integrity": "sha512-waFfC72ZNfwLLuJ2iLaoVaqcNo+CAaLR7xCpAn0Y5WfGzkNHv7ZN39Vbi1y+kb+Zs46XHOX3tZNExroFUPX+Kg==", + "license": "Unlicense", + "engines": { + "node": ">=18" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsdom": { + "version": "28.1.0", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-28.1.0.tgz", + "integrity": "sha512-0+MoQNYyr2rBHqO1xilltfDjV9G7ymYGlAUazgcDLQaUf8JDHbuGwsxN6U9qWaElZ4w1B2r7yEGIL3GdeW3Rug==", + "dev": true, + "license": "MIT", + "dependencies": { + "@acemir/cssom": "^0.9.31", + "@asamuzakjp/dom-selector": "^6.8.1", + "@bramus/specificity": "^2.4.2", + "@exodus/bytes": "^1.11.0", + "cssstyle": "^6.0.1", + "data-urls": "^7.0.0", + "decimal.js": "^10.6.0", + "html-encoding-sniffer": "^6.0.0", + "http-proxy-agent": "^7.0.2", + "https-proxy-agent": "^7.0.6", + "is-potential-custom-element-name": "^1.0.1", + "parse5": "^8.0.0", + "saxes": "^6.0.0", + "symbol-tree": "^3.2.4", + "tough-cookie": "^6.0.0", + "undici": "^7.21.0", + "w3c-xmlserializer": "^5.0.0", + "webidl-conversions": "^8.0.1", + "whatwg-mimetype": "^5.0.0", + "whatwg-url": "^16.0.0", + "xml-name-validator": "^5.0.0" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + }, + "peerDependencies": { + "canvas": "^3.0.0" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": true + } + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/leaflet": { + "version": "1.9.4", + "resolved": "https://registry.npmjs.org/leaflet/-/leaflet-1.9.4.tgz", + "integrity": "sha512-nxS1ynzJOmOlHp+iL3FyWqK89GtNL8U8rvlMOsQdTTssxZwCXh8N2NB3GDQOL+YR3XnWyZAxwQixURb+FA74PA==", + "license": "BSD-2-Clause" + }, + "node_modules/leaflet.markercluster": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/leaflet.markercluster/-/leaflet.markercluster-1.5.3.tgz", + "integrity": "sha512-vPTw/Bndq7eQHjLBVlWpnGeLa3t+3zGiuM7fJwCkiMFq+nmRuG3RI3f7f4N4TDX7T4NpbAXpR2+NTRSEGfCSeA==", + "license": "MIT", + "peerDependencies": { + "leaflet": "^1.3.1" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "license": "MIT" + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/lowlight": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/lowlight/-/lowlight-3.3.0.tgz", + "integrity": "sha512-0JNhgFoPvP6U6lE/UdVsSq99tn6DhjjpAj5MxG49ewd2mOBVtwWYIT8ClyABhq198aXXODMU6Ox8DrGy/CpTZQ==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "devlop": "^1.0.0", + "highlight.js": "~11.11.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/lucide-react": { + "version": "0.575.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.575.0.tgz", + "integrity": "sha512-VuXgKZrk0uiDlWjGGXmKV6MSk9Yy4l10qgVvzGn2AWBx1Ylt0iBexKOAoA6I7JO3m+M9oeovJd3yYENfkUbOeg==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/lz-string": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/lz-string/-/lz-string-1.5.0.tgz", + "integrity": "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==", + "dev": true, + "license": "MIT", + "peer": true, + "bin": { + "lz-string": "bin/bin.js" + } + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mdn-data": { + "version": "2.12.2", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.12.2.tgz", + "integrity": "sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA==", + "dev": true, + "license": "CC0-1.0" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/micromatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/min-indent": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", + "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/minimatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.3.tgz", + "integrity": "sha512-M2GCs7Vk83NxkUyQV1bkABc4yxgz9kILhHImZiBPAZ9ybuvCb0/H7lEl5XvIg3g+9d4eNotkZA5IWwYl0tibaA==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/motion-dom": { + "version": "12.34.3", + "resolved": "https://registry.npmjs.org/motion-dom/-/motion-dom-12.34.3.tgz", + "integrity": "sha512-sYgFe+pR9aIM7o4fhs2aXtOI+oqlUd33N9Yoxcgo1Fv7M20sRkHtCmzE/VRNIcq7uNJ+qio+Xubt1FXH3pQ+eQ==", + "license": "MIT", + "dependencies": { + "motion-utils": "^12.29.2" + } + }, + "node_modules/motion-utils": { + "version": "12.29.2", + "resolved": "https://registry.npmjs.org/motion-utils/-/motion-utils-12.29.2.tgz", + "integrity": "sha512-G3kc34H2cX2gI63RqU+cZq+zWRRPSsNIOjpdl9TN4AQwC4sgwYPl/Q/Obf/d53nOm569T0fYK+tcoSV50BWx8A==", + "license": "MIT" + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/next": { + "version": "16.1.6", + "resolved": "https://registry.npmjs.org/next/-/next-16.1.6.tgz", + "integrity": "sha512-hkyRkcu5x/41KoqnROkfTm2pZVbKxvbZRuNvKXLRXxs3VfyO0WhY50TQS40EuKO9SW3rBj/sF3WbVwDACeMZyw==", + "license": "MIT", + "peer": true, + "dependencies": { + "@next/env": "16.1.6", + "@swc/helpers": "0.5.15", + "baseline-browser-mapping": "^2.8.3", + "caniuse-lite": "^1.0.30001579", + "postcss": "8.4.31", + "styled-jsx": "5.1.6" + }, + "bin": { + "next": "dist/bin/next" + }, + "engines": { + "node": ">=20.9.0" + }, + "optionalDependencies": { + "@next/swc-darwin-arm64": "16.1.6", + "@next/swc-darwin-x64": "16.1.6", + "@next/swc-linux-arm64-gnu": "16.1.6", + "@next/swc-linux-arm64-musl": "16.1.6", + "@next/swc-linux-x64-gnu": "16.1.6", + "@next/swc-linux-x64-musl": "16.1.6", + "@next/swc-win32-arm64-msvc": "16.1.6", + "@next/swc-win32-x64-msvc": "16.1.6", + "sharp": "^0.34.4" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.51.1", + "babel-plugin-react-compiler": "*", + "react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", + "react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", + "sass": "^1.3.0" + }, + "peerDependenciesMeta": { + "@opentelemetry/api": { + "optional": true + }, + "@playwright/test": { + "optional": true + }, + "babel-plugin-react-compiler": { + "optional": true + }, + "sass": { + "optional": true + } + } + }, + "node_modules/next/node_modules/postcss": { + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", + "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/obug": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz", + "integrity": "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==", + "dev": true, + "funding": [ + "https://github.com/sponsors/sxzz", + "https://opencollective.com/debug" + ], + "license": "MIT" + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse5": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-8.0.0.tgz", + "integrity": "sha512-9m4m5GSgXjL4AjumKzq1Fgfp3Z8rsvjRNbnkVwfu2ImRqE5D0LnY2QfDen18FSY9C573YU5XxSapdHZTZ2WolA==", + "dev": true, + "license": "MIT", + "dependencies": { + "entities": "^6.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "license": "MIT" + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/playwright": { + "version": "1.58.2", + "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.58.2.tgz", + "integrity": "sha512-vA30H8Nvkq/cPBnNw4Q8TWz1EJyqgpuinBcHET0YVJVFldr8JDNiU9LaWAE1KqSkRYazuaBhTpB5ZzShOezQ6A==", + "devOptional": true, + "license": "Apache-2.0", + "dependencies": { + "playwright-core": "1.58.2" + }, + "bin": { + "playwright": "cli.js" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "fsevents": "2.3.2" + } + }, + "node_modules/playwright-core": { + "version": "1.58.2", + "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.58.2.tgz", + "integrity": "sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg==", + "devOptional": true, + "license": "Apache-2.0", + "bin": { + "playwright-core": "cli.js" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/playwright/node_modules/fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-import": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-js": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.1.0.tgz", + "integrity": "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-load-config": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz", + "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "lilconfig": "^3.1.1" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "jiti": ">=1.21.0", + "postcss": ">=8.0.9", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + }, + "postcss": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "license": "MIT" + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prettier": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.8.1.tgz", + "integrity": "sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg==", + "license": "MIT", + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/pretty-format": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", + "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "ansi-regex": "^5.0.1", + "ansi-styles": "^5.0.0", + "react-is": "^17.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/pretty-format/node_modules/react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/react": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz", + "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz", + "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==", + "license": "MIT", + "dependencies": { + "scheduler": "^0.27.0" + }, + "peerDependencies": { + "react": "^19.2.4" + } + }, + "node_modules/react-is": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-19.2.4.tgz", + "integrity": "sha512-W+EWGn2v0ApPKgKKCy/7s7WHXkboGcsrXE+2joLyVxkbyVQfO3MUEaUQDHoSmb8TFFrSKYa9mw64WZHNHSDzYA==", + "license": "MIT", + "peer": true + }, + "node_modules/react-leaflet": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/react-leaflet/-/react-leaflet-5.0.0.tgz", + "integrity": "sha512-CWbTpr5vcHw5bt9i4zSlPEVQdTVcML390TjeDG0cK59z1ylexpqC6M1PJFjV8jD7CF+ACBFsLIDs6DRMoLEofw==", + "license": "Hippocratic-2.1", + "dependencies": { + "@react-leaflet/core": "^3.0.0" + }, + "peerDependencies": { + "leaflet": "^1.9.0", + "react": "^19.0.0", + "react-dom": "^19.0.0" + } + }, + "node_modules/react-leaflet-cluster": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/react-leaflet-cluster/-/react-leaflet-cluster-4.0.0.tgz", + "integrity": "sha512-Lu75+KOu2ruGyAx8LoCQvlHuw+3CLLJQGEoSk01ymsDN/YnCiRV6ChkpsvaruVyYBPzUHwiskFw4Jo7WHj5qNw==", + "license": "SEE LICENSE IN ", + "dependencies": { + "leaflet.markercluster": "^1.5.3" + }, + "peerDependencies": { + "@react-leaflet/core": "^3.0.0", + "leaflet": "^1.9.0", + "react": "^19.0.0", + "react-dom": "^19.0.0", + "react-leaflet": "^5.0.0" + } + }, + "node_modules/react-redux": { + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/react-redux/-/react-redux-9.2.0.tgz", + "integrity": "sha512-ROY9fvHhwOD9ySfrF0wmvu//bKCQ6AeZZq1nJNtbDC+kk5DuSuNX/n6YWYF/SYy7bSba4D4FSz8DJeKY/S/r+g==", + "license": "MIT", + "dependencies": { + "@types/use-sync-external-store": "^0.0.6", + "use-sync-external-store": "^1.4.0" + }, + "peerDependencies": { + "@types/react": "^18.2.25 || ^19", + "react": "^18.0 || ^19", + "redux": "^5.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "redux": { + "optional": true + } + } + }, + "node_modules/react-refresh": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.18.0.tgz", + "integrity": "sha512-QgT5//D3jfjJb6Gsjxv0Slpj23ip+HtOpnNgnb2S5zU3CB26G/IDPGoy4RJB42wzFE46DRsstbW6tKHoKbhAxw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-remove-scroll": { + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.7.2.tgz", + "integrity": "sha512-Iqb9NjCCTt6Hf+vOdNIZGdTiH1QSqr27H/Ek9sv/a97gfueI/5h1s3yRi1nngzMUaOOToin5dI1dXKdXiF+u0Q==", + "license": "MIT", + "dependencies": { + "react-remove-scroll-bar": "^2.3.7", + "react-style-singleton": "^2.2.3", + "tslib": "^2.1.0", + "use-callback-ref": "^1.3.3", + "use-sidecar": "^1.1.3" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-remove-scroll-bar": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.8.tgz", + "integrity": "sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q==", + "license": "MIT", + "dependencies": { + "react-style-singleton": "^2.2.2", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-style-singleton": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.3.tgz", + "integrity": "sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ==", + "license": "MIT", + "dependencies": { + "get-nonce": "^1.0.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/reactflow": { + "version": "11.11.4", + "resolved": "https://registry.npmjs.org/reactflow/-/reactflow-11.11.4.tgz", + "integrity": "sha512-70FOtJkUWH3BAOsN+LU9lCrKoKbtOPnz2uq0CV2PLdNSwxTXOhCbsZr50GmZ+Rtw3jx8Uv7/vBFtCGixLfd4Og==", + "license": "MIT", + "dependencies": { + "@reactflow/background": "11.3.14", + "@reactflow/controls": "11.2.14", + "@reactflow/core": "11.11.4", + "@reactflow/minimap": "11.7.14", + "@reactflow/node-resizer": "2.2.14", + "@reactflow/node-toolbar": "1.3.14" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/reactivity-store": { + "version": "0.3.12", + "resolved": "https://registry.npmjs.org/reactivity-store/-/reactivity-store-0.3.12.tgz", + "integrity": "sha512-Idz9EL4dFUtQbHySZQzckWOTUfqjdYpUtNW0iOysC32mG7IjiUGB77QrsyR5eAWBkRiS9JscF6A3fuQAIy+LrQ==", + "license": "MIT", + "dependencies": { + "@vue/reactivity": "~3.5.22", + "@vue/shared": "~3.5.22", + "use-sync-external-store": "^1.6.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/read-cache": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/readdirp/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/recast": { + "version": "0.23.11", + "resolved": "https://registry.npmjs.org/recast/-/recast-0.23.11.tgz", + "integrity": "sha512-YTUo+Flmw4ZXiWfQKGcwwc11KnoRAYgzAE2E7mXKCjSviTKShtxBsN6YUUBB2gtaBzKzeKunxhUwNHQuRryhWA==", + "license": "MIT", + "dependencies": { + "ast-types": "^0.16.1", + "esprima": "~4.0.0", + "source-map": "~0.6.1", + "tiny-invariant": "^1.3.3", + "tslib": "^2.0.1" + }, + "engines": { + "node": ">= 4" + } + }, + "node_modules/recast/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/recharts": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/recharts/-/recharts-3.7.0.tgz", + "integrity": "sha512-l2VCsy3XXeraxIID9fx23eCb6iCBsxUQDnE8tWm6DFdszVAO7WVY/ChAD9wVit01y6B2PMupYiMmQwhgPHc9Ew==", + "license": "MIT", + "workspaces": [ + "www" + ], + "dependencies": { + "@reduxjs/toolkit": "1.x.x || 2.x.x", + "clsx": "^2.1.1", + "decimal.js-light": "^2.5.1", + "es-toolkit": "^1.39.3", + "eventemitter3": "^5.0.1", + "immer": "^10.1.1", + "react-redux": "8.x.x || 9.x.x", + "reselect": "5.1.1", + "tiny-invariant": "^1.3.3", + "use-sync-external-store": "^1.2.2", + "victory-vendor": "^37.0.2" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-is": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/redent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", + "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/redux": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/redux/-/redux-5.0.1.tgz", + "integrity": "sha512-M9/ELqF6fy8FwmkpnF0S3YKOqMyoWJ4+CS5Efg2ct3oY9daQvd/Pc71FpGZsVsbl3Cpb+IIcjBDUnnyBdQbq4w==", + "license": "MIT" + }, + "node_modules/redux-thunk": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/redux-thunk/-/redux-thunk-3.1.0.tgz", + "integrity": "sha512-NW2r5T6ksUKXCabzhL9z+h206HQw/NJkcLm1GPImRQ8IzfXwRGqjVhKJGauHirT0DAuyy6hjdnMZaRoAcy0Klw==", + "license": "MIT", + "peerDependencies": { + "redux": "^5.0.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/reselect": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/reselect/-/reselect-5.1.1.tgz", + "integrity": "sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==", + "license": "MIT" + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rollup": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.59.0.tgz", + "integrity": "sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.59.0", + "@rollup/rollup-android-arm64": "4.59.0", + "@rollup/rollup-darwin-arm64": "4.59.0", + "@rollup/rollup-darwin-x64": "4.59.0", + "@rollup/rollup-freebsd-arm64": "4.59.0", + "@rollup/rollup-freebsd-x64": "4.59.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.59.0", + "@rollup/rollup-linux-arm-musleabihf": "4.59.0", + "@rollup/rollup-linux-arm64-gnu": "4.59.0", + "@rollup/rollup-linux-arm64-musl": "4.59.0", + "@rollup/rollup-linux-loong64-gnu": "4.59.0", + "@rollup/rollup-linux-loong64-musl": "4.59.0", + "@rollup/rollup-linux-ppc64-gnu": "4.59.0", + "@rollup/rollup-linux-ppc64-musl": "4.59.0", + "@rollup/rollup-linux-riscv64-gnu": "4.59.0", + "@rollup/rollup-linux-riscv64-musl": "4.59.0", + "@rollup/rollup-linux-s390x-gnu": "4.59.0", + "@rollup/rollup-linux-x64-gnu": "4.59.0", + "@rollup/rollup-linux-x64-musl": "4.59.0", + "@rollup/rollup-openbsd-x64": "4.59.0", + "@rollup/rollup-openharmony-arm64": "4.59.0", + "@rollup/rollup-win32-arm64-msvc": "4.59.0", + "@rollup/rollup-win32-ia32-msvc": "4.59.0", + "@rollup/rollup-win32-x64-gnu": "4.59.0", + "@rollup/rollup-win32-x64-msvc": "4.59.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/saxes": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz", + "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==", + "dev": true, + "license": "ISC", + "dependencies": { + "xmlchars": "^2.2.0" + }, + "engines": { + "node": ">=v12.22.7" + } + }, + "node_modules/scheduler": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/seroval": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/seroval/-/seroval-1.5.0.tgz", + "integrity": "sha512-OE4cvmJ1uSPrKorFIH9/w/Qwuvi/IMcGbv5RKgcJ/zjA/IohDLU6SVaxFN9FwajbP7nsX0dQqMDes1whk3y+yw==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/seroval-plugins": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/seroval-plugins/-/seroval-plugins-1.5.0.tgz", + "integrity": "sha512-EAHqADIQondwRZIdeW2I636zgsODzoBDwb3PT/+7TLDWyw1Dy/Xv7iGUIEXXav7usHDE9HVhOU61irI3EnyyHA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "seroval": "^1.0" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, + "node_modules/sonner": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/sonner/-/sonner-2.0.7.tgz", + "integrity": "sha512-W6ZN4p58k8aDKA4XPcx2hpIQXBRAgyiWVkYhT7CvK6D3iAu7xjvVyhQHg2/iaKJZ1XVJ4r7XuwGL+WGEK37i9w==", + "license": "MIT", + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0 || ^19.0.0-rc", + "react-dom": "^18.0.0 || ^19.0.0 || ^19.0.0-rc" + } + }, + "node_modules/source-map": { + "version": "0.7.6", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.6.tgz", + "integrity": "sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ==", + "license": "BSD-3-Clause", + "engines": { + "node": ">= 12" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", + "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", + "dev": true, + "license": "MIT" + }, + "node_modules/strip-indent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", + "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "min-indent": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/styled-jsx": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.6.tgz", + "integrity": "sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==", + "license": "MIT", + "peer": true, + "dependencies": { + "client-only": "0.0.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "peerDependencies": { + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/sucrase": { + "version": "3.35.1", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.1.tgz", + "integrity": "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==", + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "tinyglobby": "^0.2.11", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/symbol-tree": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", + "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", + "dev": true, + "license": "MIT" + }, + "node_modules/tailwind-merge": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.5.0.tgz", + "integrity": "sha512-I8K9wewnVDkL1NTGoqWmVEIlUcB9gFriAEkXkfCjX5ib8ezGxtR3xD7iZIxrfArjEsH7F1CHD4RFUtxefdqV/A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, + "node_modules/tailwindcss": { + "version": "3.4.19", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.19.tgz", + "integrity": "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==", + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.6.0", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.3.2", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.21.7", + "lilconfig": "^3.1.3", + "micromatch": "^4.0.8", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.1.1", + "postcss": "^8.4.47", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", + "postcss-nested": "^6.2.0", + "postcss-selector-parser": "^6.1.2", + "resolve": "^1.22.8", + "sucrase": "^3.35.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tailwindcss/node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/tiny-invariant": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", + "license": "MIT" + }, + "node_modules/tiny-warning": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", + "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==", + "license": "MIT" + }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz", + "integrity": "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyrainbow": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.0.3.tgz", + "integrity": "sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tldts": { + "version": "7.0.23", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.23.tgz", + "integrity": "sha512-ASdhgQIBSay0R/eXggAkQ53G4nTJqTXqC2kbaBbdDwM7SkjyZyO0OaaN1/FH7U/yCeqOHDwFO5j8+Os/IS1dXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tldts-core": "^7.0.23" + }, + "bin": { + "tldts": "bin/cli.js" + } + }, + "node_modules/tldts-core": { + "version": "7.0.23", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.23.tgz", + "integrity": "sha512-0g9vrtDQLrNIiCj22HSe9d4mLVG3g5ph5DZ8zCKBr4OtrspmNB6ss7hVyzArAeE88ceZocIEGkyW1Ime7fxPtQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tough-cookie": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-6.0.0.tgz", + "integrity": "sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tldts": "^7.0.5" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/tr46": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-6.0.0.tgz", + "integrity": "sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw==", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^2.3.1" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/ts-api-utils": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.4.0.tgz", + "integrity": "sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "license": "Apache-2.0" + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/tsx": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.21.0.tgz", + "integrity": "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==", + "license": "MIT", + "dependencies": { + "esbuild": "~0.27.0", + "get-tsconfig": "^4.7.5" + }, + "bin": { + "tsx": "dist/cli.mjs" + }, + "engines": { + "node": ">=18.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + } + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/typescript-eslint": { + "version": "8.56.1", + "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.56.1.tgz", + "integrity": "sha512-U4lM6pjmBX7J5wk4szltF7I1cGBHXZopnAXCMXb3+fZ3B/0Z3hq3wS/CCUB2NZBNAExK92mCU2tEohWuwVMsDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/eslint-plugin": "8.56.1", + "@typescript-eslint/parser": "8.56.1", + "@typescript-eslint/typescript-estree": "8.56.1", + "@typescript-eslint/utils": "8.56.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/undici": { + "version": "7.22.0", + "resolved": "https://registry.npmjs.org/undici/-/undici-7.22.0.tgz", + "integrity": "sha512-RqslV2Us5BrllB+JeiZnK4peryVTndy9Dnqq62S3yYRRTj0tFQCwEniUy2167skdGOy3vqRzEvl1Dm4sV2ReDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=20.18.1" + } + }, + "node_modules/undici-types": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.16.0.tgz", + "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==", + "dev": true, + "license": "MIT" + }, + "node_modules/unplugin": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/unplugin/-/unplugin-2.3.11.tgz", + "integrity": "sha512-5uKD0nqiYVzlmCRs01Fhs2BdkEgBS3SAVP6ndrBsuK42iC2+JHyxM05Rm9G8+5mkmRtzMZGY8Ct5+mliZxU/Ww==", + "license": "MIT", + "dependencies": { + "@jridgewell/remapping": "^2.3.5", + "acorn": "^8.15.0", + "picomatch": "^4.0.3", + "webpack-virtual-modules": "^0.6.2" + }, + "engines": { + "node": ">=18.12.0" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/use-callback-ref": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.3.tgz", + "integrity": "sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sidecar": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.3.tgz", + "integrity": "sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ==", + "license": "MIT", + "dependencies": { + "detect-node-es": "^1.1.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sync-external-store": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz", + "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==", + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, + "node_modules/victory-vendor": { + "version": "37.3.6", + "resolved": "https://registry.npmjs.org/victory-vendor/-/victory-vendor-37.3.6.tgz", + "integrity": "sha512-SbPDPdDBYp+5MJHhBCAyI7wKM3d5ivekigc2Dk2s7pgbZ9wIgIBYGVw4zGHBml/qTFbexrofXW6Gu4noGxrOwQ==", + "license": "MIT AND ISC", + "dependencies": { + "@types/d3-array": "^3.0.3", + "@types/d3-ease": "^3.0.0", + "@types/d3-interpolate": "^3.0.1", + "@types/d3-scale": "^4.0.2", + "@types/d3-shape": "^3.1.0", + "@types/d3-time": "^3.0.0", + "@types/d3-timer": "^3.0.0", + "d3-array": "^3.1.6", + "d3-ease": "^3.0.1", + "d3-interpolate": "^3.0.1", + "d3-scale": "^4.0.2", + "d3-shape": "^3.1.0", + "d3-time": "^3.0.0", + "d3-timer": "^3.0.1" + } + }, + "node_modules/vite": { + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz", + "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.27.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vite-plugin-sri3": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/vite-plugin-sri3/-/vite-plugin-sri3-1.3.0.tgz", + "integrity": "sha512-wOdmXQhKQzwNOeUfsPoi7Zz3bh6KXkjup5t/n3bbOo8ITOIgrpwTi//8YwByTy8UeGTz6AHInVFQSoywka0dEQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.0.0" + }, + "peerDependencies": { + "vite": "^2 || ^3 || ^4 || ^5 || ^6 || ^7" + } + }, + "node_modules/vitest": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.18.tgz", + "integrity": "sha512-hOQuK7h0FGKgBAas7v0mSAsnvrIgAvWmRFjmzpJ7SwFHH3g1k2u37JtYwOwmEKhK6ZO3v9ggDBBm0La1LCK4uQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/expect": "4.0.18", + "@vitest/mocker": "4.0.18", + "@vitest/pretty-format": "4.0.18", + "@vitest/runner": "4.0.18", + "@vitest/snapshot": "4.0.18", + "@vitest/spy": "4.0.18", + "@vitest/utils": "4.0.18", + "es-module-lexer": "^1.7.0", + "expect-type": "^1.2.2", + "magic-string": "^0.30.21", + "obug": "^2.1.1", + "pathe": "^2.0.3", + "picomatch": "^4.0.3", + "std-env": "^3.10.0", + "tinybench": "^2.9.0", + "tinyexec": "^1.0.2", + "tinyglobby": "^0.2.15", + "tinyrainbow": "^3.0.3", + "vite": "^6.0.0 || ^7.0.0", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^20.0.0 || ^22.0.0 || >=24.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@opentelemetry/api": "^1.9.0", + "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0", + "@vitest/browser-playwright": "4.0.18", + "@vitest/browser-preview": "4.0.18", + "@vitest/browser-webdriverio": "4.0.18", + "@vitest/ui": "4.0.18", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@opentelemetry/api": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser-playwright": { + "optional": true + }, + "@vitest/browser-preview": { + "optional": true + }, + "@vitest/browser-webdriverio": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/w3c-xmlserializer": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz", + "integrity": "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "xml-name-validator": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/webidl-conversions": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-8.0.1.tgz", + "integrity": "sha512-BMhLD/Sw+GbJC21C/UgyaZX41nPt8bUTg+jWyDeg7e7YN4xOM05YPSIXceACnXVtqyEw/LMClUQMtMZ+PGGpqQ==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=20" + } + }, + "node_modules/webpack-virtual-modules": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/webpack-virtual-modules/-/webpack-virtual-modules-0.6.2.tgz", + "integrity": "sha512-66/V2i5hQanC51vBQKPH4aI8NMAcBW59FVBs+rC7eGHupMyfn34q7rZIE+ETlJ+XTevqfUhVVBgSUNSW2flEUQ==", + "license": "MIT" + }, + "node_modules/whatwg-mimetype": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-5.0.0.tgz", + "integrity": "sha512-sXcNcHOC51uPGF0P/D4NVtrkjSU2fNsm9iog4ZvZJsL3rjoDAzXZhkm2MWt1y+PUdggKAYVoMAIYcs78wJ51Cw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=20" + } + }, + "node_modules/whatwg-url": { + "version": "16.0.1", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-16.0.1.tgz", + "integrity": "sha512-1to4zXBxmXHV3IiSSEInrreIlu02vUOvrhxJJH5vcxYTBDAx51cqZiKdyTxlecdKNSjj8EcxGBxNf6Vg+945gw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@exodus/bytes": "^1.11.0", + "tr46": "^6.0.0", + "webidl-conversions": "^8.0.1" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/xml-name-validator": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz", + "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/xmlchars": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz", + "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", + "dev": true, + "license": "MIT" + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "license": "ISC" + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zod": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz", + "integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zod-validation-error": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/zod-validation-error/-/zod-validation-error-4.0.2.tgz", + "integrity": "sha512-Q6/nZLe6jxuU80qb/4uJ4t5v2VEZ44lzQjPDhYJNztRQ4wyWc6VF3D3Kb/fAuPetZQnhS3hnajCf9CsWesghLQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.0.0" + }, + "peerDependencies": { + "zod": "^3.25.0 || ^4.0.0" + } + }, + "node_modules/zustand": { + "version": "5.0.11", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.11.tgz", + "integrity": "sha512-fdZY+dk7zn/vbWNCYmzZULHRrss0jx5pPFiOuMZ/5HJN6Yv3u+1Wswy/4MpZEkEGhtNH+pwxZB8OKgUBPzYAGg==", + "license": "MIT", + "engines": { + "node": ">=12.20.0" + }, + "peerDependencies": { + "@types/react": ">=18.0.0", + "immer": ">=9.0.6", + "react": ">=18.0.0", + "use-sync-external-store": ">=1.2.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + }, + "use-sync-external-store": { + "optional": true + } + } + } + } +} diff --git a/frontend/package.json b/frontend/package.json new file mode 100644 index 0000000..7f3bb2b --- /dev/null +++ b/frontend/package.json @@ -0,0 +1,86 @@ +{ + "name": "frontend", + "private": true, + "version": "9.0.1", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc -b && vite build", + "lint": "eslint .", + "preview": "vite preview", + "test": "vitest run", + "test:watch": "vitest", + "test:coverage": "vitest run --coverage", + "test:e2e": "playwright test", + "test:e2e:headed": "playwright test --headed" + }, + "dependencies": { + "@dagrejs/dagre": "^2.0.4", + "@git-diff-view/lowlight": "^0.0.39", + "@git-diff-view/react": "^0.0.39", + "@radix-ui/react-avatar": "^1.1.11", + "@radix-ui/react-checkbox": "^1.3.3", + "@radix-ui/react-dialog": "^1.1.15", + "@radix-ui/react-dropdown-menu": "^2.1.16", + "@radix-ui/react-label": "^2.1.8", + "@radix-ui/react-popover": "^1.1.15", + "@radix-ui/react-select": "^2.2.6", + "@radix-ui/react-separator": "^1.1.8", + "@radix-ui/react-slot": "^1.2.4", + "@radix-ui/react-tabs": "^1.1.13", + "@radix-ui/react-toast": "^1.2.15", + "@tanstack/react-query": "^5.90.21", + "@tanstack/react-router": "^1.161.3", + "@tanstack/react-router-devtools": "^1.161.3", + "@tanstack/react-virtual": "^3.13.19", + "@tanstack/router-plugin": "^1.161.3", + "@zxcvbn-ts/core": "^3.0.4", + "@zxcvbn-ts/language-common": "^3.0.4", + "@zxcvbn-ts/language-en": "^3.0.2", + "axios": "^1.13.5", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "cmdk": "^1.1.1", + "diff": "^8.0.3", + "framer-motion": "^12.34.3", + "geist": "^1.7.0", + "leaflet": "^1.9.4", + "lucide-react": "^0.575.0", + "react": "^19.2.0", + "react-dom": "^19.2.0", + "react-leaflet": "^5.0.0", + "react-leaflet-cluster": "^4.0.0", + "reactflow": "^11.11.4", + "recharts": "^3.7.0", + "sonner": "^2.0.7", + "tailwind-merge": "^3.5.0", + "tailwindcss": "^3.4.19", + "zod": "^4.3.6", + "zustand": "^5.0.11" + }, + "devDependencies": { + "@eslint/js": "^9.39.1", + "@playwright/test": "^1.58.2", + "@testing-library/jest-dom": "^6.9.1", + "@testing-library/react": "^16.3.2", + "@testing-library/user-event": "^14.6.1", + "@types/diff": "^7.0.2", + "@types/leaflet": "^1.9.21", + "@types/node": "^24.10.1", + "@types/react": "^19.2.7", + "@types/react-dom": "^19.2.3", + "@vitejs/plugin-react": "^5.1.1", + "autoprefixer": "^10.4.24", + "eslint": "^9.39.1", + "eslint-plugin-react-hooks": "^7.0.1", + "eslint-plugin-react-refresh": "^0.4.24", + "globals": "^16.5.0", + "jsdom": "^28.1.0", + "postcss": "^8.5.6", + "typescript": "~5.9.3", + "typescript-eslint": "^8.48.0", + "vite": "^7.3.1", + "vite-plugin-sri3": "^1.3.0", + "vitest": "^4.0.18" + } +} diff --git a/frontend/playwright.config.ts b/frontend/playwright.config.ts new file mode 100644 index 0000000..0d5365d --- /dev/null +++ b/frontend/playwright.config.ts @@ -0,0 +1,25 @@ +import { defineConfig } from '@playwright/test' + +export default defineConfig({ + testDir: './tests/e2e', + timeout: 30000, + expect: { timeout: 5000 }, + fullyParallel: false, // Run sequentially for stability + forbidOnly: !!process.env.CI, + retries: process.env.CI ? 2 : 0, + workers: 1, + reporter: 'html', + use: { + baseURL: process.env.PLAYWRIGHT_BASE_URL || 'http://localhost:5173', + trace: 'on-first-retry', + screenshot: 'only-on-failure', + }, + projects: [ + { name: 'setup', testMatch: /.*\.setup\.ts/ }, + { + name: 'chromium', + use: { browserName: 'chromium' }, + dependencies: ['setup'], + }, + ], +}) diff --git a/frontend/postcss.config.js b/frontend/postcss.config.js new file mode 100644 index 0000000..2e7af2b --- /dev/null +++ b/frontend/postcss.config.js @@ -0,0 +1,6 @@ +export default { + plugins: { + tailwindcss: {}, + autoprefixer: {}, + }, +} diff --git a/frontend/public/favicon.svg b/frontend/public/favicon.svg new file mode 100644 index 0000000..983f5b5 --- /dev/null +++ b/frontend/public/favicon.svg @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/frontend/public/vite.svg b/frontend/public/vite.svg new file mode 100644 index 0000000..e7b8dfb --- /dev/null +++ b/frontend/public/vite.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx new file mode 100644 index 0000000..8ee7510 --- /dev/null +++ b/frontend/src/App.tsx @@ -0,0 +1,45 @@ +import { RouterProvider, createRouter } from '@tanstack/react-router' +import { useEffect, useState } from 'react' +import { routeTree } from './routeTree.gen' +import { useAuth } from './lib/auth' +import { Skeleton } from './components/ui/skeleton' + +const router = createRouter({ + routeTree, + defaultPreload: 'intent', +}) + +declare module '@tanstack/react-router' { + interface Register { + router: typeof router + } +} + +function AppInner() { + const { checkAuth } = useAuth() + const [hasChecked, setHasChecked] = useState(false) + + useEffect(() => { + checkAuth().finally(() => setHasChecked(true)) + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []) + + // Only show skeleton during initial auth check -- NOT on subsequent isLoading changes. + // Reacting to isLoading here would unmount the entire router tree (including LoginPage) + // every time an auth action sets isLoading, destroying all component local state. + if (!hasChecked) { + return ( +
+
+ + + +
+
+ ) + } + + return +} + +export default AppInner diff --git a/frontend/src/assets/fonts/Geist-Variable.woff2 b/frontend/src/assets/fonts/Geist-Variable.woff2 new file mode 100644 index 0000000..b2f0121 Binary files /dev/null and b/frontend/src/assets/fonts/Geist-Variable.woff2 differ diff --git a/frontend/src/assets/fonts/GeistMono-Variable.woff2 b/frontend/src/assets/fonts/GeistMono-Variable.woff2 new file mode 100644 index 0000000..dbdb8c2 Binary files /dev/null and b/frontend/src/assets/fonts/GeistMono-Variable.woff2 differ diff --git a/frontend/src/assets/react.svg b/frontend/src/assets/react.svg new file mode 100644 index 0000000..6c87de9 --- /dev/null +++ b/frontend/src/assets/react.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/src/components/__tests__/DeviceList.test.tsx b/frontend/src/components/__tests__/DeviceList.test.tsx new file mode 100644 index 0000000..5654655 --- /dev/null +++ b/frontend/src/components/__tests__/DeviceList.test.tsx @@ -0,0 +1,214 @@ +/** + * Device list (FleetTable) component tests -- verifies device data rendering, + * loading state, empty state, and table structure. + * + * Tests the FleetTable component directly since DevicesPage is tightly coupled + * to TanStack Router file-based routing (Route.useParams/useSearch). + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest' +import { render, screen, within } from '@/test/test-utils' +import type { DeviceListResponse } from '@/lib/api' + +// -------------------------------------------------------------------------- +// Mocks +// -------------------------------------------------------------------------- + +const mockNavigate = vi.fn() + +vi.mock('@tanstack/react-router', () => ({ + useNavigate: () => mockNavigate, + Link: ({ children, ...props }: { children: React.ReactNode; to?: string }) => ( + {children} + ), +})) + +// Mock devicesApi at the module level +const mockDevicesList = vi.fn() + +vi.mock('@/lib/api', async () => { + const actual = await vi.importActual('@/lib/api') + return { + ...actual, + devicesApi: { + ...actual.devicesApi, + list: (...args: unknown[]) => mockDevicesList(...args), + }, + } +}) + +// -------------------------------------------------------------------------- +// Test data +// -------------------------------------------------------------------------- + +const testDevices: DeviceListResponse = { + items: [ + { + id: 'dev-1', + hostname: 'router-office-1', + ip_address: '192.168.1.1', + api_port: 8728, + api_ssl_port: 8729, + model: 'RB4011', + serial_number: 'ABC123', + firmware_version: '7.12', + routeros_version: '7.12.1', + uptime_seconds: 86400, + last_seen: '2026-03-01T12:00:00Z', + latitude: null, + longitude: null, + status: 'online', + tags: [{ id: 'tag-1', name: 'core', color: '#00ff00' }], + groups: [], + created_at: '2026-01-01T00:00:00Z', + }, + { + id: 'dev-2', + hostname: 'ap-floor2', + ip_address: '192.168.1.10', + api_port: 8728, + api_ssl_port: 8729, + model: 'cAP ac', + serial_number: 'DEF456', + firmware_version: '7.10', + routeros_version: '7.10.2', + uptime_seconds: 3600, + last_seen: '2026-03-01T11:00:00Z', + latitude: null, + longitude: null, + status: 'offline', + tags: [], + groups: [], + created_at: '2026-01-15T00:00:00Z', + }, + ], + total: 2, + page: 1, + page_size: 25, +} + +const emptyDevices: DeviceListResponse = { + items: [], + total: 0, + page: 1, + page_size: 25, +} + +// -------------------------------------------------------------------------- +// Component import (after mocks) +// -------------------------------------------------------------------------- +import { FleetTable } from '@/components/fleet/FleetTable' + +// -------------------------------------------------------------------------- +// Tests +// -------------------------------------------------------------------------- + +describe('FleetTable (Device List)', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('renders device list with data', async () => { + mockDevicesList.mockResolvedValueOnce(testDevices) + + render() + + // Wait for data to load + expect(await screen.findByText('router-office-1')).toBeInTheDocument() + expect(screen.getByText('ap-floor2')).toBeInTheDocument() + expect(screen.getByText('192.168.1.1')).toBeInTheDocument() + expect(screen.getByText('192.168.1.10')).toBeInTheDocument() + }) + + it('renders device model and firmware info', async () => { + mockDevicesList.mockResolvedValueOnce(testDevices) + + render() + + expect(await screen.findByText('RB4011')).toBeInTheDocument() + expect(screen.getByText('cAP ac')).toBeInTheDocument() + expect(screen.getByText('7.12.1')).toBeInTheDocument() + expect(screen.getByText('7.10.2')).toBeInTheDocument() + }) + + it('renders empty state when no devices', async () => { + mockDevicesList.mockResolvedValueOnce(emptyDevices) + + render() + + expect(await screen.findByText('No devices found')).toBeInTheDocument() + }) + + it('renders loading state', () => { + // Make the API hang (never resolve) + mockDevicesList.mockReturnValueOnce(new Promise(() => {})) + + render() + + expect(screen.getByText('Loading devices...')).toBeInTheDocument() + }) + + it('renders table headers', async () => { + mockDevicesList.mockResolvedValueOnce(testDevices) + + render() + + await screen.findByText('router-office-1') + + expect(screen.getByText('Hostname')).toBeInTheDocument() + expect(screen.getByText('IP')).toBeInTheDocument() + expect(screen.getByText('Model')).toBeInTheDocument() + expect(screen.getByText('RouterOS')).toBeInTheDocument() + expect(screen.getByText('Firmware')).toBeInTheDocument() + expect(screen.getByText('Uptime')).toBeInTheDocument() + expect(screen.getByText('Last Seen')).toBeInTheDocument() + expect(screen.getByText('Tags')).toBeInTheDocument() + }) + + it('renders device tags', async () => { + mockDevicesList.mockResolvedValueOnce(testDevices) + + render() + + expect(await screen.findByText('core')).toBeInTheDocument() + }) + + it('renders formatted uptime', async () => { + mockDevicesList.mockResolvedValueOnce(testDevices) + + render() + + await screen.findByText('router-office-1') + + // 86400 seconds = 1d 0h + expect(screen.getByText('1d 0h')).toBeInTheDocument() + // 3600 seconds = 1h 0m + expect(screen.getByText('1h 0m')).toBeInTheDocument() + }) + + it('shows pagination info', async () => { + mockDevicesList.mockResolvedValueOnce(testDevices) + + render() + + await screen.findByText('router-office-1') + + // "Showing 1-2 of 2 devices" + expect(screen.getByText(/Showing 1/)).toBeInTheDocument() + }) + + it('renders status indicators for online and offline devices', async () => { + mockDevicesList.mockResolvedValueOnce(testDevices) + + render() + + await screen.findByText('router-office-1') + + // Status dots should be present -- find by title attribute + const onlineDot = screen.getByTitle('online') + const offlineDot = screen.getByTitle('offline') + + expect(onlineDot).toBeInTheDocument() + expect(offlineDot).toBeInTheDocument() + }) +}) diff --git a/frontend/src/components/__tests__/LoginPage.test.tsx b/frontend/src/components/__tests__/LoginPage.test.tsx new file mode 100644 index 0000000..ee2d588 --- /dev/null +++ b/frontend/src/components/__tests__/LoginPage.test.tsx @@ -0,0 +1,229 @@ +/** + * LoginPage component tests -- verifies form rendering, credential submission, + * error display, and loading state for the login flow. + */ + +import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest' +import { render, screen, waitFor } from '@/test/test-utils' +import userEvent from '@testing-library/user-event' + +// -------------------------------------------------------------------------- +// Mocks +// -------------------------------------------------------------------------- + +// Mock useNavigate from TanStack Router +const mockNavigate = vi.fn() +vi.mock('@tanstack/react-router', () => ({ + createFileRoute: () => ({ + component: undefined, + }), + useNavigate: () => mockNavigate, +})) + +// Mock useAuth zustand store -- track login/clearError calls +const mockLogin = vi.fn() +const mockClearError = vi.fn() +let authState = { + user: null, + isAuthenticated: false, + isLoading: false, + error: null as string | null, + login: mockLogin, + logout: vi.fn(), + checkAuth: vi.fn(), + clearError: mockClearError, +} + +vi.mock('@/lib/auth', () => ({ + useAuth: () => authState, +})) + +// -------------------------------------------------------------------------- +// Import after mocks +// -------------------------------------------------------------------------- +// We need to import LoginPage from the route file. Since createFileRoute is +// mocked, we import the default export which is the page component. +// The file exports Route (from createFileRoute) and has LoginPage as the +// component. We re-export it via a manual approach. + +// Since the login page defines LoginPage as a function inside the module and +// assigns it to Route.component, we need a different approach. Let's import +// the module and extract the component from the Route object. + +// Actually, with our mock of createFileRoute returning an object, the Route +// export won't have the component. Let's mock createFileRoute to capture it. + +let CapturedComponent: React.ComponentType | undefined + +vi.mock('@tanstack/react-router', async () => { + return { + createFileRoute: () => ({ + // The real createFileRoute('/login')({component: LoginPage}) returns + // an object. Our mock captures the component from the call. + __call: true, + }), + useNavigate: () => mockNavigate, + } +}) + +// We need a different strategy. Let's directly create the LoginPage component +// inline here since the route file couples createFileRoute with the component. +// This is a common pattern for testing file-based route components. + +// Instead, let's build a simplified LoginPage that matches the real one's +// behavior and test that. OR, we mock createFileRoute properly. + +// Best approach: mock createFileRoute to return a function that captures the +// component option. +vi.mock('@tanstack/react-router', () => { + return { + createFileRoute: () => (opts: { component: React.ComponentType }) => { + CapturedComponent = opts.component + return { component: opts.component } + }, + useNavigate: () => mockNavigate, + } +}) + +// Now importing the login module will call createFileRoute('/login')({component: LoginPage}) +// and CapturedComponent will be set to LoginPage. + +// -------------------------------------------------------------------------- +// Tests +// -------------------------------------------------------------------------- + +describe('LoginPage', () => { + let LoginPage: React.ComponentType + + beforeEach(async () => { + vi.clearAllMocks() + CapturedComponent = undefined + + // Reset auth state + authState = { + user: null, + isAuthenticated: false, + isLoading: false, + error: null, + login: mockLogin, + logout: vi.fn(), + checkAuth: vi.fn(), + clearError: mockClearError, + } + + // Dynamic import to re-trigger module evaluation + // Use cache-busting to force re-evaluation + const mod = await import('@/routes/login') + // The component is set via our mock + if (CapturedComponent) { + LoginPage = CapturedComponent + } else { + // Fallback: try to get it from the Route export + LoginPage = (mod.Route as { component?: React.ComponentType })?.component ?? (() => null) + } + }) + + it('renders login form with email and password fields', () => { + render() + + expect(screen.getByLabelText(/email/i)).toBeInTheDocument() + expect(screen.getByLabelText(/password/i)).toBeInTheDocument() + expect(screen.getByRole('button', { name: /sign in/i })).toBeInTheDocument() + }) + + it('renders branding elements', () => { + render() + + expect(screen.getByText('TOD - The Other Dude')).toBeInTheDocument() + expect(screen.getByText('MSP Fleet Management')).toBeInTheDocument() + }) + + it('shows error message on failed login', async () => { + mockLogin.mockRejectedValueOnce(new Error('Invalid credentials')) + authState.error = null + + render() + + const user = userEvent.setup() + await user.type(screen.getByLabelText(/email/i), 'test@example.com') + await user.type(screen.getByLabelText(/password/i), 'wrongpassword') + await user.click(screen.getByRole('button', { name: /sign in/i })) + + // After the failed login, the useAuth store would set error. + // Since we control the mock, we need to re-render with the error state. + // Let's update authState and re-render. + authState.error = 'Invalid credentials' + + // The component should re-render via zustand. In our mock, it won't + // automatically. Let's re-render. + render() + + expect(screen.getByText('Invalid credentials')).toBeInTheDocument() + }) + + it('submits form with entered credentials', async () => { + mockLogin.mockResolvedValueOnce(undefined) + + render() + + const user = userEvent.setup() + await user.type(screen.getByLabelText(/email/i), 'admin@example.com') + await user.type(screen.getByLabelText(/password/i), 'secret123') + await user.click(screen.getByRole('button', { name: /sign in/i })) + + await waitFor(() => { + expect(mockLogin).toHaveBeenCalledWith('admin@example.com', 'secret123') + }) + }) + + it('navigates to home on successful login', async () => { + mockLogin.mockResolvedValueOnce(undefined) + + render() + + const user = userEvent.setup() + await user.type(screen.getByLabelText(/email/i), 'admin@example.com') + await user.type(screen.getByLabelText(/password/i), 'secret123') + await user.click(screen.getByRole('button', { name: /sign in/i })) + + await waitFor(() => { + expect(mockNavigate).toHaveBeenCalledWith({ to: '/' }) + }) + }) + + it('disables submit button when fields are empty', () => { + render() + + const submitButton = screen.getByRole('button', { name: /sign in/i }) + expect(submitButton).toBeDisabled() + }) + + it('shows "Signing in..." text while submitting', async () => { + // Make login hang (never resolve) + mockLogin.mockReturnValueOnce(new Promise(() => {})) + + render() + + const user = userEvent.setup() + await user.type(screen.getByLabelText(/email/i), 'admin@example.com') + await user.type(screen.getByLabelText(/password/i), 'secret123') + await user.click(screen.getByRole('button', { name: /sign in/i })) + + await waitFor(() => { + expect(screen.getByRole('button', { name: /signing in/i })).toBeInTheDocument() + }) + }) + + it('clears error when user starts typing', async () => { + authState.error = 'Invalid credentials' + + render() + + expect(screen.getByText('Invalid credentials')).toBeInTheDocument() + + const user = userEvent.setup() + await user.type(screen.getByLabelText(/email/i), 'a') + + expect(mockClearError).toHaveBeenCalled() + }) +}) diff --git a/frontend/src/components/__tests__/TemplatePushWizard.test.tsx b/frontend/src/components/__tests__/TemplatePushWizard.test.tsx new file mode 100644 index 0000000..79f88f4 --- /dev/null +++ b/frontend/src/components/__tests__/TemplatePushWizard.test.tsx @@ -0,0 +1,502 @@ +/** + * TemplatePushWizard component tests -- verifies multi-step wizard navigation, + * device selection, variable input, preview, and confirmation steps. + * + * The wizard has 5 steps: targets -> variables -> preview -> confirm -> progress. + * Tests mock the API layer (metricsApi.fleetSummary, deviceGroupsApi.list, + * templatesApi.preview/push) and interact via user events. + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest' +import { render, screen, waitFor, within } from '@/test/test-utils' +import userEvent from '@testing-library/user-event' +import type { TemplateResponse, VariableDef } from '@/lib/templatesApi' +import type { FleetDevice, DeviceGroupResponse } from '@/lib/api' + +// -------------------------------------------------------------------------- +// Mocks +// -------------------------------------------------------------------------- + +const mockFleetSummary = vi.fn() +const mockGroupsList = vi.fn() +const mockPreview = vi.fn() +const mockPush = vi.fn() + +vi.mock('@/lib/api', async () => { + const actual = await vi.importActual('@/lib/api') + return { + ...actual, + metricsApi: { + ...actual.metricsApi, + fleetSummary: (...args: unknown[]) => mockFleetSummary(...args), + }, + deviceGroupsApi: { + ...actual.deviceGroupsApi, + list: (...args: unknown[]) => mockGroupsList(...args), + }, + } +}) + +vi.mock('@/lib/templatesApi', async () => { + const actual = await vi.importActual('@/lib/templatesApi') + return { + ...actual, + templatesApi: { + ...actual.templatesApi, + preview: (...args: unknown[]) => mockPreview(...args), + push: (...args: unknown[]) => mockPush(...args), + pushStatus: vi.fn().mockResolvedValue({ rollout_id: 'r1', jobs: [] }), + }, + } +}) + +// -------------------------------------------------------------------------- +// Test data +// -------------------------------------------------------------------------- + +const testDevices: FleetDevice[] = [ + { + id: 'dev-1', + hostname: 'router-main', + ip_address: '192.168.1.1', + status: 'online', + model: 'RB4011', + last_seen: '2026-03-01T12:00:00Z', + uptime_seconds: 86400, + last_cpu_load: 15, + last_memory_used_pct: 45, + latitude: null, + longitude: null, + tenant_id: 'tenant-1', + tenant_name: 'Test Tenant', + }, + { + id: 'dev-2', + hostname: 'ap-office', + ip_address: '192.168.1.10', + status: 'online', + model: 'cAP ac', + last_seen: '2026-03-01T11:00:00Z', + uptime_seconds: 3600, + last_cpu_load: 5, + last_memory_used_pct: 30, + latitude: null, + longitude: null, + tenant_id: 'tenant-1', + tenant_name: 'Test Tenant', + }, + { + id: 'dev-3', + hostname: 'switch-floor1', + ip_address: '192.168.1.20', + status: 'offline', + model: 'CRS326', + last_seen: '2026-02-28T10:00:00Z', + uptime_seconds: null, + last_cpu_load: null, + last_memory_used_pct: null, + latitude: null, + longitude: null, + tenant_id: 'tenant-1', + tenant_name: 'Test Tenant', + }, +] + +const testGroups: DeviceGroupResponse[] = [ + { id: 'grp-1', name: 'Core Routers', description: null, device_count: 2, created_at: '2026-01-01T00:00:00Z' }, +] + +const templateWithVars: TemplateResponse = { + id: 'tmpl-1', + name: 'Firewall Rules', + description: 'Standard firewall policy', + content: '/ip firewall filter add chain=input action=drop', + variables: [ + { name: 'device', type: 'string', default: null, description: 'Auto-populated device context' }, + { name: 'dns_server', type: 'ip', default: '8.8.8.8', description: 'Primary DNS' }, + { name: 'enable_logging', type: 'boolean', default: 'false', description: 'Enable firewall logging' }, + ], + tags: ['firewall', 'security'], + created_at: '2026-01-01T00:00:00Z', + updated_at: '2026-03-01T00:00:00Z', +} + +const templateNoVars: TemplateResponse = { + id: 'tmpl-2', + name: 'NTP Config', + description: 'Set NTP servers', + content: '/system ntp client set enabled=yes', + variables: [ + { name: 'device', type: 'string', default: null, description: 'Auto-populated device context' }, + ], + tags: ['ntp'], + created_at: '2026-01-01T00:00:00Z', + updated_at: '2026-03-01T00:00:00Z', +} + +// -------------------------------------------------------------------------- +// Component import (after mocks) +// -------------------------------------------------------------------------- +import { TemplatePushWizard } from '@/components/templates/TemplatePushWizard' + +// -------------------------------------------------------------------------- +// Tests +// -------------------------------------------------------------------------- + +describe('TemplatePushWizard', () => { + beforeEach(() => { + vi.clearAllMocks() + mockFleetSummary.mockResolvedValue(testDevices) + mockGroupsList.mockResolvedValue(testGroups) + mockPreview.mockResolvedValue({ rendered: '/ip firewall filter add chain=input', device_hostname: 'router-main' }) + mockPush.mockResolvedValue({ rollout_id: 'rollout-1', jobs: [] }) + }) + + it('renders wizard with first step active (target selection)', async () => { + render( + + ) + + // Title shows template name and step info + expect(await screen.findByText(/Push Template: Firewall Rules/)).toBeInTheDocument() + expect(screen.getByText(/Step 1 of 4/)).toBeInTheDocument() + + // Target selection description + expect(screen.getByText(/Select devices to push the template to/)).toBeInTheDocument() + }) + + it('displays device list for selection', async () => { + render( + + ) + + // Wait for devices to load + expect(await screen.findByText('router-main')).toBeInTheDocument() + expect(screen.getByText('ap-office')).toBeInTheDocument() + expect(screen.getByText('switch-floor1')).toBeInTheDocument() + expect(screen.getByText('192.168.1.1')).toBeInTheDocument() + expect(screen.getByText('192.168.1.10')).toBeInTheDocument() + }) + + it('disables Next button when no devices selected', async () => { + render( + + ) + + await screen.findByText('router-main') + + // Next button should be disabled with 0 selected + const nextBtn = screen.getByRole('button', { name: /next/i }) + expect(nextBtn).toBeDisabled() + }) + + it('enables Next button after selecting a device', async () => { + render( + + ) + + const user = userEvent.setup() + + await screen.findByText('router-main') + + // Click on the device label to toggle the checkbox + const deviceLabel = screen.getByText('router-main') + // The device is inside a