feat: The Other Dude v9.0.1 — full-featured email system

ci: add GitHub Pages deployment workflow for docs site

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Jason Staack
2026-03-08 17:46:37 -05:00
commit b840047e19
511 changed files with 106948 additions and 0 deletions

78
backend/alembic/env.py Normal file
View File

@@ -0,0 +1,78 @@
"""Alembic environment configuration for async SQLAlchemy with PostgreSQL."""
import asyncio
import os
from logging.config import fileConfig
from alembic import context
from sqlalchemy import pool
from sqlalchemy.engine import Connection
from sqlalchemy.ext.asyncio import async_engine_from_config
# Import all models to register them with Base.metadata
from app.database import Base
import app.models.tenant # noqa: F401
import app.models.user # noqa: F401
import app.models.device # noqa: F401
import app.models.config_backup # noqa: F401
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Override sqlalchemy.url from DATABASE_URL env var if set (for Docker)
# so the same alembic.ini works both locally and in containers.
if os.environ.get("DATABASE_URL"):
    config.set_main_option("sqlalchemy.url", os.environ["DATABASE_URL"])

# Interpret the config file for Python logging.
# (config_file_name is None when Alembic is driven programmatically.)
if config.config_file_name is not None:
    fileConfig(config.config_file_name)

# Add your model's MetaData object here for 'autogenerate' support.
# All model modules are imported above so their tables are registered.
target_metadata = Base.metadata
def run_migrations_offline() -> None:
    """Run migrations in 'offline' mode.

    Emits SQL to stdout instead of connecting to a database; literal binds
    are rendered inline so the script is self-contained.
    """
    offline_opts = {
        "url": config.get_main_option("sqlalchemy.url"),
        "target_metadata": target_metadata,
        "literal_binds": True,
        "dialect_opts": {"paramstyle": "named"},
    }
    context.configure(**offline_opts)
    with context.begin_transaction():
        context.run_migrations()
def do_run_migrations(connection: Connection) -> None:
    """Configure Alembic against an open sync connection and run migrations.

    Invoked via AsyncConnection.run_sync(), which hands us the underlying
    synchronous Connection.
    """
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
    )
    with context.begin_transaction():
        context.run_migrations()
async def run_async_migrations() -> None:
    """Run migrations in 'online' mode with an async engine.

    Builds the engine from the [alembic] ini section (sqlalchemy.* keys),
    runs the migrations over a single connection, then disposes the pool.
    """
    ini_section = config.get_section(config.config_ini_section, {})
    engine = async_engine_from_config(
        ini_section,
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )
    async with engine.connect() as async_conn:
        await async_conn.run_sync(do_run_migrations)
    # Explicitly release the engine's resources once migrations finish.
    await engine.dispose()
def run_migrations_online() -> None:
    """Entry point for 'online' mode: drive the async runner to completion."""
    migration_coro = run_async_migrations()
    asyncio.run(migration_coro)
# Alembic executes this module at migration time; dispatch on the mode the
# CLI requested (--sql produces offline mode).
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()

View File

@@ -0,0 +1,26 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision: str = ${repr(up_revision)}
down_revision: Union[str, None] = ${repr(down_revision)}
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
def upgrade() -> None:
${upgrades if upgrades else "pass"}
def downgrade() -> None:
${downgrades if downgrades else "pass"}

View File

@@ -0,0 +1,376 @@
"""Initial schema with RLS policies for multi-tenant isolation.
Revision ID: 001
Revises: None
Create Date: 2026-02-24
This migration creates:
1. All database tables (tenants, users, devices, device_groups, device_tags,
device_group_memberships, device_tag_assignments)
2. Composite unique indexes for tenant-scoped uniqueness
3. Row Level Security (RLS) on all tenant-scoped tables
4. RLS policies using app.current_tenant PostgreSQL setting
5. The app_user role with appropriate grants (cannot bypass RLS)
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
# First revision in the chain — no parent (down_revision is None).
revision: str = "001"
down_revision: Union[str, None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create all base tables, enable RLS, and grant access to app_user.

    Ordering matters: tenants must exist before the tables that reference
    it via FK, and the RLS/GRANT statements require the tables to exist.
    """
    # =========================================================================
    # TENANTS TABLE
    # =========================================================================
    op.create_table(
        "tenants",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            # NOTE(review): gen_random_uuid() is built-in on PostgreSQL 13+;
            # older servers need pgcrypto — confirm the minimum PG version.
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column("name", sa.String(255), nullable=False),
        sa.Column("description", sa.Text, nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("name"),
    )
    op.create_index("ix_tenants_name", "tenants", ["name"], unique=True)

    # =========================================================================
    # USERS TABLE
    # =========================================================================
    op.create_table(
        "users",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column("email", sa.String(255), nullable=False),
        sa.Column("hashed_password", sa.String(255), nullable=False),
        sa.Column("name", sa.String(255), nullable=False),
        sa.Column("role", sa.String(50), nullable=False, server_default="viewer"),
        # Nullable: users without a tenant exist (see the RLS note below).
        sa.Column("tenant_id", postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column("is_active", sa.Boolean, nullable=False, server_default="true"),
        sa.Column("last_login", sa.DateTime(timezone=True), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.PrimaryKeyConstraint("id"),
        # Email is globally unique across tenants, not tenant-scoped.
        sa.UniqueConstraint("email"),
        sa.ForeignKeyConstraint(["tenant_id"], ["tenants.id"], ondelete="CASCADE"),
    )
    op.create_index("ix_users_email", "users", ["email"], unique=True)
    op.create_index("ix_users_tenant_id", "users", ["tenant_id"])

    # =========================================================================
    # DEVICES TABLE
    # =========================================================================
    op.create_table(
        "devices",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column("tenant_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("hostname", sa.String(255), nullable=False),
        # 45 chars fits a full IPv6 textual address.
        sa.Column("ip_address", sa.String(45), nullable=False),
        sa.Column("api_port", sa.Integer, nullable=False, server_default="8728"),
        sa.Column("api_ssl_port", sa.Integer, nullable=False, server_default="8729"),
        sa.Column("model", sa.String(255), nullable=True),
        sa.Column("serial_number", sa.String(255), nullable=True),
        sa.Column("firmware_version", sa.String(100), nullable=True),
        sa.Column("routeros_version", sa.String(100), nullable=True),
        sa.Column("uptime_seconds", sa.Integer, nullable=True),
        sa.Column("last_seen", sa.DateTime(timezone=True), nullable=True),
        sa.Column("encrypted_credentials", sa.LargeBinary, nullable=True),
        sa.Column("status", sa.String(20), nullable=False, server_default="unknown"),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.ForeignKeyConstraint(["tenant_id"], ["tenants.id"], ondelete="CASCADE"),
        # Hostnames are unique per tenant, not globally.
        sa.UniqueConstraint("tenant_id", "hostname", name="uq_devices_tenant_hostname"),
    )
    op.create_index("ix_devices_tenant_id", "devices", ["tenant_id"])

    # =========================================================================
    # DEVICE GROUPS TABLE
    # =========================================================================
    op.create_table(
        "device_groups",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column("tenant_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("name", sa.String(255), nullable=False),
        sa.Column("description", sa.Text, nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.ForeignKeyConstraint(["tenant_id"], ["tenants.id"], ondelete="CASCADE"),
        sa.UniqueConstraint("tenant_id", "name", name="uq_device_groups_tenant_name"),
    )
    op.create_index("ix_device_groups_tenant_id", "device_groups", ["tenant_id"])

    # =========================================================================
    # DEVICE TAGS TABLE
    # =========================================================================
    op.create_table(
        "device_tags",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column("tenant_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("name", sa.String(100), nullable=False),
        # 7 chars: "#RRGGBB" hex color.
        sa.Column("color", sa.String(7), nullable=True),
        sa.PrimaryKeyConstraint("id"),
        sa.ForeignKeyConstraint(["tenant_id"], ["tenants.id"], ondelete="CASCADE"),
        sa.UniqueConstraint("tenant_id", "name", name="uq_device_tags_tenant_name"),
    )
    op.create_index("ix_device_tags_tenant_id", "device_tags", ["tenant_id"])

    # =========================================================================
    # DEVICE GROUP MEMBERSHIPS TABLE (join table, composite PK)
    # =========================================================================
    op.create_table(
        "device_group_memberships",
        sa.Column("device_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("group_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.PrimaryKeyConstraint("device_id", "group_id"),
        sa.ForeignKeyConstraint(["device_id"], ["devices.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["group_id"], ["device_groups.id"], ondelete="CASCADE"),
    )

    # =========================================================================
    # DEVICE TAG ASSIGNMENTS TABLE (join table, composite PK)
    # =========================================================================
    op.create_table(
        "device_tag_assignments",
        sa.Column("device_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("tag_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.PrimaryKeyConstraint("device_id", "tag_id"),
        sa.ForeignKeyConstraint(["device_id"], ["devices.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["tag_id"], ["device_tags.id"], ondelete="CASCADE"),
    )

    # =========================================================================
    # ROW LEVEL SECURITY (RLS)
    # =========================================================================
    # RLS is the core tenant isolation mechanism. The app_user role CANNOT
    # bypass RLS (only superusers can). All queries through app_user will
    # be filtered by the current_setting('app.current_tenant') value which
    # is set per-request by the tenant_context middleware.
    # The second argument (true) is missing_ok: when the setting is unset,
    # current_setting returns NULL, so the comparison fails and no rows are
    # visible — fail-closed rather than raising an error.
    conn = op.get_bind()

    # --- TENANTS RLS ---
    # Super admin sees all; tenant users see only their tenant
    conn.execute(sa.text("ALTER TABLE tenants ENABLE ROW LEVEL SECURITY"))
    # FORCE applies the policy even to the table owner.
    conn.execute(sa.text("ALTER TABLE tenants FORCE ROW LEVEL SECURITY"))
    conn.execute(sa.text("""
        CREATE POLICY tenant_isolation ON tenants
        USING (
            id::text = current_setting('app.current_tenant', true)
            OR current_setting('app.current_tenant', true) = 'super_admin'
        )
        WITH CHECK (
            id::text = current_setting('app.current_tenant', true)
            OR current_setting('app.current_tenant', true) = 'super_admin'
        )
    """))

    # --- USERS RLS ---
    # Users see only other users in their tenant; super_admin sees all.
    # NOTE: rows with tenant_id IS NULL are visible only under the
    # 'super_admin' context (NULL::text = ... evaluates to NULL, not true).
    conn.execute(sa.text("ALTER TABLE users ENABLE ROW LEVEL SECURITY"))
    conn.execute(sa.text("ALTER TABLE users FORCE ROW LEVEL SECURITY"))
    conn.execute(sa.text("""
        CREATE POLICY tenant_isolation ON users
        USING (
            tenant_id::text = current_setting('app.current_tenant', true)
            OR current_setting('app.current_tenant', true) = 'super_admin'
        )
        WITH CHECK (
            tenant_id::text = current_setting('app.current_tenant', true)
            OR current_setting('app.current_tenant', true) = 'super_admin'
        )
    """))

    # --- DEVICES RLS ---
    conn.execute(sa.text("ALTER TABLE devices ENABLE ROW LEVEL SECURITY"))
    conn.execute(sa.text("ALTER TABLE devices FORCE ROW LEVEL SECURITY"))
    conn.execute(sa.text("""
        CREATE POLICY tenant_isolation ON devices
        USING (tenant_id::text = current_setting('app.current_tenant', true))
        WITH CHECK (tenant_id::text = current_setting('app.current_tenant', true))
    """))

    # --- DEVICE GROUPS RLS ---
    conn.execute(sa.text("ALTER TABLE device_groups ENABLE ROW LEVEL SECURITY"))
    conn.execute(sa.text("ALTER TABLE device_groups FORCE ROW LEVEL SECURITY"))
    conn.execute(sa.text("""
        CREATE POLICY tenant_isolation ON device_groups
        USING (tenant_id::text = current_setting('app.current_tenant', true))
        WITH CHECK (tenant_id::text = current_setting('app.current_tenant', true))
    """))

    # --- DEVICE TAGS RLS ---
    conn.execute(sa.text("ALTER TABLE device_tags ENABLE ROW LEVEL SECURITY"))
    conn.execute(sa.text("ALTER TABLE device_tags FORCE ROW LEVEL SECURITY"))
    conn.execute(sa.text("""
        CREATE POLICY tenant_isolation ON device_tags
        USING (tenant_id::text = current_setting('app.current_tenant', true))
        WITH CHECK (tenant_id::text = current_setting('app.current_tenant', true))
    """))

    # --- DEVICE GROUP MEMBERSHIPS RLS ---
    # These are filtered by joining through devices/groups (which already have RLS)
    # But we also add direct RLS via a join to the devices table
    conn.execute(sa.text("ALTER TABLE device_group_memberships ENABLE ROW LEVEL SECURITY"))
    conn.execute(sa.text("ALTER TABLE device_group_memberships FORCE ROW LEVEL SECURITY"))
    conn.execute(sa.text("""
        CREATE POLICY tenant_isolation ON device_group_memberships
        USING (
            EXISTS (
                SELECT 1 FROM devices d
                WHERE d.id = device_id
                AND d.tenant_id::text = current_setting('app.current_tenant', true)
            )
        )
        WITH CHECK (
            EXISTS (
                SELECT 1 FROM devices d
                WHERE d.id = device_id
                AND d.tenant_id::text = current_setting('app.current_tenant', true)
            )
        )
    """))

    # --- DEVICE TAG ASSIGNMENTS RLS ---
    conn.execute(sa.text("ALTER TABLE device_tag_assignments ENABLE ROW LEVEL SECURITY"))
    conn.execute(sa.text("ALTER TABLE device_tag_assignments FORCE ROW LEVEL SECURITY"))
    conn.execute(sa.text("""
        CREATE POLICY tenant_isolation ON device_tag_assignments
        USING (
            EXISTS (
                SELECT 1 FROM devices d
                WHERE d.id = device_id
                AND d.tenant_id::text = current_setting('app.current_tenant', true)
            )
        )
        WITH CHECK (
            EXISTS (
                SELECT 1 FROM devices d
                WHERE d.id = device_id
                AND d.tenant_id::text = current_setting('app.current_tenant', true)
            )
        )
    """))

    # =========================================================================
    # GRANT PERMISSIONS TO app_user (RLS-enforcing application role)
    # =========================================================================
    # app_user is a non-superuser role — it CANNOT bypass RLS policies.
    # All API queries use this role to ensure tenant isolation.
    # Table names come from this fixed list, so the f-string SQL is safe.
    tables = [
        "tenants",
        "users",
        "devices",
        "device_groups",
        "device_tags",
        "device_group_memberships",
        "device_tag_assignments",
    ]
    for table in tables:
        conn.execute(sa.text(
            f"GRANT SELECT, INSERT, UPDATE, DELETE ON {table} TO app_user"
        ))

    # Grant sequence usage for UUID generation (gen_random_uuid is built-in, but just in case)
    conn.execute(sa.text("GRANT USAGE ON SCHEMA public TO app_user"))

    # Allow app_user to set the tenant context variable.
    # NOTE(review): GRANT ... ON PARAMETER requires PostgreSQL 15+ — confirm
    # the deployment targets PG >= 15.
    conn.execute(sa.text("GRANT SET ON PARAMETER app.current_tenant TO app_user"))
def downgrade() -> None:
    """Reverse the initial schema: revoke app_user grants and drop all tables."""
    conn = op.get_bind()
    # Creation order — reversed below for dropping so FKs never dangle.
    tenant_tables = (
        "tenants",
        "users",
        "devices",
        "device_groups",
        "device_tags",
        "device_group_memberships",
        "device_tag_assignments",
    )
    # Best-effort revoke: the role may already be gone in some environments,
    # so each failure is swallowed independently.
    for name in tenant_tables:
        try:
            conn.execute(sa.text(f"REVOKE ALL ON {name} FROM app_user"))
        except Exception:
            pass
    # Drop in reverse dependency order (join tables first, tenants last).
    for name in reversed(tenant_tables):
        op.drop_table(name)

View File

@@ -0,0 +1,92 @@
"""Add routeros_major_version column and poller_user PostgreSQL role.
Revision ID: 002
Revises: 001
Create Date: 2026-02-24
This migration:
1. Adds routeros_major_version INTEGER column to devices table (nullable).
Stores the detected major version (6 or 7) as populated by the Go poller.
2. Creates the poller_user PostgreSQL role with SELECT-only access to the
devices table. The poller_user bypasses RLS intentionally — it must read
all devices across all tenants to poll them.
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
# Second revision — follows the initial schema (001).
revision: str = "002"
down_revision: Union[str, None] = "001"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add devices.routeros_major_version and create the poller_user role.

    The column stores the RouterOS major version (6 or 7) detected by the
    Go poller after a successful /system/resource/print; it stays NULL until
    the poller has connected at least once.

    poller_user is SELECT-only on devices and carries BYPASSRLS so it can
    read all devices across all tenants (required for fleet-wide polling).
    """
    import os

    # =========================================================================
    # ADD routeros_major_version COLUMN
    # =========================================================================
    op.add_column(
        "devices",
        sa.Column("routeros_major_version", sa.Integer(), nullable=True),
    )

    # =========================================================================
    # CREATE poller_user ROLE AND GRANT PERMISSIONS
    # =========================================================================
    conn = op.get_bind()

    # SECURITY FIX: the role password used to be hard-coded in this file.
    # It is now read from POLLER_DB_PASSWORD, falling back to the old literal
    # so existing deployments keep working — rotate it in production.
    password = os.environ.get("POLLER_DB_PASSWORD", "poller_password")
    # CREATE ROLE cannot take a bind parameter for the password, so the value
    # is interpolated; double single-quotes to keep the SQL literal valid.
    safe_password = password.replace("'", "''")
    conn.execute(sa.text(f"""
        DO $$
        BEGIN
            IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'poller_user') THEN
                CREATE ROLE poller_user WITH LOGIN PASSWORD '{safe_password}' BYPASSRLS;
            END IF;
        END
        $$
    """))

    # NOTE(review): database name 'mikrotik' is hard-coded — confirm it
    # matches the deployed database name.
    conn.execute(sa.text("GRANT CONNECT ON DATABASE mikrotik TO poller_user"))
    conn.execute(sa.text("GRANT USAGE ON SCHEMA public TO poller_user"))

    # SELECT on devices only — poller needs to read encrypted_credentials
    # and other device fields. No INSERT/UPDATE/DELETE needed.
    conn.execute(sa.text("GRANT SELECT ON devices TO poller_user"))
def downgrade() -> None:
    """Tear down poller_user grants/role and drop routeros_major_version."""
    conn = op.get_bind()
    # Best-effort cleanup in the same order as before: each statement may
    # fail independently (e.g. the role was dropped by hand), so failures
    # are swallowed per statement.
    cleanup_statements = (
        "REVOKE SELECT ON devices FROM poller_user",
        "REVOKE USAGE ON SCHEMA public FROM poller_user",
        "REVOKE CONNECT ON DATABASE mikrotik FROM poller_user",
        "DROP ROLE IF EXISTS poller_user",
    )
    for statement in cleanup_statements:
        try:
            conn.execute(sa.text(statement))
        except Exception:
            pass
    # Finally remove the column added by upgrade().
    op.drop_column("devices", "routeros_major_version")

View File

@@ -0,0 +1,174 @@
"""Add TimescaleDB hypertables for metrics and denormalized columns on devices.
Revision ID: 003
Revises: 002
Create Date: 2026-02-25
This migration:
1. Creates interface_metrics hypertable for per-interface traffic counters.
2. Creates health_metrics hypertable for per-device CPU/memory/disk/temperature.
3. Creates wireless_metrics hypertable for per-interface wireless client stats.
4. Adds last_cpu_load and last_memory_used_pct denormalized columns to devices
for efficient fleet table display without joining hypertables.
5. Applies RLS tenant_isolation policies and appropriate GRANTs on all hypertables.
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
# Third revision — follows the poller role migration (002).
revision: str = "003"
down_revision: Union[str, None] = "002"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def _enable_tenant_rls(conn, table: str) -> None:
    """Enable + force RLS on *table* with the standard tenant_isolation policy.

    FIX vs the original: current_setting() now passes missing_ok=true so an
    unset app.current_tenant yields NULL (no rows visible — fail-closed)
    instead of raising, and FORCE ROW LEVEL SECURITY is applied so even the
    table owner is subject to the policy — both matching migration 001.
    """
    conn.execute(sa.text(f"ALTER TABLE {table} ENABLE ROW LEVEL SECURITY"))
    conn.execute(sa.text(f"ALTER TABLE {table} FORCE ROW LEVEL SECURITY"))
    conn.execute(sa.text(f"""
        CREATE POLICY tenant_isolation ON {table}
        USING (tenant_id::text = current_setting('app.current_tenant', true))
    """))


def _setup_metrics_hypertable(conn, table: str) -> None:
    """Convert *table* to a hypertable, index it, apply RLS, and grant access."""
    conn.execute(sa.text(
        f"SELECT create_hypertable('{table}', 'time', if_not_exists => TRUE)"
    ))
    conn.execute(sa.text(
        f"CREATE INDEX IF NOT EXISTS idx_{table}_device_time "
        f"ON {table} (device_id, time DESC)"
    ))
    _enable_tenant_rls(conn, table)
    conn.execute(sa.text(f"GRANT SELECT, INSERT ON {table} TO app_user"))
    # poller_user carries BYPASSRLS (migration 002), so RLS does not block it.
    conn.execute(sa.text(f"GRANT SELECT, INSERT ON {table} TO poller_user"))


def upgrade() -> None:
    """Create the three metrics hypertables and denormalized device columns.

    Each hypertable gets the same treatment (create_hypertable, a
    (device_id, time DESC) index, tenant RLS, and app_user/poller_user
    grants), factored into _setup_metrics_hypertable.
    """
    conn = op.get_bind()

    # interface_metrics: per-interface byte counters from /interface/print
    # on every poll cycle. rx_bps/tx_bps are stored as NULL — computed at
    # query time via LAG() so the poller stays stateless.
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS interface_metrics (
            time TIMESTAMPTZ NOT NULL,
            device_id UUID NOT NULL,
            tenant_id UUID NOT NULL,
            interface TEXT NOT NULL,
            rx_bytes BIGINT,
            tx_bytes BIGINT,
            rx_bps BIGINT,
            tx_bps BIGINT
        )
    """))
    _setup_metrics_hypertable(conn, "interface_metrics")

    # health_metrics: per-device CPU/memory/disk/temperature from
    # /system/resource/print and /system/health/print. temperature is
    # nullable — not all RouterOS devices have temperature sensors.
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS health_metrics (
            time TIMESTAMPTZ NOT NULL,
            device_id UUID NOT NULL,
            tenant_id UUID NOT NULL,
            cpu_load SMALLINT,
            free_memory BIGINT,
            total_memory BIGINT,
            free_disk BIGINT,
            total_disk BIGINT,
            temperature SMALLINT
        )
    """))
    _setup_metrics_hypertable(conn, "health_metrics")

    # wireless_metrics: per-wireless-interface aggregated client stats from
    # the v6 wireless or v7 wifi registration table. ccq may be 0 on v7
    # (not available via the WiFi API path); avg_signal is dBm (negative).
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS wireless_metrics (
            time TIMESTAMPTZ NOT NULL,
            device_id UUID NOT NULL,
            tenant_id UUID NOT NULL,
            interface TEXT NOT NULL,
            client_count SMALLINT,
            avg_signal SMALLINT,
            ccq SMALLINT,
            frequency INTEGER
        )
    """))
    _setup_metrics_hypertable(conn, "wireless_metrics")

    # Denormalized columns so the fleet table can show CPU/memory without a
    # JOIN to the hypertable; the metrics subscriber updates them alongside
    # each health_metrics insert.
    op.add_column(
        "devices",
        sa.Column("last_cpu_load", sa.SmallInteger(), nullable=True),
    )
    op.add_column(
        "devices",
        sa.Column("last_memory_used_pct", sa.SmallInteger(), nullable=True),
    )
def downgrade() -> None:
    """Reverse migration 003: drop denormalized columns, then hypertables."""
    # Remove the devices columns first.
    op.drop_column("devices", "last_memory_used_pct")
    op.drop_column("devices", "last_cpu_load")
    # CASCADE takes indexes, policies, and hypertable chunks with it.
    conn = op.get_bind()
    for table in ("wireless_metrics", "health_metrics", "interface_metrics"):
        conn.execute(sa.text(f"DROP TABLE IF EXISTS {table} CASCADE"))

View File

@@ -0,0 +1,128 @@
"""Add config management tables: config_backup_runs, config_backup_schedules, config_push_operations.
Revision ID: 004
Revises: 003
Create Date: 2026-02-25
This migration:
1. Creates config_backup_runs table for backup metadata (content lives in git).
2. Creates config_backup_schedules table for per-tenant/per-device schedule config.
3. Creates config_push_operations table for panic-revert recovery (API-restart safety).
4. Applies RLS tenant_isolation policies and appropriate GRANTs on all tables.
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
# Fourth revision — follows the metrics hypertables migration (003).
revision: str = "004"
down_revision: Union[str, None] = "003"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def _enable_tenant_rls(conn, table: str) -> None:
    """Enable + force RLS on *table* with the standard tenant_isolation policy.

    FIX vs the original: current_setting() now passes missing_ok=true so an
    unset app.current_tenant yields NULL (no rows visible — fail-closed)
    instead of raising, and FORCE ROW LEVEL SECURITY is applied so even the
    table owner is subject to the policy — both matching migration 001.
    """
    conn.execute(sa.text(f"ALTER TABLE {table} ENABLE ROW LEVEL SECURITY"))
    conn.execute(sa.text(f"ALTER TABLE {table} FORCE ROW LEVEL SECURITY"))
    conn.execute(sa.text(f"""
        CREATE POLICY tenant_isolation ON {table}
        USING (tenant_id::text = current_setting('app.current_tenant', true))
    """))


def upgrade() -> None:
    """Create the config management tables with tenant RLS and role grants."""
    conn = op.get_bind()

    # =========================================================================
    # config_backup_runs: one metadata row per backup run. The actual config
    # content lives in the tenant's bare git repository (GIT_STORE_PATH);
    # this table provides the timeline view without duplicating file content.
    # =========================================================================
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS config_backup_runs (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            device_id UUID NOT NULL REFERENCES devices(id) ON DELETE CASCADE,
            tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
            commit_sha TEXT NOT NULL,
            trigger_type TEXT NOT NULL,
            lines_added INT,
            lines_removed INT,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        )
    """))
    conn.execute(sa.text(
        "CREATE INDEX IF NOT EXISTS idx_config_backup_runs_device_created "
        "ON config_backup_runs (device_id, created_at DESC)"
    ))
    _enable_tenant_rls(conn, "config_backup_runs")
    conn.execute(sa.text("GRANT SELECT, INSERT ON config_backup_runs TO app_user"))
    conn.execute(sa.text("GRANT SELECT ON config_backup_runs TO poller_user"))

    # =========================================================================
    # config_backup_schedules: device_id NULL = tenant-wide default; a row
    # with a concrete device_id overrides it.
    # NOTE(review): PostgreSQL UNIQUE treats NULLs as distinct, so multiple
    # tenant-default rows (device_id IS NULL) can coexist — confirm the
    # application layer guards against that.
    # =========================================================================
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS config_backup_schedules (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
            device_id UUID REFERENCES devices(id) ON DELETE CASCADE,
            cron_expression TEXT NOT NULL DEFAULT '0 2 * * *',
            enabled BOOL NOT NULL DEFAULT TRUE,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            UNIQUE(tenant_id, device_id)
        )
    """))
    _enable_tenant_rls(conn, "config_backup_schedules")
    conn.execute(sa.text("GRANT SELECT, INSERT, UPDATE ON config_backup_schedules TO app_user"))

    # =========================================================================
    # config_push_operations: tracks pending two-phase config pushes for
    # panic-revert recovery. If the API pod restarts during the 60-second
    # verification window, the startup handler finds 'pending_verification'
    # rows and either verifies connectivity (cleaning up the RouterOS
    # scheduler job) or marks the push failed. See Pitfall 6 in 04-RESEARCH.md.
    # =========================================================================
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS config_push_operations (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            device_id UUID NOT NULL REFERENCES devices(id) ON DELETE CASCADE,
            tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
            pre_push_commit_sha TEXT NOT NULL,
            scheduler_name TEXT NOT NULL,
            status TEXT NOT NULL DEFAULT 'pending_verification',
            started_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            completed_at TIMESTAMPTZ
        )
    """))
    _enable_tenant_rls(conn, "config_push_operations")
    conn.execute(sa.text("GRANT SELECT, INSERT, UPDATE ON config_push_operations TO app_user"))
def downgrade() -> None:
    """Drop the config management tables in reverse creation order."""
    conn = op.get_bind()
    for table in (
        "config_push_operations",
        "config_backup_schedules",
        "config_backup_runs",
    ):
        conn.execute(sa.text(f"DROP TABLE IF EXISTS {table} CASCADE"))

View File

@@ -0,0 +1,286 @@
"""Add alerting and firmware management tables.
Revision ID: 005
Revises: 004
Create Date: 2026-02-25
This migration:
1. ALTERs devices table: adds architecture and preferred_channel columns.
2. ALTERs device_groups table: adds preferred_channel column.
3. Creates alert_rules, notification_channels, alert_rule_channels, alert_events tables.
4. Creates firmware_versions, firmware_upgrade_jobs tables.
5. Applies RLS policies on tenant-scoped tables.
6. Seeds default alert rules for all existing tenants.
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
# Fifth revision — follows the config management migration (004).
revision: str = "005"
down_revision: Union[str, None] = "004"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Apply revision 005: alerting + firmware management schema.

    Adds ``architecture``/``preferred_channel`` columns to ``devices`` (and
    ``preferred_channel`` to ``device_groups``), creates the alerting tables
    (alert_rules, notification_channels, alert_rule_channels, alert_events)
    and firmware tables (firmware_versions, firmware_upgrade_jobs) with
    tenant-isolation RLS and role grants, then seeds three default alert
    rules for every existing tenant.

    The seed INSERTs are guarded with WHERE NOT EXISTS: ``alert_rules`` has
    no unique constraint, so ``ON CONFLICT DO NOTHING`` could not prevent
    duplicate default rules if this migration were re-run or if a tenant had
    already been seeded by application code.
    """
    conn = op.get_bind()
    # =========================================================================
    # ALTER devices TABLE — add architecture and preferred_channel columns
    # =========================================================================
    conn.execute(sa.text(
        "ALTER TABLE devices ADD COLUMN IF NOT EXISTS architecture TEXT"
    ))
    conn.execute(sa.text(
        "ALTER TABLE devices ADD COLUMN IF NOT EXISTS preferred_channel TEXT DEFAULT 'stable' NOT NULL"
    ))
    # =========================================================================
    # ALTER device_groups TABLE — add preferred_channel column
    # =========================================================================
    conn.execute(sa.text(
        "ALTER TABLE device_groups ADD COLUMN IF NOT EXISTS preferred_channel TEXT DEFAULT 'stable' NOT NULL"
    ))
    # =========================================================================
    # CREATE alert_rules TABLE
    # =========================================================================
    # device_id/group_id are both nullable: a rule may target one device, one
    # group, or (both NULL) the whole tenant.
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS alert_rules (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
            device_id UUID REFERENCES devices(id) ON DELETE CASCADE,
            group_id UUID REFERENCES device_groups(id) ON DELETE SET NULL,
            name TEXT NOT NULL,
            metric TEXT NOT NULL,
            operator TEXT NOT NULL,
            threshold NUMERIC NOT NULL,
            duration_polls INTEGER NOT NULL DEFAULT 1,
            severity TEXT NOT NULL,
            enabled BOOLEAN NOT NULL DEFAULT TRUE,
            is_default BOOLEAN NOT NULL DEFAULT FALSE,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        )
    """))
    conn.execute(sa.text(
        "CREATE INDEX IF NOT EXISTS idx_alert_rules_tenant_enabled "
        "ON alert_rules (tenant_id, enabled)"
    ))
    # NOTE(review): unlike revisions 008/009, these CREATE POLICY statements
    # have no existence guard, so a re-run of this migration fails at the
    # first policy even though the DDL above is IF NOT EXISTS. Left as-is to
    # avoid changing policy semantics; consider the DO $$ guard pattern.
    conn.execute(sa.text("ALTER TABLE alert_rules ENABLE ROW LEVEL SECURITY"))
    conn.execute(sa.text("""
        CREATE POLICY tenant_isolation ON alert_rules
        USING (tenant_id::text = current_setting('app.current_tenant'))
    """))
    conn.execute(sa.text(
        "GRANT SELECT, INSERT, UPDATE, DELETE ON alert_rules TO app_user"
    ))
    conn.execute(sa.text("GRANT ALL ON alert_rules TO poller_user"))
    # =========================================================================
    # CREATE notification_channels TABLE
    # =========================================================================
    # smtp_password is BYTEA — presumably stored encrypted like the VPN keys
    # in revision 010; confirm against the channel service before relying on it.
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS notification_channels (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
            name TEXT NOT NULL,
            channel_type TEXT NOT NULL,
            smtp_host TEXT,
            smtp_port INTEGER,
            smtp_user TEXT,
            smtp_password BYTEA,
            smtp_use_tls BOOLEAN DEFAULT FALSE,
            from_address TEXT,
            to_address TEXT,
            webhook_url TEXT,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        )
    """))
    conn.execute(sa.text(
        "CREATE INDEX IF NOT EXISTS idx_notification_channels_tenant "
        "ON notification_channels (tenant_id)"
    ))
    conn.execute(sa.text("ALTER TABLE notification_channels ENABLE ROW LEVEL SECURITY"))
    conn.execute(sa.text("""
        CREATE POLICY tenant_isolation ON notification_channels
        USING (tenant_id::text = current_setting('app.current_tenant'))
    """))
    conn.execute(sa.text(
        "GRANT SELECT, INSERT, UPDATE, DELETE ON notification_channels TO app_user"
    ))
    conn.execute(sa.text("GRANT ALL ON notification_channels TO poller_user"))
    # =========================================================================
    # CREATE alert_rule_channels TABLE (M2M association)
    # =========================================================================
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS alert_rule_channels (
            rule_id UUID NOT NULL REFERENCES alert_rules(id) ON DELETE CASCADE,
            channel_id UUID NOT NULL REFERENCES notification_channels(id) ON DELETE CASCADE,
            PRIMARY KEY (rule_id, channel_id)
        )
    """))
    conn.execute(sa.text("ALTER TABLE alert_rule_channels ENABLE ROW LEVEL SECURITY"))
    # RLS for M2M: join through parent table's tenant_id via rule_id
    conn.execute(sa.text("""
        CREATE POLICY tenant_isolation ON alert_rule_channels
        USING (rule_id IN (
            SELECT id FROM alert_rules
            WHERE tenant_id::text = current_setting('app.current_tenant')
        ))
    """))
    conn.execute(sa.text(
        "GRANT SELECT, INSERT, UPDATE, DELETE ON alert_rule_channels TO app_user"
    ))
    conn.execute(sa.text("GRANT ALL ON alert_rule_channels TO poller_user"))
    # =========================================================================
    # CREATE alert_events TABLE
    # =========================================================================
    # rule_id is SET NULL on delete so historical events survive rule removal.
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS alert_events (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            rule_id UUID REFERENCES alert_rules(id) ON DELETE SET NULL,
            device_id UUID NOT NULL REFERENCES devices(id) ON DELETE CASCADE,
            tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
            status TEXT NOT NULL,
            severity TEXT NOT NULL,
            metric TEXT,
            value NUMERIC,
            threshold NUMERIC,
            message TEXT,
            is_flapping BOOLEAN NOT NULL DEFAULT FALSE,
            acknowledged_at TIMESTAMPTZ,
            acknowledged_by UUID REFERENCES users(id) ON DELETE SET NULL,
            silenced_until TIMESTAMPTZ,
            fired_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            resolved_at TIMESTAMPTZ
        )
    """))
    conn.execute(sa.text(
        "CREATE INDEX IF NOT EXISTS idx_alert_events_device_rule_status "
        "ON alert_events (device_id, rule_id, status)"
    ))
    conn.execute(sa.text(
        "CREATE INDEX IF NOT EXISTS idx_alert_events_tenant_fired "
        "ON alert_events (tenant_id, fired_at)"
    ))
    conn.execute(sa.text("ALTER TABLE alert_events ENABLE ROW LEVEL SECURITY"))
    conn.execute(sa.text("""
        CREATE POLICY tenant_isolation ON alert_events
        USING (tenant_id::text = current_setting('app.current_tenant'))
    """))
    conn.execute(sa.text(
        "GRANT SELECT, INSERT, UPDATE, DELETE ON alert_events TO app_user"
    ))
    conn.execute(sa.text("GRANT ALL ON alert_events TO poller_user"))
    # =========================================================================
    # CREATE firmware_versions TABLE (global — NOT tenant-scoped)
    # =========================================================================
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS firmware_versions (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            architecture TEXT NOT NULL,
            channel TEXT NOT NULL,
            version TEXT NOT NULL,
            npk_url TEXT NOT NULL,
            npk_local_path TEXT,
            npk_size_bytes BIGINT,
            checked_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            UNIQUE(architecture, channel, version)
        )
    """))
    conn.execute(sa.text(
        "CREATE INDEX IF NOT EXISTS idx_firmware_versions_arch_channel "
        "ON firmware_versions (architecture, channel)"
    ))
    # No RLS on firmware_versions — global cache table
    conn.execute(sa.text(
        "GRANT SELECT, INSERT, UPDATE ON firmware_versions TO app_user"
    ))
    conn.execute(sa.text("GRANT ALL ON firmware_versions TO poller_user"))
    # =========================================================================
    # CREATE firmware_upgrade_jobs TABLE
    # =========================================================================
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS firmware_upgrade_jobs (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
            device_id UUID NOT NULL REFERENCES devices(id) ON DELETE CASCADE,
            rollout_group_id UUID,
            target_version TEXT NOT NULL,
            architecture TEXT NOT NULL,
            channel TEXT NOT NULL,
            status TEXT NOT NULL DEFAULT 'pending',
            pre_upgrade_backup_sha TEXT,
            scheduled_at TIMESTAMPTZ,
            started_at TIMESTAMPTZ,
            completed_at TIMESTAMPTZ,
            error_message TEXT,
            confirmed_major_upgrade BOOLEAN NOT NULL DEFAULT FALSE,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        )
    """))
    conn.execute(sa.text("ALTER TABLE firmware_upgrade_jobs ENABLE ROW LEVEL SECURITY"))
    conn.execute(sa.text("""
        CREATE POLICY tenant_isolation ON firmware_upgrade_jobs
        USING (tenant_id::text = current_setting('app.current_tenant'))
    """))
    conn.execute(sa.text(
        "GRANT SELECT, INSERT, UPDATE, DELETE ON firmware_upgrade_jobs TO app_user"
    ))
    conn.execute(sa.text("GRANT ALL ON firmware_upgrade_jobs TO poller_user"))
    # =========================================================================
    # SEED DEFAULT ALERT RULES for all existing tenants
    # =========================================================================
    # Note: New tenant creation (in the tenants API router) should also seed
    # these three default rules. A _seed_default_alert_rules(tenant_id) helper
    # should be created in the alerts router or a shared service for this.
    # Each INSERT skips tenants that already have a default rule of the same
    # name, making the seed idempotent (alert_rules has no unique constraint,
    # so ON CONFLICT DO NOTHING would not help here).
    conn.execute(sa.text("""
        INSERT INTO alert_rules (id, tenant_id, name, metric, operator, threshold, duration_polls, severity, enabled, is_default)
        SELECT gen_random_uuid(), t.id, 'High CPU Usage', 'cpu_load', 'gt', 90, 5, 'warning', TRUE, TRUE
        FROM tenants t
        WHERE NOT EXISTS (
            SELECT 1 FROM alert_rules r
            WHERE r.tenant_id = t.id AND r.name = 'High CPU Usage' AND r.is_default
        )
    """))
    conn.execute(sa.text("""
        INSERT INTO alert_rules (id, tenant_id, name, metric, operator, threshold, duration_polls, severity, enabled, is_default)
        SELECT gen_random_uuid(), t.id, 'High Memory Usage', 'memory_used_pct', 'gt', 90, 5, 'warning', TRUE, TRUE
        FROM tenants t
        WHERE NOT EXISTS (
            SELECT 1 FROM alert_rules r
            WHERE r.tenant_id = t.id AND r.name = 'High Memory Usage' AND r.is_default
        )
    """))
    conn.execute(sa.text("""
        INSERT INTO alert_rules (id, tenant_id, name, metric, operator, threshold, duration_polls, severity, enabled, is_default)
        SELECT gen_random_uuid(), t.id, 'High Disk Usage', 'disk_used_pct', 'gt', 85, 3, 'warning', TRUE, TRUE
        FROM tenants t
        WHERE NOT EXISTS (
            SELECT 1 FROM alert_rules r
            WHERE r.tenant_id = t.id AND r.name = 'High Disk Usage' AND r.is_default
        )
    """))
def downgrade() -> None:
    """Remove alerting/firmware tables and the columns added by revision 005."""
    bind = op.get_bind()
    # Tables first, reverse dependency order (jobs/events before their parents).
    for table_name in (
        "firmware_upgrade_jobs",
        "firmware_versions",
        "alert_events",
        "alert_rule_channels",
        "notification_channels",
        "alert_rules",
    ):
        bind.execute(sa.text(f"DROP TABLE IF EXISTS {table_name} CASCADE"))
    # Then the columns this revision added to pre-existing tables.
    for table_name, column_name in (
        ("devices", "architecture"),
        ("devices", "preferred_channel"),
        ("device_groups", "preferred_channel"),
    ):
        bind.execute(sa.text(
            f"ALTER TABLE {table_name} DROP COLUMN IF EXISTS {column_name}"
        ))

View File

@@ -0,0 +1,212 @@
"""Add config templates, template push jobs, and device location columns.
Revision ID: 006
Revises: 005
Create Date: 2026-02-25
This migration:
1. ALTERs devices table: adds latitude and longitude columns.
2. Creates config_templates table.
3. Creates config_template_tags table.
4. Creates template_push_jobs table.
5. Applies RLS policies on all three new tables.
6. Seeds starter templates for all existing tenants.
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "006"
down_revision: Union[str, None] = "005"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Apply revision 006: device location columns, template tables, RLS, seeds.

    Creates config_templates / config_template_tags / template_push_jobs with
    tenant-isolation RLS and supporting indexes, adds latitude/longitude to
    devices, and seeds four starter templates per existing tenant. The seeds
    are idempotent: config_templates has UNIQUE(tenant_id, name) and the
    INSERTs use ON CONFLICT DO NOTHING.
    """
    conn = op.get_bind()
    # =========================================================================
    # ALTER devices TABLE — add latitude and longitude columns
    # =========================================================================
    conn.execute(sa.text(
        "ALTER TABLE devices ADD COLUMN IF NOT EXISTS latitude DOUBLE PRECISION"
    ))
    conn.execute(sa.text(
        "ALTER TABLE devices ADD COLUMN IF NOT EXISTS longitude DOUBLE PRECISION"
    ))
    # =========================================================================
    # CREATE config_templates TABLE
    # =========================================================================
    # variables holds a JSON array of {name, type, default, description}
    # entries (see the seed rows below for the exact shape).
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS config_templates (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
            name TEXT NOT NULL,
            description TEXT,
            content TEXT NOT NULL,
            variables JSONB NOT NULL DEFAULT '[]'::jsonb,
            created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
            updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
            UNIQUE(tenant_id, name)
        )
    """))
    # =========================================================================
    # CREATE config_template_tags TABLE
    # =========================================================================
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS config_template_tags (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
            name VARCHAR(100) NOT NULL,
            template_id UUID NOT NULL REFERENCES config_templates(id) ON DELETE CASCADE,
            UNIQUE(template_id, name)
        )
    """))
    # =========================================================================
    # CREATE template_push_jobs TABLE
    # =========================================================================
    # template_id is SET NULL on delete so job history survives template
    # removal; rendered_content keeps the exact text that was pushed.
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS template_push_jobs (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
            template_id UUID REFERENCES config_templates(id) ON DELETE SET NULL,
            device_id UUID NOT NULL REFERENCES devices(id) ON DELETE CASCADE,
            rollout_id UUID,
            rendered_content TEXT NOT NULL,
            status TEXT NOT NULL DEFAULT 'pending',
            pre_push_backup_sha TEXT,
            error_message TEXT,
            started_at TIMESTAMPTZ,
            completed_at TIMESTAMPTZ,
            created_at TIMESTAMPTZ NOT NULL DEFAULT now()
        )
    """))
    # =========================================================================
    # RLS POLICIES
    # =========================================================================
    # NOTE(review): current_setting is called without the missing_ok argument,
    # so queries error if app.current_tenant is unset — assumes the app always
    # sets it (revisions 008/009 use the tolerant two-arg form); confirm.
    for table in ("config_templates", "config_template_tags", "template_push_jobs"):
        conn.execute(sa.text(f"ALTER TABLE {table} ENABLE ROW LEVEL SECURITY"))
        conn.execute(sa.text(f"""
            CREATE POLICY {table}_tenant_isolation ON {table}
            USING (tenant_id = current_setting('app.current_tenant')::uuid)
        """))
        conn.execute(sa.text(
            f"GRANT SELECT, INSERT, UPDATE, DELETE ON {table} TO app_user"
        ))
        conn.execute(sa.text(f"GRANT ALL ON {table} TO poller_user"))
    # =========================================================================
    # INDEXES
    # =========================================================================
    conn.execute(sa.text(
        "CREATE INDEX IF NOT EXISTS idx_config_templates_tenant "
        "ON config_templates (tenant_id)"
    ))
    conn.execute(sa.text(
        "CREATE INDEX IF NOT EXISTS idx_config_template_tags_template "
        "ON config_template_tags (template_id)"
    ))
    conn.execute(sa.text(
        "CREATE INDEX IF NOT EXISTS idx_template_push_jobs_tenant_rollout "
        "ON template_push_jobs (tenant_id, rollout_id)"
    ))
    conn.execute(sa.text(
        "CREATE INDEX IF NOT EXISTS idx_template_push_jobs_device_status "
        "ON template_push_jobs (device_id, status)"
    ))
    # =========================================================================
    # SEED STARTER TEMPLATES for all existing tenants
    # =========================================================================
    # Template content uses {{ var }} placeholders matching the entries in the
    # variables JSON column; ON CONFLICT DO NOTHING relies on the
    # UNIQUE(tenant_id, name) constraint so re-runs leave existing rows alone.
    # 1. Basic Firewall
    conn.execute(sa.text("""
        INSERT INTO config_templates (id, tenant_id, name, description, content, variables)
        SELECT
            gen_random_uuid(),
            t.id,
            'Basic Firewall',
            'Standard firewall ruleset with WAN protection and LAN forwarding',
            '/ip firewall filter
add chain=input connection-state=established,related action=accept
add chain=input connection-state=invalid action=drop
add chain=input in-interface={{ wan_interface }} protocol=tcp dst-port=8291 action=drop comment="Block Winbox from WAN"
add chain=input in-interface={{ wan_interface }} protocol=tcp dst-port=22 action=drop comment="Block SSH from WAN"
add chain=forward connection-state=established,related action=accept
add chain=forward connection-state=invalid action=drop
add chain=forward src-address={{ allowed_network }} action=accept
add chain=forward action=drop',
            '[{"name":"wan_interface","type":"string","default":"ether1","description":"WAN-facing interface"},{"name":"allowed_network","type":"subnet","default":"192.168.1.0/24","description":"Allowed source network"}]'::jsonb
        FROM tenants t
        ON CONFLICT DO NOTHING
    """))
    # 2. DHCP Server Setup
    conn.execute(sa.text("""
        INSERT INTO config_templates (id, tenant_id, name, description, content, variables)
        SELECT
            gen_random_uuid(),
            t.id,
            'DHCP Server Setup',
            'Configure DHCP server with address pool, DNS, and gateway',
            '/ip pool add name=dhcp-pool ranges={{ pool_start }}-{{ pool_end }}
/ip dhcp-server network add address={{ gateway }}/24 gateway={{ gateway }} dns-server={{ dns_server }}
/ip dhcp-server add name=dhcp1 interface={{ interface }} address-pool=dhcp-pool disabled=no',
            '[{"name":"pool_start","type":"ip","default":"192.168.1.100","description":"DHCP pool start address"},{"name":"pool_end","type":"ip","default":"192.168.1.254","description":"DHCP pool end address"},{"name":"gateway","type":"ip","default":"192.168.1.1","description":"Default gateway"},{"name":"dns_server","type":"ip","default":"8.8.8.8","description":"DNS server address"},{"name":"interface","type":"string","default":"bridge1","description":"Interface to serve DHCP on"}]'::jsonb
        FROM tenants t
        ON CONFLICT DO NOTHING
    """))
    # 3. Wireless AP Config
    conn.execute(sa.text("""
        INSERT INTO config_templates (id, tenant_id, name, description, content, variables)
        SELECT
            gen_random_uuid(),
            t.id,
            'Wireless AP Config',
            'Configure wireless access point with WPA2 security',
            '/interface wireless security-profiles add name=portal-wpa2 mode=dynamic-keys authentication-types=wpa2-psk wpa2-pre-shared-key={{ password }}
/interface wireless set wlan1 mode=ap-bridge ssid={{ ssid }} security-profile=portal-wpa2 frequency={{ frequency }} channel-width={{ channel_width }} disabled=no',
            '[{"name":"ssid","type":"string","default":"MikroTik-AP","description":"Wireless network name"},{"name":"password","type":"string","default":"","description":"WPA2 pre-shared key (min 8 characters)"},{"name":"frequency","type":"integer","default":"2412","description":"Wireless frequency in MHz"},{"name":"channel_width","type":"string","default":"20/40mhz-XX","description":"Channel width setting"}]'::jsonb
        FROM tenants t
        ON CONFLICT DO NOTHING
    """))
    # 4. Initial Device Setup
    conn.execute(sa.text("""
        INSERT INTO config_templates (id, tenant_id, name, description, content, variables)
        SELECT
            gen_random_uuid(),
            t.id,
            'Initial Device Setup',
            'Set device identity, NTP, DNS, and disable unused services',
            '/system identity set name={{ device.hostname }}
/system ntp client set enabled=yes servers={{ ntp_server }}
/ip dns set servers={{ dns_servers }} allow-remote-requests=no
/ip service disable telnet,ftp,www,api-ssl
/ip service set ssh port=22
/ip service set winbox port=8291',
            '[{"name":"ntp_server","type":"ip","default":"pool.ntp.org","description":"NTP server address"},{"name":"dns_servers","type":"string","default":"8.8.8.8,8.8.4.4","description":"Comma-separated DNS servers"}]'::jsonb
        FROM tenants t
        ON CONFLICT DO NOTHING
    """))
def downgrade() -> None:
    """Drop revision 006's template tables and device location columns."""
    bind = op.get_bind()
    # Tables in reverse dependency order (jobs/tags reference templates).
    for table_name in (
        "template_push_jobs",
        "config_template_tags",
        "config_templates",
    ):
        bind.execute(sa.text(f"DROP TABLE IF EXISTS {table_name} CASCADE"))
    # Location columns added to devices by this revision.
    for column_name in ("latitude", "longitude"):
        bind.execute(sa.text(
            f"ALTER TABLE devices DROP COLUMN IF EXISTS {column_name}"
        ))

View File

@@ -0,0 +1,82 @@
"""Create audit_logs table with RLS policy.
Revision ID: 007
Revises: 006
Create Date: 2026-03-02
This migration:
1. Creates audit_logs table for centralized audit trail.
2. Applies RLS policy for tenant isolation.
3. Creates indexes for fast paginated and filtered queries.
4. Grants SELECT, INSERT to app_user (read and write audit entries).
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "007"
down_revision: Union[str, None] = "006"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create the audit_logs table with tenant RLS, role grants, and indexes."""
    bind = op.get_bind()

    def _run(statement: str) -> None:
        # Every step of this revision is raw SQL on the migration connection.
        bind.execute(sa.text(statement))

    # Audit trail table: user_id/device_id are SET NULL on delete so log rows
    # outlive the objects they reference; details carries free-form JSON.
    _run("""
        CREATE TABLE IF NOT EXISTS audit_logs (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
            user_id UUID REFERENCES users(id) ON DELETE SET NULL,
            action VARCHAR(100) NOT NULL,
            resource_type VARCHAR(50),
            resource_id VARCHAR(255),
            device_id UUID REFERENCES devices(id) ON DELETE SET NULL,
            details JSONB NOT NULL DEFAULT '{}'::jsonb,
            ip_address VARCHAR(45),
            created_at TIMESTAMPTZ NOT NULL DEFAULT now()
        )
    """)

    # Tenant isolation via row-level security.
    _run("ALTER TABLE audit_logs ENABLE ROW LEVEL SECURITY")
    _run("""
        CREATE POLICY audit_logs_tenant_isolation ON audit_logs
        USING (tenant_id = current_setting('app.current_tenant')::uuid)
    """)

    # app_user may read and append only — audit entries are immutable.
    _run("GRANT SELECT, INSERT ON audit_logs TO app_user")
    # poller_user gets full access for cross-tenant audit logging.
    _run("GRANT ALL ON audit_logs TO poller_user")

    # Indexes backing paginated (newest-first) and action-filtered listings.
    _run(
        "CREATE INDEX IF NOT EXISTS idx_audit_logs_tenant_created "
        "ON audit_logs (tenant_id, created_at DESC)"
    )
    _run(
        "CREATE INDEX IF NOT EXISTS idx_audit_logs_tenant_action "
        "ON audit_logs (tenant_id, action)"
    )
def downgrade() -> None:
    """Drop audit_logs; its policy and indexes are removed with the table."""
    op.get_bind().execute(sa.text("DROP TABLE IF EXISTS audit_logs CASCADE"))

View File

@@ -0,0 +1,86 @@
"""Add maintenance_windows table with RLS.
Revision ID: 008
Revises: 007
Create Date: 2026-03-02
This migration:
1. Creates maintenance_windows table for scheduling maintenance periods.
2. Adds CHECK constraint (end_at > start_at).
3. Creates composite index on (tenant_id, start_at, end_at) for active window queries.
4. Applies RLS policy matching the standard tenant_id isolation pattern.
5. Grants permissions to app_user role.
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "008"
down_revision: Union[str, None] = "007"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Apply revision 008: maintenance_windows table, index, RLS, grants.

    Unlike earlier revisions, both the policy and the grant are wrapped in
    DO $$ existence checks, so this migration is fully re-runnable.
    """
    conn = op.get_bind()
    # ── 1. Create maintenance_windows table ────────────────────────────────
    # device_ids is a JSON array of device UUIDs (no FK — membership is
    # resolved in application code); CHECK enforces a non-empty time range.
    conn.execute(sa.text("""
        CREATE TABLE IF NOT EXISTS maintenance_windows (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
            name VARCHAR(200) NOT NULL,
            device_ids JSONB NOT NULL DEFAULT '[]'::jsonb,
            start_at TIMESTAMPTZ NOT NULL,
            end_at TIMESTAMPTZ NOT NULL,
            suppress_alerts BOOLEAN NOT NULL DEFAULT true,
            notes TEXT,
            created_by UUID REFERENCES users(id) ON DELETE SET NULL,
            created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
            updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
            CONSTRAINT chk_maintenance_window_dates CHECK (end_at > start_at)
        )
    """))
    # ── 2. Composite index for active window queries ───────────────────────
    conn.execute(sa.text("""
        CREATE INDEX IF NOT EXISTS idx_maintenance_windows_tenant_time
        ON maintenance_windows (tenant_id, start_at, end_at)
    """))
    # ── 3. RLS policy ─────────────────────────────────────────────────────
    # NULLIF(current_setting(..., true), '') degrades to NULL (no rows
    # visible) instead of raising when app.current_tenant is unset — a
    # tolerant variant of the pattern used by earlier revisions.
    conn.execute(sa.text("ALTER TABLE maintenance_windows ENABLE ROW LEVEL SECURITY"))
    conn.execute(sa.text("""
        DO $$
        BEGIN
            IF NOT EXISTS (
                SELECT 1 FROM pg_policies
                WHERE tablename = 'maintenance_windows' AND policyname = 'maintenance_windows_tenant_isolation'
            ) THEN
                CREATE POLICY maintenance_windows_tenant_isolation ON maintenance_windows
                    USING (tenant_id = NULLIF(current_setting('app.current_tenant', true), '')::uuid);
            END IF;
        END
        $$
    """))
    # ── 4. Grant permissions to app_user ───────────────────────────────────
    # Guarded so the migration also succeeds on databases without app_user.
    conn.execute(sa.text("""
        DO $$
        BEGIN
            IF EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'app_user') THEN
                GRANT SELECT, INSERT, UPDATE, DELETE ON maintenance_windows TO app_user;
            END IF;
        END
        $$
    """))
def downgrade() -> None:
    """Drop maintenance_windows; the policy and index go with the table."""
    op.get_bind().execute(sa.text("DROP TABLE IF EXISTS maintenance_windows CASCADE"))

View File

@@ -0,0 +1,93 @@
"""Add api_keys table with RLS for tenant-scoped API key management.
Revision ID: 009
Revises: 008
Create Date: 2026-03-02
This migration:
1. Creates api_keys table (UUID PK, tenant_id FK, user_id FK, key_hash, scopes JSONB).
2. Adds unique index on key_hash for O(1) validation lookups.
3. Adds composite index on (tenant_id, revoked_at) for listing active keys.
4. Applies RLS policy on tenant_id.
5. Grants SELECT, INSERT, UPDATE to app_user.
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "009"
down_revision: Union[str, None] = "008"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Apply revision 009: api_keys table, lookup indexes, forced RLS, grants."""
    conn = op.get_bind()
    # 1. Create api_keys table
    # key_hash (VARCHAR(64)) holds the full key's digest — presumably a
    # SHA-256 hex string; confirm against the key-issuing service. key_prefix
    # keeps a short displayable fragment; revoked_at IS NULL means active.
    conn.execute(
        sa.text("""
        CREATE TABLE IF NOT EXISTS api_keys (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
            user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
            name VARCHAR(200) NOT NULL,
            key_prefix VARCHAR(12) NOT NULL,
            key_hash VARCHAR(64) NOT NULL,
            scopes JSONB NOT NULL DEFAULT '[]'::jsonb,
            expires_at TIMESTAMPTZ,
            last_used_at TIMESTAMPTZ,
            created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
            revoked_at TIMESTAMPTZ
        );
        """)
    )
    # 2. Unique index on key_hash for fast validation lookups
    conn.execute(
        sa.text("""
        CREATE UNIQUE INDEX IF NOT EXISTS ix_api_keys_key_hash
        ON api_keys (key_hash);
        """)
    )
    # 3. Composite index for listing active keys per tenant
    conn.execute(
        sa.text("""
        CREATE INDEX IF NOT EXISTS ix_api_keys_tenant_revoked
        ON api_keys (tenant_id, revoked_at);
        """)
    )
    # 4. Enable RLS and create tenant isolation policy
    # FORCE subjects the table owner to the policy too. The policy admits a
    # row when the session tenant matches, or when the session is flagged
    # with the literal 'super_admin' sentinel value.
    conn.execute(sa.text("ALTER TABLE api_keys ENABLE ROW LEVEL SECURITY;"))
    conn.execute(sa.text("ALTER TABLE api_keys FORCE ROW LEVEL SECURITY;"))
    conn.execute(
        sa.text("""
        DO $$
        BEGIN
            IF NOT EXISTS (
                SELECT 1 FROM pg_policies
                WHERE tablename = 'api_keys' AND policyname = 'tenant_isolation'
            ) THEN
                CREATE POLICY tenant_isolation ON api_keys
                USING (
                    tenant_id::text = current_setting('app.current_tenant', true)
                    OR current_setting('app.current_tenant', true) = 'super_admin'
                );
            END IF;
        END $$;
        """)
    )
    # 5. Grant permissions to app_user role
    # No DELETE: keys are revoked (revoked_at set via UPDATE), never removed.
    conn.execute(sa.text("GRANT SELECT, INSERT, UPDATE ON api_keys TO app_user;"))
def downgrade() -> None:
    """Remove the api_keys table along with its indexes and policy."""
    op.get_bind().execute(sa.text("DROP TABLE IF EXISTS api_keys CASCADE;"))

View File

@@ -0,0 +1,90 @@
"""Add vpn_config and vpn_peers tables for WireGuard VPN management.
Revision ID: 010
Revises: 009
Create Date: 2026-03-02
This migration:
1. Creates vpn_config table (one row per tenant — server keys, subnet, port).
2. Creates vpn_peers table (one row per device VPN connection).
3. Applies RLS policies on tenant_id.
4. Grants SELECT, INSERT, UPDATE, DELETE to app_user.
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
# revision identifiers
revision: str = "010"
down_revision: Union[str, None] = "009"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Apply revision 010: WireGuard vpn_config / vpn_peers tables with RLS.

    Both policies are created FOR ALL but restricted TO app_user and use the
    two-arg current_setting (missing_ok), so an unset app.current_tenant
    yields no visible rows rather than an error.
    """
    # ── vpn_config: one row per tenant ──
    # tenant_id is UNIQUE: at most one WireGuard server config per tenant.
    op.create_table(
        "vpn_config",
        sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True),
        sa.Column("tenant_id", UUID(as_uuid=True), sa.ForeignKey("tenants.id", ondelete="CASCADE"), nullable=False, unique=True),
        sa.Column("server_private_key", sa.LargeBinary(), nullable=False),  # AES-256-GCM encrypted
        sa.Column("server_public_key", sa.String(64), nullable=False),
        sa.Column("subnet", sa.String(32), nullable=False, server_default="10.10.0.0/24"),
        sa.Column("server_port", sa.Integer(), nullable=False, server_default="51820"),
        sa.Column("server_address", sa.String(32), nullable=False, server_default="10.10.0.1/24"),
        sa.Column("endpoint", sa.String(255), nullable=True),  # public hostname:port for devices to connect to
        sa.Column("is_enabled", sa.Boolean(), nullable=False, server_default="false"),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
    )
    # ── vpn_peers: one per device VPN connection ──
    # device_id is UNIQUE: each device has at most one peer entry.
    op.create_table(
        "vpn_peers",
        sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True),
        sa.Column("tenant_id", UUID(as_uuid=True), sa.ForeignKey("tenants.id", ondelete="CASCADE"), nullable=False),
        sa.Column("device_id", UUID(as_uuid=True), sa.ForeignKey("devices.id", ondelete="CASCADE"), nullable=False, unique=True),
        sa.Column("peer_private_key", sa.LargeBinary(), nullable=False),  # AES-256-GCM encrypted
        sa.Column("peer_public_key", sa.String(64), nullable=False),
        sa.Column("preshared_key", sa.LargeBinary(), nullable=True),  # AES-256-GCM encrypted, optional
        sa.Column("assigned_ip", sa.String(32), nullable=False),  # e.g. 10.10.0.2/24
        sa.Column("additional_allowed_ips", sa.String(512), nullable=True),  # comma-separated subnets for site-to-site
        sa.Column("is_enabled", sa.Boolean(), nullable=False, server_default="true"),
        sa.Column("last_handshake", sa.DateTime(timezone=True), nullable=True),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
    )
    # Indexes
    op.create_index("ix_vpn_peers_tenant_id", "vpn_peers", ["tenant_id"])
    # ── RLS policies ──
    op.execute("ALTER TABLE vpn_config ENABLE ROW LEVEL SECURITY")
    op.execute("""
        CREATE POLICY vpn_config_tenant_isolation ON vpn_config
        FOR ALL
        TO app_user
        USING (CAST(tenant_id AS text) = current_setting('app.current_tenant', true))
    """)
    op.execute("ALTER TABLE vpn_peers ENABLE ROW LEVEL SECURITY")
    op.execute("""
        CREATE POLICY vpn_peers_tenant_isolation ON vpn_peers
        FOR ALL
        TO app_user
        USING (CAST(tenant_id AS text) = current_setting('app.current_tenant', true))
    """)
    # ── Grants ──
    op.execute("GRANT SELECT, INSERT, UPDATE, DELETE ON vpn_config TO app_user")
    op.execute("GRANT SELECT, INSERT, UPDATE, DELETE ON vpn_peers TO app_user")
def downgrade() -> None:
    """Remove the WireGuard tables: policies first, then the tables themselves."""
    # Policy names follow the "<table>_tenant_isolation" convention from upgrade().
    for table_name in ("vpn_peers", "vpn_config"):
        op.execute(f"DROP POLICY IF EXISTS {table_name}_tenant_isolation ON {table_name}")
    for table_name in ("vpn_peers", "vpn_config"):
        op.drop_table(table_name)

View File

@@ -0,0 +1,169 @@
"""Seed starter config templates for tenants missing them.
Revision ID: 012
Revises: 010
Create Date: 2026-03-02
Re-seeds the 4 original starter templates from 006 plus a new comprehensive
'Basic Router' template for any tenants created after migration 006 ran.
Uses ON CONFLICT (tenant_id, name) DO NOTHING so existing templates are untouched.
"""
revision = "012"
down_revision = "010"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade() -> None:
    """Seed starter config templates for tenants that are missing them.

    Every INSERT ... SELECT below is guarded by a WHERE NOT EXISTS check
    on (tenant_id, name), so tenants that already have a template with
    the same name are left untouched and the migration is re-runnable.

    Fix applied here: the 'Initial Device Setup' template previously
    declared its ntp_server variable as type "ip" with default
    'pool.ntp.org' (a hostname). It is now declared "string", matching
    the same variable in the 'Basic Router' template.
    """
    conn = op.get_bind()
    # 1. Basic Router — comprehensive starter for a typical SOHO/branch router
    conn.execute(sa.text("""
        INSERT INTO config_templates (id, tenant_id, name, description, content, variables)
        SELECT
            gen_random_uuid(),
            t.id,
            'Basic Router',
            'Complete SOHO/branch router setup: WAN on ether1, LAN bridge, DHCP, DNS, NAT, basic firewall',
            '/interface bridge add name=bridge-lan comment="LAN bridge"
/interface bridge port add bridge=bridge-lan interface=ether2
/interface bridge port add bridge=bridge-lan interface=ether3
/interface bridge port add bridge=bridge-lan interface=ether4
/interface bridge port add bridge=bridge-lan interface=ether5
# WAN — DHCP client on ether1
/ip dhcp-client add interface={{ wan_interface }} disabled=no comment="WAN uplink"
# LAN address
/ip address add address={{ lan_gateway }}/{{ lan_cidr }} interface=bridge-lan
# DNS
/ip dns set servers={{ dns_servers }} allow-remote-requests=yes
# DHCP server for LAN
/ip pool add name=lan-pool ranges={{ dhcp_start }}-{{ dhcp_end }}
/ip dhcp-server network add address={{ lan_network }}/{{ lan_cidr }} gateway={{ lan_gateway }} dns-server={{ lan_gateway }}
/ip dhcp-server add name=lan-dhcp interface=bridge-lan address-pool=lan-pool disabled=no
# NAT masquerade
/ip firewall nat add chain=srcnat out-interface={{ wan_interface }} action=masquerade
# Firewall — input chain
/ip firewall filter
add chain=input connection-state=established,related action=accept
add chain=input connection-state=invalid action=drop
add chain=input in-interface={{ wan_interface }} action=drop comment="Drop all other WAN input"
# Firewall — forward chain
add chain=forward connection-state=established,related action=accept
add chain=forward connection-state=invalid action=drop
add chain=forward in-interface=bridge-lan out-interface={{ wan_interface }} action=accept comment="Allow LAN to WAN"
add chain=forward action=drop comment="Drop everything else"
# NTP
/system ntp client set enabled=yes servers={{ ntp_server }}
# Identity
/system identity set name={{ device.hostname }}',
            '[{"name":"wan_interface","type":"string","default":"ether1","description":"WAN-facing interface"},{"name":"lan_gateway","type":"ip","default":"192.168.88.1","description":"LAN gateway IP"},{"name":"lan_cidr","type":"integer","default":"24","description":"LAN subnet mask bits"},{"name":"lan_network","type":"ip","default":"192.168.88.0","description":"LAN network address"},{"name":"dhcp_start","type":"ip","default":"192.168.88.100","description":"DHCP pool start"},{"name":"dhcp_end","type":"ip","default":"192.168.88.254","description":"DHCP pool end"},{"name":"dns_servers","type":"string","default":"8.8.8.8,8.8.4.4","description":"Upstream DNS servers"},{"name":"ntp_server","type":"string","default":"pool.ntp.org","description":"NTP server"}]'::jsonb
        FROM tenants t
        WHERE NOT EXISTS (
            SELECT 1 FROM config_templates ct
            WHERE ct.tenant_id = t.id AND ct.name = 'Basic Router'
        )
    """))
    # 2. Re-seed Basic Firewall (for tenants missing it)
    conn.execute(sa.text("""
        INSERT INTO config_templates (id, tenant_id, name, description, content, variables)
        SELECT
            gen_random_uuid(),
            t.id,
            'Basic Firewall',
            'Standard firewall ruleset with WAN protection and LAN forwarding',
            '/ip firewall filter
add chain=input connection-state=established,related action=accept
add chain=input connection-state=invalid action=drop
add chain=input in-interface={{ wan_interface }} protocol=tcp dst-port=8291 action=drop comment="Block Winbox from WAN"
add chain=input in-interface={{ wan_interface }} protocol=tcp dst-port=22 action=drop comment="Block SSH from WAN"
add chain=forward connection-state=established,related action=accept
add chain=forward connection-state=invalid action=drop
add chain=forward src-address={{ allowed_network }} action=accept
add chain=forward action=drop',
            '[{"name":"wan_interface","type":"string","default":"ether1","description":"WAN-facing interface"},{"name":"allowed_network","type":"subnet","default":"192.168.88.0/24","description":"Allowed source network"}]'::jsonb
        FROM tenants t
        WHERE NOT EXISTS (
            SELECT 1 FROM config_templates ct
            WHERE ct.tenant_id = t.id AND ct.name = 'Basic Firewall'
        )
    """))
    # 3. Re-seed DHCP Server Setup
    conn.execute(sa.text("""
        INSERT INTO config_templates (id, tenant_id, name, description, content, variables)
        SELECT
            gen_random_uuid(),
            t.id,
            'DHCP Server Setup',
            'Configure DHCP server with address pool, DNS, and gateway',
            '/ip pool add name=dhcp-pool ranges={{ pool_start }}-{{ pool_end }}
/ip dhcp-server network add address={{ gateway }}/24 gateway={{ gateway }} dns-server={{ dns_server }}
/ip dhcp-server add name=dhcp1 interface={{ interface }} address-pool=dhcp-pool disabled=no',
            '[{"name":"pool_start","type":"ip","default":"192.168.88.100","description":"DHCP pool start address"},{"name":"pool_end","type":"ip","default":"192.168.88.254","description":"DHCP pool end address"},{"name":"gateway","type":"ip","default":"192.168.88.1","description":"Default gateway"},{"name":"dns_server","type":"ip","default":"8.8.8.8","description":"DNS server address"},{"name":"interface","type":"string","default":"bridge-lan","description":"Interface to serve DHCP on"}]'::jsonb
        FROM tenants t
        WHERE NOT EXISTS (
            SELECT 1 FROM config_templates ct
            WHERE ct.tenant_id = t.id AND ct.name = 'DHCP Server Setup'
        )
    """))
    # 4. Re-seed Wireless AP Config
    conn.execute(sa.text("""
        INSERT INTO config_templates (id, tenant_id, name, description, content, variables)
        SELECT
            gen_random_uuid(),
            t.id,
            'Wireless AP Config',
            'Configure wireless access point with WPA2 security',
            '/interface wireless security-profiles add name=portal-wpa2 mode=dynamic-keys authentication-types=wpa2-psk wpa2-pre-shared-key={{ password }}
/interface wireless set wlan1 mode=ap-bridge ssid={{ ssid }} security-profile=portal-wpa2 frequency={{ frequency }} channel-width={{ channel_width }} disabled=no',
            '[{"name":"ssid","type":"string","default":"MikroTik-AP","description":"Wireless network name"},{"name":"password","type":"string","default":"","description":"WPA2 pre-shared key (min 8 characters)"},{"name":"frequency","type":"integer","default":"2412","description":"Wireless frequency in MHz"},{"name":"channel_width","type":"string","default":"20/40mhz-XX","description":"Channel width setting"}]'::jsonb
        FROM tenants t
        WHERE NOT EXISTS (
            SELECT 1 FROM config_templates ct
            WHERE ct.tenant_id = t.id AND ct.name = 'Wireless AP Config'
        )
    """))
    # 5. Re-seed Initial Device Setup
    # (ntp_server is declared "string" — it defaults to a hostname, and the
    #  Basic Router template declares the same variable as "string".)
    conn.execute(sa.text("""
        INSERT INTO config_templates (id, tenant_id, name, description, content, variables)
        SELECT
            gen_random_uuid(),
            t.id,
            'Initial Device Setup',
            'Set device identity, NTP, DNS, and disable unused services',
            '/system identity set name={{ device.hostname }}
/system ntp client set enabled=yes servers={{ ntp_server }}
/ip dns set servers={{ dns_servers }} allow-remote-requests=no
/ip service disable telnet,ftp,www,api-ssl
/ip service set ssh port=22
/ip service set winbox port=8291',
            '[{"name":"ntp_server","type":"string","default":"pool.ntp.org","description":"NTP server address"},{"name":"dns_servers","type":"string","default":"8.8.8.8,8.8.4.4","description":"Comma-separated DNS servers"}]'::jsonb
        FROM tenants t
        WHERE NOT EXISTS (
            SELECT 1 FROM config_templates ct
            WHERE ct.tenant_id = t.id AND ct.name = 'Initial Device Setup'
        )
    """))
def downgrade() -> None:
    """Remove only the template introduced by this revision.

    The four re-seeded templates originally came from revision 006, so
    they are deliberately left in place.
    """
    bind = op.get_bind()
    delete_stmt = sa.text("DELETE FROM config_templates WHERE name = 'Basic Router'")
    bind.execute(delete_stmt)

View File

@@ -0,0 +1,203 @@
"""Add certificate authority and device certificate tables.
Revision ID: 013
Revises: 012
Create Date: 2026-03-03
Creates the `certificate_authorities` (one per tenant) and `device_certificates`
(one per device) tables for the Internal Certificate Authority feature.
Also adds a `tls_mode` column to the `devices` table to track per-device
TLS verification mode (insecure vs portal_ca).
Both tables have RLS policies for tenant isolation, plus poller_user read
access (the poller needs CA cert PEM to verify device TLS connections).
"""
revision = "013"
down_revision = "012"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
def upgrade() -> None:
    """Create the internal-CA schema and attach tenant-isolation RLS.

    Order matters: certificate_authorities must exist before
    device_certificates (FK target), and both tables must exist before
    the RLS/grant statements run. poller_user gets read access because
    it needs CA/cert PEMs to verify device TLS connections.
    """
    # One CA per tenant (tenant_id is UNIQUE).
    op.create_table(
        "certificate_authorities",
        sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True),
        sa.Column("tenant_id", UUID(as_uuid=True), sa.ForeignKey("tenants.id", ondelete="CASCADE"), nullable=False, unique=True),
        sa.Column("common_name", sa.String(255), nullable=False),
        sa.Column("cert_pem", sa.Text(), nullable=False),
        sa.Column("encrypted_private_key", sa.LargeBinary(), nullable=False),
        sa.Column("serial_number", sa.String(64), nullable=False),
        # 95 chars fits a colon-separated SHA-256 digest.
        sa.Column("fingerprint_sha256", sa.String(95), nullable=False),
        sa.Column("not_valid_before", sa.DateTime(timezone=True), nullable=False),
        sa.Column("not_valid_after", sa.DateTime(timezone=True), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()")),
    )
    # One row per issued device certificate.
    op.create_table(
        "device_certificates",
        sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True),
        sa.Column("tenant_id", UUID(as_uuid=True), sa.ForeignKey("tenants.id", ondelete="CASCADE"), nullable=False),
        sa.Column("device_id", UUID(as_uuid=True), sa.ForeignKey("devices.id", ondelete="CASCADE"), nullable=False),
        sa.Column("ca_id", UUID(as_uuid=True), sa.ForeignKey("certificate_authorities.id", ondelete="CASCADE"), nullable=False),
        sa.Column("common_name", sa.String(255), nullable=False),
        sa.Column("serial_number", sa.String(64), nullable=False),
        sa.Column("fingerprint_sha256", sa.String(95), nullable=False),
        sa.Column("cert_pem", sa.Text(), nullable=False),
        sa.Column("encrypted_private_key", sa.LargeBinary(), nullable=False),
        sa.Column("not_valid_before", sa.DateTime(timezone=True), nullable=False),
        sa.Column("not_valid_after", sa.DateTime(timezone=True), nullable=False),
        sa.Column("status", sa.String(20), nullable=False, server_default="issued"),
        sa.Column("deployed_at", sa.DateTime(timezone=True), nullable=True),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()")),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()")),
    )
    # Per-device TLS verification mode; existing rows default to "insecure".
    op.add_column(
        "devices",
        sa.Column("tls_mode", sa.String(20), nullable=False, server_default="insecure"),
    )
    # RLS + grants, applied identically to both new tables.
    bind = op.get_bind()
    for table in ("certificate_authorities", "device_certificates"):
        bind.execute(sa.text(f"ALTER TABLE {table} ENABLE ROW LEVEL SECURITY"))
        bind.execute(sa.text(
            f"GRANT SELECT, INSERT, UPDATE, DELETE ON {table} TO app_user"
        ))
        bind.execute(sa.text(
            f"CREATE POLICY tenant_isolation ON {table} FOR ALL "
            "USING (tenant_id = NULLIF(current_setting('app.current_tenant', true), '')::uuid) "
            "WITH CHECK (tenant_id = NULLIF(current_setting('app.current_tenant', true), '')::uuid)"
        ))
        bind.execute(sa.text(f"GRANT SELECT ON {table} TO poller_user"))
def downgrade() -> None:
    """Undo revision 013: policies, grants, tls_mode column, then tables."""
    bind = op.get_bind()
    # Policies go first — they depend on the tables.
    bind.execute(sa.text("DROP POLICY IF EXISTS tenant_isolation ON device_certificates"))
    bind.execute(sa.text("DROP POLICY IF EXISTS tenant_isolation ON certificate_authorities"))
    # Revoke both roles on both tables.
    for table in ("device_certificates", "certificate_authorities"):
        for role in ("app_user", "poller_user"):
            bind.execute(sa.text(f"REVOKE ALL ON {table} FROM {role}"))
    op.drop_column("devices", "tls_mode")
    # device_certificates references certificate_authorities — drop it first.
    op.drop_table("device_certificates")
    op.drop_table("certificate_authorities")

View File

@@ -0,0 +1,50 @@
"""Add TimescaleDB retention policies.
Revision ID: 014
Revises: 013
Create Date: 2026-03-03
Adds retention (drop after 90 days) on all three hypertables:
interface_metrics, health_metrics, wireless_metrics.
Note: Compression is skipped because TimescaleDB 2.17.x does not support
compression on tables with row-level security (RLS) policies.
Compression can be re-added when upgrading to TimescaleDB >= 2.19.
Without retention policies the database grows ~5 GB/month unbounded.
"""
revision = "014"
down_revision = "013"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
# TimescaleDB hypertables that receive the 90-day retention policy.
HYPERTABLES = [
    "interface_metrics",
    "health_metrics",
    "wireless_metrics",
]
def upgrade() -> None:
    """Attach a 90-day retention policy to every metrics hypertable.

    `if_not_exists => true` makes the statement a no-op when a policy is
    already present (e.g. added manually), mirroring the `if_exists`
    guard used by downgrade() and keeping the migration idempotent.
    """
    conn = op.get_bind()
    for table in HYPERTABLES:
        # Drop chunks older than 90 days
        conn.execute(sa.text(
            f"SELECT add_retention_policy('{table}', INTERVAL '90 days', if_not_exists => true)"
        ))
def downgrade() -> None:
    """Detach the retention policy from every metrics hypertable."""
    bind = op.get_bind()
    for hypertable in HYPERTABLES:
        # if_exists guards against policies already removed by hand.
        bind.execute(sa.text(
            f"SELECT remove_retention_policy('{hypertable}', if_exists => true)"
        ))

View File

@@ -0,0 +1,62 @@
"""Add password_reset_tokens table.
Revision ID: 015
Revises: 014
Create Date: 2026-03-03
Stores one-time password reset tokens with expiry. Tokens are hashed
with SHA-256 so a database leak doesn't expose reset links.
"""
revision = "015"
down_revision = "014"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
def upgrade() -> None:
    """Create password_reset_tokens: hashed one-time tokens with expiry."""
    op.create_table(
        "password_reset_tokens",
        sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True),
        # Token owner; rows disappear with the user.
        sa.Column("user_id", UUID(as_uuid=True), sa.ForeignKey("users.id", ondelete="CASCADE"), nullable=False),
        # 64 chars = SHA-256 hex digest of the raw token; unique + indexed.
        sa.Column("token_hash", sa.String(64), nullable=False, unique=True, index=True),
        sa.Column("expires_at", sa.DateTime(timezone=True), nullable=False),
        # Set when the token is consumed; NULL while still valid.
        sa.Column("used_at", sa.DateTime(timezone=True), nullable=True),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()")),
    )
def downgrade() -> None:
    """Drop the password_reset_tokens table (tokens are disposable state)."""
    op.drop_table("password_reset_tokens")

View File

@@ -0,0 +1,207 @@
"""Add zero-knowledge authentication schema.
Revision ID: 016
Revises: 015
Create Date: 2026-03-03
Adds SRP columns to users, creates user_key_sets table for encrypted
key bundles, creates immutable key_access_log audit trail, and adds
vault key columns to tenants (Phase 29 preparation).
Both new tables have RLS policies. key_access_log is append-only
(INSERT+SELECT only, no UPDATE/DELETE).
"""
revision = "016"
down_revision = "015"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
def upgrade() -> None:
    """Install the zero-knowledge auth schema (SRP + encrypted key bundles).

    Columns and tables are created first, then RLS policies and grants
    are attached. key_access_log is deliberately append-only: roles get
    INSERT+SELECT but no UPDATE/DELETE.
    """
    # SRP columns are nullable — legacy bcrypt users have no verifier yet.
    op.add_column("users", sa.Column("srp_salt", sa.LargeBinary(), nullable=True))
    op.add_column("users", sa.Column("srp_verifier", sa.LargeBinary(), nullable=True))
    op.add_column(
        "users",
        sa.Column("auth_version", sa.SmallInteger(), server_default=sa.text("1"), nullable=False),
    )
    # Encrypted per-user key bundle; exactly one row per user (user_id UNIQUE).
    op.create_table(
        "user_key_sets",
        sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True),
        sa.Column("user_id", UUID(as_uuid=True), sa.ForeignKey("users.id", ondelete="CASCADE"), nullable=False, unique=True),
        # NULL tenant_id marks a super_admin key set.
        sa.Column("tenant_id", UUID(as_uuid=True), sa.ForeignKey("tenants.id", ondelete="CASCADE"), nullable=True),
        sa.Column("encrypted_private_key", sa.LargeBinary(), nullable=False),
        sa.Column("private_key_nonce", sa.LargeBinary(), nullable=False),
        sa.Column("encrypted_vault_key", sa.LargeBinary(), nullable=False),
        sa.Column("vault_key_nonce", sa.LargeBinary(), nullable=False),
        sa.Column("public_key", sa.LargeBinary(), nullable=False),
        sa.Column("pbkdf2_iterations", sa.Integer(), server_default=sa.text("650000"), nullable=False),
        sa.Column("pbkdf2_salt", sa.LargeBinary(), nullable=False),
        sa.Column("hkdf_salt", sa.LargeBinary(), nullable=False),
        sa.Column("key_version", sa.Integer(), server_default=sa.text("1"), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()")),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()")),
    )
    # Immutable audit trail of key usage.
    op.create_table(
        "key_access_log",
        sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True),
        sa.Column("tenant_id", UUID(as_uuid=True), sa.ForeignKey("tenants.id", ondelete="CASCADE"), nullable=False),
        # Keep the log row even if the acting user is later deleted.
        sa.Column("user_id", UUID(as_uuid=True), sa.ForeignKey("users.id", ondelete="SET NULL"), nullable=True),
        sa.Column("action", sa.Text(), nullable=False),
        sa.Column("resource_type", sa.Text(), nullable=True),
        sa.Column("resource_id", sa.Text(), nullable=True),
        sa.Column("key_version", sa.Integer(), nullable=True),
        sa.Column("ip_address", sa.Text(), nullable=True),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
    )
    # Tenant-level vault key columns (consumed in Phase 29).
    op.add_column("tenants", sa.Column("encrypted_vault_key", sa.LargeBinary(), nullable=True))
    op.add_column(
        "tenants",
        sa.Column("vault_key_version", sa.Integer(), server_default=sa.text("1")),
    )
    bind = op.get_bind()
    # user_key_sets: tenant isolation with an explicit super_admin bypass.
    bind.execute(sa.text("ALTER TABLE user_key_sets ENABLE ROW LEVEL SECURITY"))
    bind.execute(sa.text(
        "CREATE POLICY user_key_sets_tenant_isolation ON user_key_sets "
        "USING (tenant_id::text = current_setting('app.current_tenant', true) "
        "OR current_setting('app.current_tenant', true) = 'super_admin')"
    ))
    bind.execute(sa.text("GRANT SELECT, INSERT, UPDATE ON user_key_sets TO app_user"))
    # key_access_log: same isolation; append-only grants.
    bind.execute(sa.text("ALTER TABLE key_access_log ENABLE ROW LEVEL SECURITY"))
    bind.execute(sa.text(
        "CREATE POLICY key_access_log_tenant_isolation ON key_access_log "
        "USING (tenant_id::text = current_setting('app.current_tenant', true) "
        "OR current_setting('app.current_tenant', true) = 'super_admin')"
    ))
    bind.execute(sa.text("GRANT INSERT, SELECT ON key_access_log TO app_user"))
    # The poller logs key-access events when decrypting device credentials.
    bind.execute(sa.text("GRANT INSERT, SELECT ON key_access_log TO poller_user"))
def downgrade() -> None:
    """Tear down the zero-knowledge auth schema in reverse order."""
    bind = op.get_bind()
    # Policies first, then grants.
    bind.execute(sa.text("DROP POLICY IF EXISTS key_access_log_tenant_isolation ON key_access_log"))
    bind.execute(sa.text("DROP POLICY IF EXISTS user_key_sets_tenant_isolation ON user_key_sets"))
    bind.execute(sa.text("REVOKE ALL ON key_access_log FROM app_user"))
    bind.execute(sa.text("REVOKE ALL ON key_access_log FROM poller_user"))
    bind.execute(sa.text("REVOKE ALL ON user_key_sets FROM app_user"))
    # Tenant vault columns, then the tables, then the SRP user columns.
    op.drop_column("tenants", "vault_key_version")
    op.drop_column("tenants", "encrypted_vault_key")
    op.drop_table("key_access_log")
    op.drop_table("user_key_sets")
    for column in ("auth_version", "srp_verifier", "srp_salt"):
        op.drop_column("users", column)

View File

@@ -0,0 +1,90 @@
"""OpenBao envelope encryption columns and key_access_log extensions.
Revision ID: 017
Revises: 016
Create Date: 2026-03-03
Adds Transit ciphertext columns (TEXT) alongside existing BYTEA columns
for dual-write migration strategy. Extends key_access_log with device_id,
justification, and correlation_id for Phase 29 audit trail.
"""
revision = "017"
down_revision = "016"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
def upgrade() -> None:
    """Add Transit ciphertext columns and extend key_access_log.

    The TEXT ciphertext columns live alongside the legacy BYTEA columns
    so rows can be dual-written during the OpenBao migration window.
    """
    transit_columns = (
        ("devices", "encrypted_credentials_transit"),
        ("certificate_authorities", "encrypted_private_key_transit"),
        ("device_certificates", "encrypted_private_key_transit"),
        ("notification_channels", "smtp_password_transit"),
    )
    for table, column in transit_columns:
        op.add_column(table, sa.Column(column, sa.Text(), nullable=True))
    # Name of the per-tenant OpenBao Transit key.
    op.add_column("tenants", sa.Column("openbao_key_name", sa.Text(), nullable=True))
    # Phase 29 audit-trail fields on key_access_log.
    op.add_column("key_access_log", sa.Column("device_id", UUID(as_uuid=True), nullable=True))
    op.add_column("key_access_log", sa.Column("justification", sa.Text(), nullable=True))
    op.add_column("key_access_log", sa.Column("correlation_id", sa.Text(), nullable=True))
    # Link log rows to the device whose key was accessed (nullable FK).
    op.create_foreign_key(
        "fk_key_access_log_device_id",
        "key_access_log",
        "devices",
        ["device_id"],
        ["id"],
    )
def downgrade() -> None:
    """Drop the FK, the audit-trail fields, and all Transit columns."""
    op.drop_constraint("fk_key_access_log_device_id", "key_access_log", type_="foreignkey")
    for column in ("correlation_id", "justification", "device_id"):
        op.drop_column("key_access_log", column)
    op.drop_column("tenants", "openbao_key_name")
    # Reverse order of the columns added in upgrade().
    for table, column in (
        ("notification_channels", "smtp_password_transit"),
        ("device_certificates", "encrypted_private_key_transit"),
        ("certificate_authorities", "encrypted_private_key_transit"),
        ("devices", "encrypted_credentials_transit"),
    ):
        op.drop_column(table, column)

View File

@@ -0,0 +1,62 @@
"""Data encryption columns for config backups and audit logs.
Revision ID: 018
Revises: 017
Create Date: 2026-03-03
Adds encryption metadata columns to config_backup_runs (encryption_tier,
encryption_nonce) and encrypted_details TEXT column to audit_logs for
Transit-encrypted audit detail storage.
"""
revision = "018"
down_revision = "017"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade() -> None:
    """Add encryption metadata to config_backup_runs and audit_logs.

    encryption_tier semantics: NULL = plaintext, 1 = client-side
    AES-GCM, 2 = OpenBao Transit. encryption_nonce stores the 12-byte
    AES-GCM nonce used by Tier 1 backups.
    """
    tier = sa.Column(
        "encryption_tier",
        sa.SmallInteger(),
        nullable=True,
        comment="NULL=plaintext, 1=client-side AES-GCM, 2=OpenBao Transit",
    )
    op.add_column("config_backup_runs", tier)
    nonce = sa.Column(
        "encryption_nonce",
        sa.LargeBinary(),
        nullable=True,
        comment="12-byte AES-GCM nonce for Tier 1 backups",
    )
    op.add_column("config_backup_runs", nonce)
    details = sa.Column(
        "encrypted_details",
        sa.Text(),
        nullable=True,
        comment="Transit-encrypted details JSON (vault:v1:...)",
    )
    op.add_column("audit_logs", details)
def downgrade() -> None:
    """Remove the encryption metadata columns (reverse of the add order)."""
    for table, column in (
        ("audit_logs", "encrypted_details"),
        ("config_backup_runs", "encryption_nonce"),
        ("config_backup_runs", "encryption_tier"),
    ):
        op.drop_column(table, column)

View File

@@ -0,0 +1,52 @@
"""Deprecate bcrypt: add must_upgrade_auth flag and make hashed_password nullable.
Revision ID: 019
Revises: 018
Create Date: 2026-03-03
Conservative migration that flags legacy bcrypt users for SRP upgrade
rather than dropping data. hashed_password is made nullable so SRP-only
users no longer need a dummy value. A future migration (post-v6.0) can
drop hashed_password once all users have upgraded.
"""
import sqlalchemy as sa
from alembic import op
revision = "019"
down_revision = "018"
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Flag bcrypt-only users for SRP upgrade; relax hashed_password.

    Nothing is dropped: legacy accounts are only marked via
    must_upgrade_auth, and hashed_password becomes nullable so SRP-only
    users no longer need a placeholder hash.
    """
    flag = sa.Column("must_upgrade_auth", sa.Boolean(), server_default="false", nullable=False)
    op.add_column("users", flag)
    # bcrypt-only = still on auth_version 1 with no SRP verifier.
    op.execute(
        "UPDATE users SET must_upgrade_auth = true "
        "WHERE auth_version = 1 AND srp_verifier IS NULL"
    )
    op.alter_column("users", "hashed_password", nullable=True)
def downgrade() -> None:
    """Restore NOT NULL on hashed_password and drop the upgrade flag."""
    # Backfill a bcrypt-shaped placeholder so the constraint can be applied.
    op.execute(
        "UPDATE users SET hashed_password = '$2b$12$placeholder' "
        "WHERE hashed_password IS NULL"
    )
    op.alter_column("users", "hashed_password", nullable=False)
    op.drop_column("users", "must_upgrade_auth")

View File

@@ -0,0 +1,51 @@
"""Add opt-in plain-text TLS mode and change default from insecure to auto.
Revision ID: 020
Revises: 019
Create Date: 2026-03-04
Reclassifies tls_mode values:
- 'auto': CA-verified -> InsecureSkipVerify (NO plain-text fallback)
- 'insecure': Skip directly to InsecureSkipVerify
- 'plain': Explicit opt-in for plain-text API (dangerous)
- 'portal_ca': Existing CA-verified mode (unchanged)
Existing 'insecure' devices become 'auto' since the old behavior was
an implicit auto-fallback. portal_ca devices keep their mode.
"""
import sqlalchemy as sa
from alembic import op
revision = "020"
down_revision = "019"
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Reclassify 'insecure' devices as 'auto' and change the default.

    The old 'insecure' behavior was an implicit auto-fallback, so 'auto'
    is the faithful mapping; 'portal_ca' devices keep their mode.
    """
    op.execute("UPDATE devices SET tls_mode = 'auto' WHERE tls_mode = 'insecure'")
    op.alter_column("devices", "tls_mode", server_default="auto")
def downgrade() -> None:
    """Collapse 'auto' and 'plain' back to the pre-020 'insecure' mode."""
    # 'plain' did not exist before this revision, so it also maps to 'insecure'.
    for mode in ("auto", "plain"):
        op.execute(f"UPDATE devices SET tls_mode = 'insecure' WHERE tls_mode = '{mode}'")
    op.alter_column("devices", "tls_mode", server_default="insecure")

View File

@@ -0,0 +1,44 @@
"""Add system tenant for super_admin audit log entries.
Revision ID: 021
Revises: 020
Create Date: 2026-03-04
The super_admin has NULL tenant_id, but audit_logs.tenant_id has a FK
to tenants and is NOT NULL. Code was using uuid.UUID(int=0) as a
substitute, but that row didn't exist — causing FK violations that
silently dropped every super_admin audit entry.
This migration inserts a sentinel 'System (Internal)' tenant so
audit_logs can reference it.
"""
from alembic import op
revision = "021"
down_revision = "020"
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Insert the all-zeros sentinel tenant used by super_admin audit rows."""
    # ON CONFLICT keeps this safe to re-run or if the row already exists.
    op.execute(
        """
        INSERT INTO tenants (id, name, description)
        VALUES (
            '00000000-0000-0000-0000-000000000000',
            'System (Internal)',
            'Internal tenant for super_admin audit entries'
        )
        ON CONFLICT (id) DO NOTHING
        """
    )
def downgrade() -> None:
    """Delete the sentinel 'System (Internal)' tenant.

    NOTE(review): if any audit_logs rows reference this tenant, this
    DELETE will either fail (plain FK) or cascade-delete those audit
    entries, depending on the audit_logs FK's ON DELETE action — confirm
    before downgrading with data present.
    """
    op.execute(
        """
        DELETE FROM tenants
        WHERE id = '00000000-0000-0000-0000-000000000000'
        """
    )

View File

@@ -0,0 +1,49 @@
"""Add super_admin bypass to devices, device_groups, device_tags RLS policies.
Previously these tables only matched tenant_id, so super_admin context
('super_admin') returned zero rows. Users/tenants tables already had
the bypass — this brings device tables in line.
Revision ID: 022
Revises: 021
Create Date: 2026-03-07
"""
import sqlalchemy as sa
from alembic import op
revision = "022"
down_revision = "021"
branch_labels = None
depends_on = None
# Tables that need super_admin bypass added to their RLS policy
_TABLES = ["devices", "device_groups", "device_tags"]
def upgrade() -> None:
    """Recreate device-table RLS policies with a super_admin bypass.

    Drop-and-recreate is required: CREATE POLICY cannot amend an
    existing policy of the same name.
    """
    bind = op.get_bind()
    for table in _TABLES:
        bind.execute(sa.text(f"DROP POLICY IF EXISTS tenant_isolation ON {table}"))
        bind.execute(sa.text(f"""
        CREATE POLICY tenant_isolation ON {table}
        USING (
            tenant_id::text = current_setting('app.current_tenant', true)
            OR current_setting('app.current_tenant', true) = 'super_admin'
        )
        WITH CHECK (
            tenant_id::text = current_setting('app.current_tenant', true)
            OR current_setting('app.current_tenant', true) = 'super_admin'
        )
        """))
def downgrade() -> None:
    """Restore the original tenant-only policies (no super_admin bypass)."""
    bind = op.get_bind()
    for table in _TABLES:
        bind.execute(sa.text(f"DROP POLICY IF EXISTS tenant_isolation ON {table}"))
        bind.execute(sa.text(f"""
        CREATE POLICY tenant_isolation ON {table}
        USING (tenant_id::text = current_setting('app.current_tenant', true))
        WITH CHECK (tenant_id::text = current_setting('app.current_tenant', true))
        """))

View File

@@ -0,0 +1,21 @@
"""Add Slack notification channel support.
Revision ID: 023
Revises: 022
"""
from alembic import op
import sqlalchemy as sa
revision = "023"
down_revision = "022"
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Store an optional Slack incoming-webhook URL per notification channel."""
    webhook = sa.Column("slack_webhook_url", sa.Text(), nullable=True)
    op.add_column("notification_channels", webhook)
def downgrade() -> None:
    """Remove Slack webhook support from notification channels."""
    op.drop_column("notification_channels", "slack_webhook_url")

View File

@@ -0,0 +1,41 @@
"""Add contact_email to tenants and seed device_offline default alert rule.
Revision ID: 024
Revises: 023
"""
from alembic import op
import sqlalchemy as sa
revision = "024"
down_revision = "023"
def upgrade() -> None:
    """Add tenants.contact_email and seed the default device-offline rule.

    The seed skips the all-zeros system tenant and any tenant that
    already has a default device_offline rule.
    """
    bind = op.get_bind()
    # 1. Contact address for tenant-level notifications.
    op.add_column("tenants", sa.Column("contact_email", sa.String(255), nullable=True))
    # 2. One default critical rule per real tenant.
    bind.execute(sa.text("""
        INSERT INTO alert_rules (id, tenant_id, name, metric, operator, threshold, duration_polls, severity, enabled, is_default)
        SELECT gen_random_uuid(), t.id, 'Device Offline', 'device_offline', 'eq', 1, 1, 'critical', TRUE, TRUE
        FROM tenants t
        WHERE t.id != '00000000-0000-0000-0000-000000000000'
        AND NOT EXISTS (
            SELECT 1 FROM alert_rules ar
            WHERE ar.tenant_id = t.id AND ar.metric = 'device_offline' AND ar.is_default = TRUE
        )
    """))
def downgrade() -> None:
    """Delete seeded device_offline default rules and drop contact_email.

    NOTE(review): this removes EVERY default device_offline rule, even
    ones that predated this migration (upgrade() skipped tenants that
    already had one) — acceptable for a seed rollback, but not a perfect
    inverse.
    """
    conn = op.get_bind()
    conn.execute(sa.text("""
        DELETE FROM alert_rules WHERE metric = 'device_offline' AND is_default = TRUE
    """))
    op.drop_column("tenants", "contact_email")

View File

@@ -0,0 +1,37 @@
"""Fix key_access_log device_id FK to SET NULL on delete.
Revision ID: 025
Revises: 024
"""
from alembic import op
revision = "025"
down_revision = "024"
def upgrade() -> None:
    """Recreate the key_access_log -> devices FK with ON DELETE SET NULL.

    Audit rows should outlive the device they reference, with device_id
    nulled out on device deletion.
    """
    op.drop_constraint("fk_key_access_log_device_id", "key_access_log", type_="foreignkey")
    op.create_foreign_key(
        "fk_key_access_log_device_id",
        "key_access_log",
        "devices",
        ["device_id"],
        ["id"],
        ondelete="SET NULL",
    )
def downgrade() -> None:
    """Restore the original FK without an ON DELETE action."""
    op.drop_constraint("fk_key_access_log_device_id", "key_access_log", type_="foreignkey")
    op.create_foreign_key(
        "fk_key_access_log_device_id", "key_access_log", "devices", ["device_id"], ["id"]
    )

View File

@@ -0,0 +1,41 @@
"""Add system_settings table for instance-wide configuration.
Revision ID: 026
Revises: 025
Create Date: 2026-03-08
"""
revision = "026"
down_revision = "025"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
def upgrade() -> None:
    """Create system_settings: a key/value store for instance-wide config.

    A setting's value may be stored plaintext (value), app-encrypted
    (encrypted_value), or as OpenBao Transit ciphertext
    (encrypted_value_transit).
    """
    op.create_table(
        "system_settings",
        sa.Column("key", sa.String(255), primary_key=True),
        sa.Column("value", sa.Text, nullable=True),
        sa.Column("encrypted_value", sa.LargeBinary, nullable=True),
        sa.Column("encrypted_value_transit", sa.Text, nullable=True),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
        # Last editor; nulled out if that user is later deleted.
        sa.Column("updated_by", UUID(as_uuid=True), sa.ForeignKey("users.id", ondelete="SET NULL"), nullable=True),
    )
def downgrade() -> None:
    """Drop the system_settings table."""
    op.drop_table("system_settings")