Initial commit

This commit is contained in:
monoadmin
2026-04-10 15:36:35 -07:00
commit c284a9f7cf
8 changed files with 2567 additions and 0 deletions

8
.env.example Normal file
View File

@@ -0,0 +1,8 @@
# Docker backup configuration
# Copy to .env and fill in your values
# Backup destination (local path or remote)
BACKUP_DEST=/backups
# Optional: notification webhook (e.g. Slack, Discord)
NOTIFY_WEBHOOK=

23
.gitignore vendored Normal file
View File

@@ -0,0 +1,23 @@
# Environment variables
.env
.env.*
!.env.example
# Dependencies
node_modules/
vendor/
# Build output
.next/
dist/
build/
*.pyc
__pycache__/
# OS
.DS_Store
Thumbs.db
# Logs
*.log
# Secrets — never commit the session-signing key or the config
# (config.json stores the password hash; .secret_key signs session cookies)
webapp/.secret_key
webapp/config.json

463
docker_backup.py Executable file
View File

@@ -0,0 +1,463 @@
#!/usr/bin/env python3
"""
Docker Backup/Restore Tool
Backup and restore containers (config + volumes) across multiple Docker hosts.
"""
import argparse
import json
import os
import sys
import tarfile
import tempfile
import time
from datetime import datetime
from io import BytesIO
from pathlib import Path
import docker
from docker.errors import DockerException, NotFound, APIError
from rich.console import Console
from rich.table import Table
from rich import print as rprint
console = Console()
# ── Host connection helpers ──────────────────────────────────────────────────
def connect(host: str) -> docker.DockerClient:
    """Open and ping a Docker client for *host*; exit(1) if unreachable.

    'local', '' and the default unix socket URL all mean the local daemon.
    ssh:// URLs use the system ssh client; anything else is treated as a
    tcp:// endpoint (a bare host:port gets the scheme prepended).
    """
    try:
        if host in ("local", "unix:///var/run/docker.sock", ""):
            client = docker.from_env()
        elif host.startswith("ssh://"):
            client = docker.DockerClient(base_url=host, use_ssh_client=True)
        else:
            # tcp:// or host:port shorthand
            url = host if "://" in host else f"tcp://{host}"
            client = docker.DockerClient(base_url=url)
        client.ping()
    except DockerException as e:
        console.print(f"[red]Cannot connect to {host}: {e}[/red]")
        sys.exit(1)
    return client
# ── Backup ───────────────────────────────────────────────────────────────────
def backup_container(client: docker.DockerClient, container_name: str,
                     output_dir: Path, save_image: bool = False):
    """
    Backup a single container to output_dir/<container_name>_<timestamp>.tar
    Contents:
        config.json — full docker inspect output
        volumes/    — one .tar per named/anonymous volume (and bind mounts)
        image.tar   — saved image (optional, can be large)

    Returns the Path of the archive written. Exits the process when the
    container does not exist. Volume/image failures are best-effort: they
    are reported as warnings and the backup continues.
    """
    try:
        container = client.containers.get(container_name)
    except NotFound:
        console.print(f"[red]Container not found: {container_name}[/red]")
        sys.exit(1)
    ts = datetime.now().strftime("%Y%m%d_%H%M%S")
    # Container names may carry a leading "/" and "/" is illegal in filenames.
    safe_name = container.name.lstrip("/").replace("/", "_")
    out_file = output_dir / f"{safe_name}_{ts}.tar"
    console.print(f"Backing up [cyan]{container.name}[/cyan] → [green]{out_file}[/green]")
    inspect = client.api.inspect_container(container.id)
    with tarfile.open(out_file, "w") as tar:
        # 1. config.json — everything needed to recreate the container later.
        config_bytes = json.dumps(inspect, indent=2).encode()
        info = tarfile.TarInfo(name="config.json")
        info.size = len(config_bytes)
        info.mtime = int(time.time())
        tar.addfile(info, BytesIO(config_bytes))
        console.print(" [dim]✓ config.json[/dim]")
        # 2. Volumes — archive the container-side path of each mount so the
        #    same code path handles named volumes and bind mounts alike.
        mounts = inspect.get("Mounts", [])
        for mount in mounts:
            mtype = mount.get("Type", "")
            if mtype not in ("volume", "bind"):
                continue
            if mtype == "volume":
                vol_name = mount.get("Name", "")
                dest = mount.get("Destination", "")
                label = vol_name or dest.replace("/", "_")
            else:
                src = mount.get("Source", "")
                label = src.replace("/", "_").lstrip("_")
                dest = src
            archive_name = f"volumes/{label}.tar"
            console.print(f" [dim]Archiving volume: {label} ({dest})[/dim]")
            try:
                # Use docker cp stream to get volume data. NOTE: the whole
                # volume is buffered in memory before being written out.
                stream, _ = client.api.get_archive(container.id, dest)
                vol_data = b"".join(stream)
                vol_info = tarfile.TarInfo(name=archive_name)
                vol_info.size = len(vol_data)
                vol_info.mtime = int(time.time())
                tar.addfile(vol_info, BytesIO(vol_data))
                console.print(f" [dim]✓ {archive_name} ({len(vol_data)//1024} KB)[/dim]")
            except Exception as e:
                # BUGFIX: was `except (APIError, Exception)` — APIError is an
                # Exception subclass, so the tuple was redundant and misleading.
                # Behavior is unchanged: warn and continue with the next mount.
                console.print(f" [yellow]⚠ Could not archive {label}: {e}[/yellow]")
        # 3. Image (optional) — also buffered fully in memory.
        if save_image:
            image_tag = inspect["Config"].get("Image", "")
            console.print(f" [dim]Saving image: {image_tag} (may take a while)…[/dim]")
            try:
                image = client.images.get(image_tag)
                img_data = b"".join(image.save())
                img_info = tarfile.TarInfo(name="image.tar")
                img_info.size = len(img_data)
                img_info.mtime = int(time.time())
                tar.addfile(img_info, BytesIO(img_data))
                console.print(f" [dim]✓ image.tar ({len(img_data)//1024//1024} MB)[/dim]")
            except Exception as e:
                console.print(f" [yellow]⚠ Could not save image: {e}[/yellow]")
    console.print(f"[green]✓ Backup complete:[/green] {out_file}")
    return out_file
# ── Restore ──────────────────────────────────────────────────────────────────
def restore_container(client: docker.DockerClient, backup_file: Path,
                      new_name: str | None = None, start: bool = False,
                      load_image: bool = False):
    """
    Restore a container from a backup .tar produced by backup_container().

    Steps: read config.json; optionally load the image from the archive;
    pull the image if still missing; resolve a name collision via an
    interactive prompt (so this is unsuitable for unattended use); recreate
    volumes and restore their data through a throwaway Alpine container;
    finally rebuild the container from the recorded inspect output.
    Exits the process on fatal errors; returns the created Container.
    """
    console.print(f"Restoring from [cyan]{backup_file}[/cyan]")
    with tarfile.open(backup_file, "r") as tar:
        # 1. Read config
        config_member = tar.getmember("config.json")
        config = json.loads(tar.extractfile(config_member).read())
    orig_name = config["Name"].lstrip("/")
    container_name = new_name or orig_name
    image_name = config["Config"]["Image"]
    console.print(f" Original name : [cyan]{orig_name}[/cyan]")
    console.print(f" Restore as : [cyan]{container_name}[/cyan]")
    console.print(f" Image : [cyan]{image_name}[/cyan]")
    # 2. Optionally load image from backup
    if load_image:
        with tarfile.open(backup_file, "r") as tar:
            try:
                # getmember raises KeyError when the entry is absent.
                img_member = tar.getmember("image.tar")
                console.print(" [dim]Loading image from backup…[/dim]")
                img_data = tar.extractfile(img_member).read()
                client.images.load(img_data)
                console.print(" [dim]✓ Image loaded[/dim]")
            except KeyError:
                console.print(" [yellow]⚠ No image.tar in backup, skipping load[/yellow]")
    # 3. Pull image if not present
    try:
        client.images.get(image_name)
        console.print(f" [dim]✓ Image {image_name} already present[/dim]")
    except NotFound:
        console.print(f" [dim]Pulling {image_name}…[/dim]")
        try:
            client.images.pull(image_name)
        except Exception as e:
            console.print(f" [red]Cannot pull image: {e}[/red]")
            sys.exit(1)
    # 4. Check for name collision — interactive prompt; aborts unless 'y'.
    try:
        existing = client.containers.get(container_name)
        console.print(f" [yellow]Container {container_name} already exists (id={existing.short_id})[/yellow]")
        answer = input(" Remove existing container? [y/N] ").strip().lower()
        if answer == "y":
            existing.remove(force=True)
        else:
            console.print("[red]Restore aborted.[/red]")
            sys.exit(1)
    except NotFound:
        pass
    # 5. Recreate volumes and restore data
    with tarfile.open(backup_file, "r") as tar:
        volume_members = [m for m in tar.getmembers()
                          if m.name.startswith("volumes/") and m.name.endswith(".tar")]
    mounts = config.get("Mounts", [])
    vol_map: dict[str, str] = {}  # label -> actual volume name or bind path
    for mount in mounts:
        mtype = mount.get("Type", "")
        dest = mount.get("Destination", "")
        if mtype == "volume":
            vol_name = mount.get("Name", "")
            label = vol_name or dest.replace("/", "_")
        else:
            src = mount.get("Source", "")
            label = src.replace("/", "_").lstrip("_")
            vol_map[label] = src
            continue  # bind mounts: path must exist on target host
        # Create volume if missing
        try:
            client.volumes.get(vol_name)
            console.print(f" [dim]Volume {vol_name} already exists[/dim]")
        except NotFound:
            client.volumes.create(name=vol_name)
            console.print(f" [dim]✓ Created volume {vol_name}[/dim]")
        vol_map[label] = vol_name
    # Restore volume data via a temporary Alpine container
    with tarfile.open(backup_file, "r") as tar:
        for vmember in volume_members:
            label = vmember.name[len("volumes/"):-len(".tar")]
            if label not in vol_map:
                console.print(f" [yellow]⚠ No matching mount for volume backup: {label}[/yellow]")
                continue
            vol_name = vol_map[label]
            # Recover the original in-container destination for this label.
            dest_path = next(
                (m["Destination"] for m in mounts
                 if (m.get("Name") == vol_name or
                     m.get("Source", "").replace("/", "_").lstrip("_") == label)),
                None
            )
            if not dest_path:
                continue
            console.print(f" [dim]Restoring volume data: {vol_name}{dest_path}[/dim]")
            vol_data = tar.extractfile(vmember).read()
            # Spin up temp container with the volume mounted, restore via tar stream
            # NOTE(review): both branches below are byte-identical — the else
            # branch (bind-mount case) was presumably meant to mount the host
            # path instead of a named volume; confirm and fix.
            if vol_map[label] == vol_name:
                volumes_arg = {vol_name: {"bind": "/restore_target", "mode": "rw"}}
            else:
                volumes_arg = {vol_name: {"bind": "/restore_target", "mode": "rw"}}
            try:
                # NOTE(review): with detach=True the helper starts immediately,
                # so its tar command can race the put_archive upload below —
                # create-then-start would be deterministic; confirm.
                restore_ctr = client.containers.run(
                    "alpine",
                    command="sh -c 'cd /restore_target && tar xf /tmp/vol.tar --strip-components=1'",
                    volumes=volumes_arg,
                    detach=True,
                    remove=False,
                )
                # Upload the tar to the temp container
                client.api.put_archive(restore_ctr.id, "/tmp", vol_data)
                restore_ctr.wait()  # NOTE(review): exit status is not checked
                restore_ctr.remove()
                console.print(f" [dim]✓ Volume data restored: {vol_name}[/dim]")
            except Exception as e:
                console.print(f" [yellow]⚠ Could not restore volume data for {vol_name}: {e}[/yellow]")
    # 6. Rebuild container create kwargs from inspect
    cfg = config["Config"]
    host_cfg = config["HostConfig"]
    create_kwargs: dict = {
        "image": image_name,
        "name": container_name,
        "command": cfg.get("Cmd"),
        "entrypoint": cfg.get("Entrypoint"),
        "environment": cfg.get("Env") or [],
        "working_dir": cfg.get("WorkingDir") or None,
        "user": cfg.get("User") or None,
        "hostname": cfg.get("Hostname") or None,
        "domainname": cfg.get("Domainname") or None,
        "labels": cfg.get("Labels") or {},
        "tty": cfg.get("Tty", False),
        "stdin_open": cfg.get("OpenStdin", False),
    }
    # Ports: map each exposed port to its recorded host bindings
    # (None = expose without publishing).
    exposed = cfg.get("ExposedPorts") or {}
    port_bindings = host_cfg.get("PortBindings") or {}
    if exposed or port_bindings:
        ports = {}
        for port_proto in exposed:
            bindings = port_bindings.get(port_proto)
            if bindings:
                host_ports = [b.get("HostPort") for b in bindings if b.get("HostPort")]
                ports[port_proto] = host_ports or None
            else:
                ports[port_proto] = None
        create_kwargs["ports"] = ports
    # Restart policy
    restart = host_cfg.get("RestartPolicy", {})
    if restart.get("Name"):
        create_kwargs["restart_policy"] = {
            "Name": restart["Name"],
            "MaximumRetryCount": restart.get("MaximumRetryCount", 0),
        }
    # Network mode
    net_mode = host_cfg.get("NetworkMode", "default")
    if net_mode not in ("default", "bridge"):
        create_kwargs["network_mode"] = net_mode
    # Volumes mounts — re-attach every recorded volume/bind at its old path.
    volume_binds = []
    for mount in mounts:
        mtype = mount.get("Type", "")
        dest = mount.get("Destination", "")
        mode = "rw" if not mount.get("RW") is False else "ro"
        if mtype == "volume":
            volume_binds.append(f"{mount['Name']}:{dest}:{mode}")
        elif mtype == "bind":
            volume_binds.append(f"{mount['Source']}:{dest}:{mode}")
    if volume_binds:
        create_kwargs["volumes"] = volume_binds
    # Extra host config fields
    if host_cfg.get("Privileged"):
        create_kwargs["privileged"] = True
    if host_cfg.get("NetworkMode") == "host":
        create_kwargs["network_mode"] = "host"
    cap_add = host_cfg.get("CapAdd") or []
    if cap_add:
        create_kwargs["cap_add"] = cap_add
    devices = host_cfg.get("Devices") or []
    if devices:
        create_kwargs["devices"] = [
            f"{d['PathOnHost']}:{d['PathInContainer']}:{d['CgroupPermissions']}"
            for d in devices
        ]
    # 7. Create container — drop None values so docker-py applies its defaults.
    try:
        ctr = client.containers.create(**{k: v for k, v in create_kwargs.items() if v is not None})
        console.print(f"[green]✓ Container created:[/green] {ctr.name} ({ctr.short_id})")
    except Exception as e:
        console.print(f"[red]Failed to create container: {e}[/red]")
        sys.exit(1)
    if start:
        ctr.start()
        console.print(f"[green]✓ Container started[/green]")
    return ctr
# ── List ─────────────────────────────────────────────────────────────────────
def list_containers(hosts: list[str], all_containers: bool = False):
    """Render a table of containers found on every host in *hosts*.

    A connection failure aborts the program (connect() exits); a per-host
    listing error is shown inline as a red row instead.
    """
    table = Table(title="Docker Containers")
    for heading, col_style in (("Host", "cyan"), ("Name", "white"),
                               ("Image", "dim"), ("Status", "green"),
                               ("Ports", "dim")):
        table.add_column(heading, style=col_style)
    for host in hosts:
        client = connect(host)
        host_cell = host
        try:
            for c in client.containers.list(all=all_containers):
                port_parts = [f"{v[0]['HostPort']}{k}" if v else k
                              for k, v in (c.ports or {}).items()]
                ports = ", ".join(port_parts) or "-"
                status = c.status
                color = "green" if status == "running" else "yellow"
                image_label = c.image.tags[0] if c.image.tags else c.image.short_id
                table.add_row(host_cell, c.name, image_label,
                              f"[{color}]{status}[/{color}]", ports)
                host_cell = ""  # only show host label on first row
        except Exception as e:
            table.add_row(host, f"[red]ERROR: {e}[/red]", "", "", "")
    console.print(table)
# ── CLI ──────────────────────────────────────────────────────────────────────
def main():
    """Parse CLI arguments and dispatch to the list/backup/restore commands."""
    parser = argparse.ArgumentParser(
        description="Backup and restore Docker containers across multiple hosts.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # List containers on multiple hosts
  docker_backup.py list --host local --host tcp://192.168.1.10:2375
  # Backup a container on a remote host
  docker_backup.py backup myapp --host tcp://192.168.1.10:2375 --output /backups
  # Backup with image saved
  docker_backup.py backup myapp --host local --save-image
  # Restore to a different host (pull image from registry)
  docker_backup.py restore /backups/myapp_20240101_120000.tar --host tcp://192.168.1.20:2375
  # Restore with a new name and auto-start
  docker_backup.py restore /backups/myapp_20240101_120000.tar --name myapp2 --start
  # Restore using image from backup file (no registry needed)
  docker_backup.py restore /backups/myapp_20240101_120000.tar --load-image --start
"""
    )
    sub = parser.add_subparsers(dest="command", required=True)
    # list
    p_list = sub.add_parser("list", help="List containers across hosts")
    p_list.add_argument("--host", action="append", default=[], dest="hosts",
                        metavar="HOST", help="Docker host (repeatable). Use 'local' for local socket.")
    p_list.add_argument("-a", "--all", action="store_true", help="Show stopped containers too")
    # backup
    p_back = sub.add_parser("backup", help="Backup a container")
    p_back.add_argument("container", help="Container name or ID")
    p_back.add_argument("--host", default="local", metavar="HOST",
                        help="Docker host (default: local)")
    p_back.add_argument("--output", "-o", default=".", metavar="DIR",
                        help="Output directory (default: current dir)")
    p_back.add_argument("--save-image", action="store_true",
                        help="Also save the container image into the backup (large!)")
    # restore
    p_rest = sub.add_parser("restore", help="Restore a container from backup")
    p_rest.add_argument("backup", help="Path to the backup .tar file")
    p_rest.add_argument("--host", default="local", metavar="HOST",
                        help="Docker host to restore to (default: local)")
    p_rest.add_argument("--name", metavar="NAME",
                        help="Override container name (default: original name)")
    p_rest.add_argument("--start", action="store_true",
                        help="Start the container after restoring")
    p_rest.add_argument("--load-image", action="store_true",
                        help="Load image from backup instead of pulling from registry")
    args = parser.parse_args()
    if args.command == "list":
        # No --host flags means just the local daemon.
        hosts = args.hosts or ["local"]
        list_containers(hosts, all_containers=args.all)
    elif args.command == "backup":
        client = connect(args.host)
        output_dir = Path(args.output)
        output_dir.mkdir(parents=True, exist_ok=True)
        backup_container(client, args.container, output_dir, save_image=args.save_image)
    elif args.command == "restore":
        client = connect(args.host)
        restore_container(client, Path(args.backup),
                          new_name=args.name,
                          start=args.start,
                          load_image=args.load_image)
if __name__ == "__main__":
    main()

100
install.sh Executable file
View File

@@ -0,0 +1,100 @@
#!/bin/bash
# Installer for the Docker Backup web UI: checks Python deps, creates the
# backup directory, writes a systemd unit, and starts the service.
set -e
APP_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
WEBAPP_DIR="$APP_DIR/webapp"
SERVICE_NAME="docker-backup"
SERVICE_FILE="/etc/systemd/system/${SERVICE_NAME}.service"
PYTHON="$(which python3)"
# PORT/BIND may be overridden from the environment when invoking this script.
PORT="${PORT:-5999}"
BIND="${BIND:-0.0.0.0}"
# ── Colours ───────────────────────────────────────────────────────────────────
GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'
ok() { echo -e "${GREEN}${NC} $*"; }
warn() { echo -e "${YELLOW}${NC} $*"; }
die() { echo -e "${RED}${NC} $*"; exit 1; }
step() { echo -e "\n${YELLOW}${NC} $*"; }
echo ""
echo " Docker Backup — Install"
echo " ───────────────────────"
# ── Root check ────────────────────────────────────────────────────────────────
[[ $EUID -ne 0 ]] && die "Run as root (sudo $0)"
# ── Python deps ───────────────────────────────────────────────────────────────
step "Checking Python dependencies…"
# Fall back to "python3 -m pip" when no pip binary is on PATH; relies on
# word splitting of $PIP below, so it must stay unquoted there.
PIP="$(which pip3 2>/dev/null || which pip 2>/dev/null || echo 'python3 -m pip')"
MISSING=()
for pkg in docker flask rich apscheduler paramiko; do
    python3 -c "import $pkg" 2>/dev/null && ok "$pkg" || MISSING+=("$pkg")
done
if [[ ${#MISSING[@]} -gt 0 ]]; then
    echo " Installing: ${MISSING[*]}"
    # First try with --break-system-packages (PEP 668 distros), then without.
    # NOTE(review): `a || b && c` parses as `(a || b) && c` in bash, so "ok"
    # fires when either install succeeds — confirm that is the intent.
    $PIP install --quiet --break-system-packages "${MISSING[@]}" 2>/dev/null || \
        $PIP install --quiet "${MISSING[@]}" && ok "Packages installed"
fi
# ── Backup dir ────────────────────────────────────────────────────────────────
step "Creating backup directory…"
BACKUP_DIR="$WEBAPP_DIR/backups"
mkdir -p "$BACKUP_DIR"
ok "$BACKUP_DIR"
# ── Systemd service ───────────────────────────────────────────────────────────
step "Writing systemd service: $SERVICE_FILE"
cat > "$SERVICE_FILE" <<EOF
[Unit]
Description=Docker Backup Web UI
After=network.target docker.service
Wants=docker.service
[Service]
Type=simple
ExecStart=${PYTHON} ${WEBAPP_DIR}/app.py --host ${BIND} --port ${PORT}
WorkingDirectory=${WEBAPP_DIR}
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=${SERVICE_NAME}
[Install]
WantedBy=multi-user.target
EOF
ok "Service file written"
# ── Enable & start ────────────────────────────────────────────────────────────
step "Enabling and starting service…"
systemctl daemon-reload
systemctl enable "$SERVICE_NAME"
systemctl restart "$SERVICE_NAME"
# Wait a moment then check status
sleep 2
if systemctl is-active --quiet "$SERVICE_NAME"; then
    ok "Service is running"
else
    warn "Service may have failed — check: journalctl -u $SERVICE_NAME -n 30"
fi
# ── Summary ───────────────────────────────────────────────────────────────────
# First address from `hostname -I`; on multi-homed hosts this may not be the
# interface users will reach the UI on.
HOST_IP=$(hostname -I | awk '{print $1}')
echo ""
echo " ┌──────────────────────────────────────────────┐"
echo " │ Docker Backup UI is running │"
echo " │ │"
printf " │ Local: http://localhost:%-19s│\n" "${PORT}"
printf " │ Network: http://%-27s│\n" "${HOST_IP}:${PORT}"
echo " │ │"
echo " │ Manage service: │"
echo " │ systemctl status $SERVICE_NAME"
echo " │ systemctl stop $SERVICE_NAME"
echo " │ systemctl restart $SERVICE_NAME"
echo " │ journalctl -u $SERVICE_NAME -f │"
echo " └──────────────────────────────────────────────┘"
echo ""

1
webapp/.secret_key Normal file
View File

@@ -0,0 +1 @@
ba0e6899ed5beaa803976a8e1068df03270fc67aae10560be81f937e0adc0664

859
webapp/app.py Normal file
View File

@@ -0,0 +1,859 @@
#!/usr/bin/env python3
"""Docker Backup Web UI"""
import json
import os
import secrets
import sys
import tarfile
import threading
import time
import uuid
from datetime import datetime
from functools import wraps
from pathlib import Path
import docker
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from docker.errors import NotFound, APIError
from flask import Flask, Response, jsonify, render_template, request, session, stream_with_context
from werkzeug.security import check_password_hash, generate_password_hash
sys.path.insert(0, str(Path(__file__).parent.parent))
app = Flask(__name__)
# ── Secret key (stable across restarts) ──────────────────────────────────────
# Persist the Flask session-signing key next to the app so sessions survive
# restarts. NOTE(review): this file must never be committed or world-readable —
# anyone who reads it can forge session cookies.
_KEY_FILE = Path(__file__).parent / ".secret_key"
if _KEY_FILE.exists():
    app.secret_key = _KEY_FILE.read_text().strip()
else:
    _key = secrets.token_hex(32)
    _KEY_FILE.write_text(_key)
    app.secret_key = _key
CONFIG_FILE = Path(__file__).parent / "config.json"
# Defaults merged into config.json on every load (see load_config).
DEFAULT_CONFIG = {
    "hosts": ["local"],
    "backup_dir": str(Path(__file__).parent / "backups"),
    "auth": {},  # {"username": ..., "password_hash": ...} once configured
    "schedules": [],
}
# In-memory job registry: job id -> {id, label, status, logs, created}.
# Guard all access with jobs_lock; workers run in background threads.
jobs: dict[str, dict] = {}
jobs_lock = threading.Lock()
# Friendly aliases accepted anywhere a 5-field cron expression is expected.
CRON_ALIASES = {
    "hourly": "0 * * * *",
    "daily": "0 2 * * *",
    "weekly": "0 2 * * 0",
    "monthly": "0 2 1 * *",
}
# ── Config ────────────────────────────────────────────────────────────────────
def load_config() -> dict:
    """Read config.json, back-filling any keys missing from older files."""
    if not CONFIG_FILE.exists():
        return DEFAULT_CONFIG.copy()
    cfg = json.loads(CONFIG_FILE.read_text())
    for key, default in DEFAULT_CONFIG.items():
        cfg.setdefault(key, default)
    return cfg
def save_config(cfg: dict):
    """Persist *cfg* to config.json, pretty-printed for hand editing."""
    serialized = json.dumps(cfg, indent=2)
    CONFIG_FILE.write_text(serialized)
# ── Auth ──────────────────────────────────────────────────────────────────────
def auth_enabled() -> bool:
    """True once a password hash has been stored in the config."""
    auth = load_config().get("auth", {})
    return bool(auth.get("password_hash"))
def login_required(f):
    """Decorator: reject unauthenticated requests once auth is configured.

    Always answers with a JSON 401 — the client-side app decides how to
    present the login prompt. (BUGFIX: the original had two byte-identical
    return branches behind an `is_json or /api/` check; the dead duplicate
    is collapsed, behavior unchanged.)
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        if auth_enabled() and not session.get("authenticated"):
            return jsonify({"error": "unauthorized"}), 401
        return f(*args, **kwargs)
    return decorated
@app.route("/api/auth/status")
def api_auth_status():
    """Report whether auth is configured and whether this session is logged in."""
    cfg_auth = load_config().get("auth", {})
    payload = {
        "enabled": auth_enabled(),
        "authenticated": session.get("authenticated", False),
        "username": cfg_auth.get("username", ""),
    }
    return jsonify(payload)
@app.route("/api/auth/login", methods=["POST"])
def api_auth_login():
    """Validate username/password and mark the session authenticated."""
    payload = request.json or {}
    auth = load_config().get("auth", {})
    if not auth.get("password_hash"):
        return jsonify({"error": "Auth not configured"}), 400
    # Same error for wrong user and wrong password — no account enumeration.
    if payload.get("username") != auth.get("username"):
        return jsonify({"error": "Invalid credentials"}), 401
    if not check_password_hash(auth["password_hash"], payload.get("password", "")):
        return jsonify({"error": "Invalid credentials"}), 401
    session["authenticated"] = True
    session.permanent = True
    return jsonify({"ok": True})
@app.route("/api/auth/logout", methods=["POST"])
def api_auth_logout():
    """Drop the entire session, logging the user out."""
    session.clear()
    response = jsonify({"ok": True})
    return response
@app.route("/api/auth/setup", methods=["POST"])
def api_auth_setup():
    """Create or replace the single username/password used by the UI.

    First-time setup (no auth configured yet) needs no session — NOTE(review):
    on a network-exposed instance this is a first-come-first-served takeover
    window until someone completes setup; confirm that is acceptable.
    """
    cfg = load_config()
    auth = cfg.get("auth", {})
    data = request.json or {}
    # If auth already configured, require current password
    # NOTE(review): despite the comment above, only an authenticated session
    # is checked here — the current password itself is never re-verified.
    if auth.get("password_hash"):
        if not session.get("authenticated"):
            return jsonify({"error": "unauthorized"}), 401
    username = data.get("username", "").strip()
    password = data.get("password", "")
    if not username or not password:
        return jsonify({"error": "username and password required"}), 400
    if len(password) < 6:
        return jsonify({"error": "password must be at least 6 characters"}), 400
    cfg["auth"] = {
        "username": username,
        "password_hash": generate_password_hash(password),
    }
    save_config(cfg)
    # Setting up credentials logs the caller straight in.
    session["authenticated"] = True
    return jsonify({"ok": True})
@app.route("/api/auth/disable", methods=["POST"])
@login_required
def api_auth_disable():
    """Wipe the auth configuration entirely and end the current session."""
    cfg = load_config()
    cfg["auth"] = {}
    save_config(cfg)
    # The caller's session is invalidated along with everyone else's creds.
    session.clear()
    return jsonify({"ok": True})
# ── Job helpers ───────────────────────────────────────────────────────────────
def new_job(label: str) -> str:
    """Register a fresh in-memory job record and return its short id."""
    jid = str(uuid.uuid4())[:8]
    record = {"id": jid, "label": label, "status": "running",
              "logs": [], "created": datetime.now().isoformat()}
    with jobs_lock:
        jobs[jid] = record
    return jid
def job_log(jid: str, msg: str):
    """Append a log line to a job; unknown job ids are silently ignored."""
    with jobs_lock:
        job = jobs.get(jid)
        if job is not None:
            job["logs"].append(msg)
def job_done(jid: str, error: str | None = None):
    """Finish a job: status becomes 'error' (with message) or 'done'."""
    with jobs_lock:
        job = jobs.get(jid)
        if job is None:
            return
        if error:
            job["status"] = "error"
            job["logs"].append(f"ERROR: {error}")
        else:
            job["status"] = "done"
        # Sentinel consumed by log streamers to stop tailing.
        job["logs"].append("[DONE]")
# ── Docker client helper ──────────────────────────────────────────────────────
def make_client(host: str) -> docker.DockerClient:
    """Build a DockerClient for 'local', tcp://, ssh://, or host:port strings."""
    if host in ("local", ""):
        return docker.from_env()
    base_url = host if "://" in host else f"tcp://{host}"
    is_ssh = host.startswith("ssh://")
    return docker.DockerClient(base_url=base_url, use_ssh_client=is_ssh)
# ── Retention ─────────────────────────────────────────────────────────────────
def apply_retention(backup_dir: Path, container_name: str,
keep_count: int | None, keep_days: int | None) -> int:
safe = container_name.lstrip("/").replace("/", "_")
files = sorted(backup_dir.glob(f"{safe}_*.tar"),
key=lambda x: x.stat().st_mtime, reverse=True)
to_delete: set[Path] = set()
if keep_days and keep_days > 0:
cutoff = time.time() - keep_days * 86400
to_delete.update(f for f in files if f.stat().st_mtime < cutoff)
if keep_count and keep_count > 0:
to_delete.update(files[keep_count:])
for f in to_delete:
try:
f.unlink()
except Exception:
pass
return len(to_delete)
# ── Backup worker ─────────────────────────────────────────────────────────────
def _run_backup(jid: str, host: str, container_name: str,
                backup_dir: str, save_image: bool, pre_hook: str = "",
                retention_count: int | None = None, retention_days: int | None = None):
    """Worker body for a backup job; progress goes through job_log/job_done.

    Archive layout matches the CLI tool: config.json, volumes/<label>.tar and
    (optionally) image.tar inside one outer tar. Any uncaught exception marks
    the job failed rather than crashing the worker thread.
    """
    from io import BytesIO
    try:
        client = make_client(host)
        client.ping()
        job_log(jid, f"Connected to {host}")
        try:
            container = client.containers.get(container_name)
        except NotFound:
            job_done(jid, f"Container not found: {container_name}")
            return
        # Pre-hook: arbitrary shell command inside the container (e.g. a DB
        # dump) before the filesystem snapshot. Failure is logged, not fatal.
        if pre_hook and pre_hook.strip():
            job_log(jid, f"Running pre-hook: {pre_hook}")
            try:
                result = container.exec_run(["sh", "-c", pre_hook],
                                            stdout=True, stderr=True)
                output = result.output.decode(errors="replace").strip()
                if result.exit_code != 0:
                    job_log(jid, f"⚠ Pre-hook exited {result.exit_code}: {output}")
                else:
                    job_log(jid, f"✓ Pre-hook OK{': ' + output[:200] if output else ''}")
            except Exception as e:
                job_log(jid, f"⚠ Pre-hook failed: {e}")
        ts = datetime.now().strftime("%Y%m%d_%H%M%S")
        # "/" is illegal in filenames; names may carry a leading "/".
        safe_name = container.name.lstrip("/").replace("/", "_")
        out_file = Path(backup_dir) / f"{safe_name}_{ts}.tar"
        Path(backup_dir).mkdir(parents=True, exist_ok=True)
        inspect = client.api.inspect_container(container.id)
        job_log(jid, f"Backing up: {container.name}")
        with tarfile.open(out_file, "w") as tar:
            # 1. config.json — everything needed to recreate the container.
            config_bytes = json.dumps(inspect, indent=2).encode()
            info = tarfile.TarInfo(name="config.json")
            info.size = len(config_bytes)
            info.mtime = int(time.time())
            tar.addfile(info, BytesIO(config_bytes))
            job_log(jid, "✓ config.json")
            # 2. One inner tar per volume/bind mount, via the container path.
            for mount in inspect.get("Mounts", []):
                mtype = mount.get("Type", "")
                if mtype not in ("volume", "bind"):
                    continue
                dest = mount.get("Destination", "")
                if mtype == "volume":
                    label = mount.get("Name") or dest.replace("/", "_")
                else:
                    label = mount.get("Source", "").replace("/", "_").lstrip("_")
                archive_name = f"volumes/{label}.tar"
                job_log(jid, f"Archiving volume: {label} ({dest})")
                try:
                    # get_archive streams the path out of the container; the
                    # whole volume is buffered in memory before being written.
                    stream, _ = client.api.get_archive(container.id, dest)
                    vol_data = b"".join(stream)
                    vi = tarfile.TarInfo(name=archive_name)
                    vi.size = len(vol_data)
                    vi.mtime = int(time.time())
                    tar.addfile(vi, BytesIO(vol_data))
                    job_log(jid, f"{archive_name} ({len(vol_data)//1024} KB)")
                except Exception as e:
                    # Best effort per mount: warn and continue.
                    job_log(jid, f"⚠ Could not archive {label}: {e}")
            # 3. Optional image save — also fully buffered in memory.
            if save_image:
                image_tag = inspect["Config"].get("Image", "")
                job_log(jid, f"Saving image: {image_tag}")
                try:
                    img_data = b"".join(client.images.get(image_tag).save())
                    ii = tarfile.TarInfo(name="image.tar")
                    ii.size = len(img_data)
                    ii.mtime = int(time.time())
                    tar.addfile(ii, BytesIO(img_data))
                    job_log(jid, f"✓ image.tar ({len(img_data)//1024//1024} MB)")
                except Exception as e:
                    job_log(jid, f"⚠ Image save failed: {e}")
        job_log(jid, f"✓ Saved: {out_file.name}")
        # Retention: prune older archives of this container per the limits.
        if retention_count or retention_days:
            deleted = apply_retention(Path(backup_dir), container.name,
                                      retention_count, retention_days)
            if deleted:
                job_log(jid, f"Retention: removed {deleted} old backup(s)")
        job_done(jid)
    except Exception as e:
        job_done(jid, str(e))
# ── Restore worker ────────────────────────────────────────────────────────────
def _run_restore(jid: str, host: str, backup_path: str,
                 new_name: str | None, start: bool, load_image: bool):
    """Worker body for a restore job; non-interactive twin of the CLI restore.

    Unlike the CLI tool, an existing container with the target name is
    removed without prompting. Progress goes through job_log/job_done; any
    uncaught exception marks the job failed instead of crashing the thread.
    """
    from io import BytesIO
    try:
        client = make_client(host)
        client.ping()
        job_log(jid, f"Connected to {host}")
        bp = Path(backup_path)
        if not bp.exists():
            job_done(jid, f"File not found: {backup_path}")
            return
        with tarfile.open(bp, "r") as tar:
            config = json.loads(tar.extractfile("config.json").read())
        orig_name = config["Name"].lstrip("/")
        container_name = new_name or orig_name
        image_name = config["Config"]["Image"]
        job_log(jid, f"Restoring '{orig_name}''{container_name}' (image: {image_name})")
        # Load the image straight out of the archive when requested;
        # otherwise fall through to the registry-pull path below.
        if load_image:
            with tarfile.open(bp, "r") as tar:
                try:
                    img_data = tar.extractfile("image.tar").read()
                    job_log(jid, "Loading image from backup…")
                    client.images.load(img_data)
                    job_log(jid, "✓ Image loaded")
                except KeyError:
                    # extractfile raises KeyError for a missing member.
                    job_log(jid, "⚠ No image.tar in backup — will pull")
        try:
            client.images.get(image_name)
            job_log(jid, f"✓ Image {image_name} present")
        except NotFound:
            job_log(jid, f"Pulling {image_name}")
            client.images.pull(image_name)
            job_log(jid, f"✓ Pulled {image_name}")
        # Name collision: remove the existing container unconditionally.
        try:
            existing = client.containers.get(container_name)
            job_log(jid, f"Removing existing container {container_name}")
            existing.remove(force=True)
        except NotFound:
            pass
        mounts = config.get("Mounts", [])
        # Read every volumes/<label>.tar fully into memory up front.
        vol_data_map: dict[str, bytes] = {}
        with tarfile.open(bp, "r") as tar:
            for m in tar.getmembers():
                if m.name.startswith("volumes/") and m.name.endswith(".tar"):
                    label = m.name[len("volumes/"):-len(".tar")]
                    vol_data_map[label] = tar.extractfile(m).read()
        for mount in mounts:
            if mount.get("Type") != "volume":
                continue  # bind mounts must already exist on the target host
            vol_name = mount.get("Name", "")
            dest = mount.get("Destination", "")
            label = vol_name or dest.replace("/", "_")
            try:
                client.volumes.get(vol_name)
                job_log(jid, f"Volume {vol_name} exists")
            except NotFound:
                client.volumes.create(name=vol_name)
                job_log(jid, f"✓ Created volume {vol_name}")
            if label in vol_data_map:
                job_log(jid, f"Restoring volume data: {vol_name}")
                try:
                    # NOTE(review): with detach=True the helper starts at once
                    # and may run tar before put_archive uploads /tmp/vol.tar —
                    # create-then-start would avoid the race; confirm.
                    rc = client.containers.run(
                        "alpine",
                        command="sh -c 'cd /t && tar xf /tmp/vol.tar --strip-components=1'",
                        volumes={vol_name: {"bind": "/t", "mode": "rw"}},
                        detach=True, remove=False,
                    )
                    client.api.put_archive(rc.id, "/tmp", vol_data_map[label])
                    rc.wait()  # NOTE(review): exit status is not checked
                    rc.remove()
                    job_log(jid, f"✓ Volume restored: {vol_name}")
                except Exception as e:
                    job_log(jid, f"⚠ Volume restore failed {vol_name}: {e}")
        # Rebuild create() kwargs from the recorded inspect output.
        cfg_c = config["Config"]
        hcfg = config["HostConfig"]
        kwargs: dict = {
            "image": image_name, "name": container_name,
            "command": cfg_c.get("Cmd"), "entrypoint": cfg_c.get("Entrypoint"),
            "environment": cfg_c.get("Env") or [],
            "working_dir": cfg_c.get("WorkingDir") or None,
            "user": cfg_c.get("User") or None,
            "hostname": cfg_c.get("Hostname") or None,
            "labels": cfg_c.get("Labels") or {},
            "tty": cfg_c.get("Tty", False),
            "stdin_open": cfg_c.get("OpenStdin", False),
        }
        # Ports: exposed ports map to recorded host bindings (None = expose only).
        exposed = cfg_c.get("ExposedPorts") or {}
        port_bindings = hcfg.get("PortBindings") or {}
        if exposed or port_bindings:
            ports = {}
            for pp in exposed:
                binds = port_bindings.get(pp)
                ports[pp] = [b["HostPort"] for b in binds if b.get("HostPort")] if binds else None
            kwargs["ports"] = ports
        restart = hcfg.get("RestartPolicy", {})
        if restart.get("Name"):
            kwargs["restart_policy"] = {"Name": restart["Name"],
                                        "MaximumRetryCount": restart.get("MaximumRetryCount", 0)}
        net_mode = hcfg.get("NetworkMode", "default")
        if net_mode not in ("default", "bridge"):
            kwargs["network_mode"] = net_mode
        # Re-attach every recorded volume/bind mount at its old path.
        vol_binds = []
        for m in mounts:
            dest = m.get("Destination", "")
            mode = "rw" if m.get("RW") is not False else "ro"
            if m.get("Type") == "volume":
                vol_binds.append(f"{m['Name']}:{dest}:{mode}")
            elif m.get("Type") == "bind":
                vol_binds.append(f"{m['Source']}:{dest}:{mode}")
        if vol_binds:
            kwargs["volumes"] = vol_binds
        if hcfg.get("Privileged"):
            kwargs["privileged"] = True
        if hcfg.get("CapAdd"):
            kwargs["cap_add"] = hcfg["CapAdd"]
        # Drop None values so docker-py applies its own defaults.
        ctr = client.containers.create(**{k: v for k, v in kwargs.items() if v is not None})
        job_log(jid, f"✓ Container created: {ctr.name} ({ctr.short_id})")
        if start:
            ctr.start()
            job_log(jid, "✓ Container started")
        job_done(jid)
    except Exception as e:
        job_done(jid, str(e))
# ── Scheduler ─────────────────────────────────────────────────────────────────
# In-process background cron scheduler for automatic backups.
# NOTE(review): timezone is pinned to UTC — schedule times are interpreted in
# UTC, not the server's local time; confirm the UI communicates this.
scheduler = BackgroundScheduler(timezone="UTC")
scheduler.start()
def run_scheduled_backup(schedule_id: str):
    """APScheduler entry point: run one configured backup in a worker thread.

    The schedule is looked up fresh from config.json on every firing, so UI
    edits take effect without re-registering the cron job. Returns the job
    id, or None when the schedule was deleted or disabled.
    """
    cfg = load_config()
    sched = next((s for s in cfg.get("schedules", []) if s["id"] == schedule_id), None)
    if not sched or not sched.get("enabled", True):
        return
    backup_dir = cfg["backup_dir"]
    host = sched["host"]
    container = sched["container"]
    save_image = sched.get("save_image", False)
    pre_hook = sched.get("pre_hook", "")
    # 0/"" count the same as "no retention limit".
    r_count = sched.get("retention_count") or None
    r_days = sched.get("retention_days") or None
    jid = new_job(f"[Scheduled] {container} on {host}")
    def worker():
        # _run_backup is synchronous, so once it returns the job status is
        # final and last_run/last_status can be recorded on the schedule.
        _run_backup(jid, host, container, backup_dir, save_image,
                    pre_hook=pre_hook, retention_count=r_count, retention_days=r_days)
        # NOTE(review): unlocked read of `jobs` — benign since the job is
        # finished, but consider jobs_lock for consistency.
        status = jobs.get(jid, {}).get("status", "unknown")
        cfg2 = load_config()
        for s in cfg2.get("schedules", []):
            if s["id"] == schedule_id:
                s["last_run"] = datetime.now().isoformat()
                s["last_status"] = status
                break
        save_config(cfg2)
    threading.Thread(target=worker, daemon=True).start()
    return jid
def _cron_kwargs(expr: str) -> dict:
    """Translate a 5-field cron expression (or a known alias) into
    CronTrigger keyword arguments.

    Raises ValueError when the expression does not have exactly five fields.
    """
    expr = CRON_ALIASES.get(expr.lower(), expr)
    fields = expr.split()
    if len(fields) != 5:
        raise ValueError(f"Invalid cron: {expr}")
    # Field order follows standard crontab: minute hour day month weekday.
    return dict(zip(("minute", "hour", "day", "month", "day_of_week"), fields))
def _register_schedule(sched: dict):
    """(Re)register the APScheduler cron job for one schedule entry.

    Failures (bad cron, scheduler errors) are logged but never raised, so a
    single broken schedule cannot prevent the others from loading.
    """
    try:
        trigger = CronTrigger(**_cron_kwargs(sched["cron"]))
        scheduler.add_job(run_scheduled_backup, trigger,
                          id=sched["id"], args=[sched["id"]],
                          replace_existing=True)
    except Exception as exc:
        print(f"Warning: schedule {sched['id']} not registered: {exc}")
def _unregister_schedule(schedule_id: str):
    """Remove the APScheduler job for *schedule_id*, ignoring absence."""
    try:
        scheduler.remove_job(schedule_id)
    except Exception:
        # The job may never have been registered (e.g. a disabled schedule).
        pass
# Re-register every enabled schedule persisted from a previous run.
for _entry in load_config().get("schedules", []):
    if not _entry.get("enabled", True):
        continue
    _register_schedule(_entry)
# ── Routes: main page ─────────────────────────────────────────────────────────
@app.route("/")
def index():
    """Serve the single-page UI shell; all data is fetched via /api/*."""
    return render_template("index.html")
# ── Routes: config ────────────────────────────────────────────────────────────
@app.route("/api/config", methods=["GET", "POST"])
@login_required
def api_config():
    """Read or update the application configuration.

    POST accepts {"backup_dir": ...} and persists it. The stored "auth"
    section (password hash) is stripped from every response — previously
    only the GET branch stripped it, so a POST leaked the hash back to
    the browser.
    """
    cfg = load_config()
    if request.method == "POST":
        data = request.json or {}
        if "backup_dir" in data:
            cfg["backup_dir"] = data["backup_dir"]
        save_config(cfg)  # persist the full config (including auth) first
    cfg.pop("auth", None)  # never send password hash to browser
    return jsonify(cfg)
# ── Routes: hosts ─────────────────────────────────────────────────────────────
@app.route("/api/hosts")
@login_required
def api_hosts_list():
    """Return every configured host with reachability and daemon info."""
    result = []
    for host in load_config()["hosts"]:
        entry = {"host": host}
        try:
            client = make_client(host)
            client.ping()
            info = client.info()
            entry.update(ok=True,
                         name=info.get("Name", ""),
                         version=info.get("ServerVersion", ""),
                         containers=info.get("Containers", 0))
        except Exception as e:
            # Unreachable host: report the error instead of failing the list.
            entry.update(ok=False, error=str(e))
        result.append(entry)
    return jsonify(result)
@app.route("/api/hosts", methods=["POST"])
@login_required
def api_hosts_add():
    """Add a Docker host to the config; duplicates are silently ignored."""
    host = (request.json or {}).get("host", "").strip()
    if not host:
        return jsonify({"error": "host required"}), 400
    cfg = load_config()
    if host not in cfg["hosts"]:
        cfg["hosts"].append(host)
    # Save unconditionally so the on-disk config is always refreshed.
    save_config(cfg)
    return jsonify(cfg)
@app.route("/api/hosts/<path:host>", methods=["DELETE"])
@login_required
def api_hosts_delete(host):
    """Remove *host* from the configured host list (no-op if absent)."""
    cfg = load_config()
    remaining = [h for h in cfg["hosts"] if h != host]
    cfg["hosts"] = remaining
    save_config(cfg)
    return jsonify(cfg)
# ── Routes: containers ────────────────────────────────────────────────────────
@app.route("/api/containers")
@login_required
def api_containers():
    """List containers across all hosts (or one via ?host=).

    ?all=true includes stopped containers. Per-host failures are reported
    as error rows rather than aborting the whole listing.
    """
    cfg = load_config()
    show_all = request.args.get("all", "false").lower() == "true"
    host_filter = request.args.get("host")
    targets = [host_filter] if host_filter else cfg["hosts"]
    rows = []
    for host in targets:
        try:
            client = make_client(host)
            for ctr in client.containers.list(all=show_all):
                # Collapse each exposed port to its first host binding.
                port_map = {}
                for key, binds in (ctr.ports or {}).items():
                    port_map[key] = binds[0]["HostPort"] if binds else None
                tags = ctr.image.tags
                rows.append({"host": host, "id": ctr.short_id, "name": ctr.name,
                             "image": tags[0] if tags else ctr.image.short_id,
                             "status": ctr.status, "ports": port_map})
        except Exception as e:
            rows.append({"host": host, "id": None, "name": None,
                         "image": None, "status": "error", "error": str(e)})
    return jsonify(rows)
# ── Routes: backups ───────────────────────────────────────────────────────────
@app.route("/api/backups")
@login_required
def api_backups():
    """List backup archives (newest first) with metadata parsed from each tar."""
    backup_dir = Path(load_config()["backup_dir"])
    if not backup_dir.exists():
        return jsonify([])
    archives = sorted(backup_dir.glob("*.tar"),
                      key=lambda p: p.stat().st_mtime, reverse=True)
    entries = []
    for archive in archives:
        info = archive.stat()
        try:
            with tarfile.open(archive, "r") as tar:
                members = tar.getmembers()
                meta = json.loads(tar.extractfile("config.json").read())
                orig_name = meta["Name"].lstrip("/")
                image = meta["Config"]["Image"]
                has_image = any(m.name == "image.tar" for m in members)
                volumes = [m.name[len("volumes/"):-len(".tar")]
                           for m in members
                           if m.name.startswith("volumes/")
                           and m.name.endswith(".tar")]
        except Exception:
            # Unreadable/foreign archive: fall back to filename-derived info.
            orig_name = archive.stem
            image = "unknown"
            has_image = False
            volumes = []
        entries.append({
            "file": archive.name, "path": str(archive),
            "size_mb": round(info.st_size / 1024 / 1024, 1),
            "mtime": datetime.fromtimestamp(info.st_mtime).isoformat(),
            "container": orig_name, "image": image,
            "has_image": has_image, "volumes": volumes,
        })
    return jsonify(entries)
@app.route("/api/backups/<path:filename>", methods=["DELETE"])
@login_required
def api_backup_delete(filename):
    """Delete one backup archive from the backup directory.

    The <path:> converter accepts slashes, so the resolved target must be
    confined to the backup directory — previously a filename such as
    "../../etc/passwd" could delete arbitrary files on the server.
    """
    cfg = load_config()
    backup_dir = Path(cfg["backup_dir"]).resolve()
    path = (backup_dir / filename).resolve()
    try:
        # relative_to raises ValueError when path escapes backup_dir.
        path.relative_to(backup_dir)
    except ValueError:
        return jsonify({"error": "invalid filename"}), 400
    if not path.exists():
        return jsonify({"error": "not found"}), 404
    path.unlink()
    return jsonify({"ok": True})
# ── Routes: backup / restore / bulk ──────────────────────────────────────────
@app.route("/api/backup", methods=["POST"])
@login_required
def api_backup_start():
    """Launch a background backup job for one container; returns the job id."""
    data = request.json or {}
    container = data.get("container")
    if not container:
        return jsonify({"error": "container required"}), 400
    host = data.get("host", "local")
    cfg = load_config()
    jid = new_job(f"Backup {container} on {host}")
    threading.Thread(
        target=_run_backup,
        args=(jid, host, container, cfg["backup_dir"],
              data.get("save_image", False),
              data.get("pre_hook", ""),
              data.get("retention_count") or None,
              data.get("retention_days") or None),
        daemon=True,
    ).start()
    return jsonify({"job_id": jid})
@app.route("/api/bulk-backup", methods=["POST"])
@login_required
def api_bulk_backup():
    """Start one backup job per running container on the given host."""
    data = request.json or {}
    host = data.get("host", "local")
    save_image = data.get("save_image", False)
    pre_hook = data.get("pre_hook", "")
    try:
        client = make_client(host)
        client.ping()
        running = client.containers.list(all=False)  # running only
    except Exception as e:
        return jsonify({"error": str(e)}), 500
    if not running:
        return jsonify({"error": "No running containers on host"}), 400
    backup_dir = load_config()["backup_dir"]
    started = []
    for ctr in running:
        jid = new_job(f"Bulk: {ctr.name} on {host}")
        worker = threading.Thread(
            target=_run_backup,
            args=(jid, host, ctr.name, backup_dir, save_image, pre_hook),
            daemon=True)
        worker.start()
        started.append({"job_id": jid, "container": ctr.name})
    return jsonify({"jobs": started})
@app.route("/api/restore", methods=["POST"])
@login_required
def api_restore_start():
    """Launch a background restore of a backup archive onto a host."""
    data = request.json or {}
    backup_path = data.get("backup_path")
    if not backup_path:
        return jsonify({"error": "backup_path required"}), 400
    host = data.get("host", "local")
    jid = new_job(f"Restore {Path(backup_path).name}{host}")
    worker = threading.Thread(
        target=_run_restore,
        args=(jid, host, backup_path,
              data.get("new_name") or None,
              data.get("start", False),
              data.get("load_image", False)),
        daemon=True)
    worker.start()
    return jsonify({"job_id": jid})
# ── Routes: schedules ─────────────────────────────────────────────────────────
@app.route("/api/schedules")
@login_required
def api_schedules_list():
    """List all schedules, annotated with each one's next APScheduler run."""
    enriched = []
    for sched in load_config().get("schedules", []):
        job = scheduler.get_job(sched["id"])
        next_run = None
        if job is not None and job.next_run_time:
            next_run = job.next_run_time.isoformat()
        sched["next_run"] = next_run  # transient; never written back to disk
        enriched.append(sched)
    return jsonify(enriched)
@app.route("/api/schedules", methods=["POST"])
@login_required
def api_schedules_create():
    """Create a backup schedule; validates the cron expression before saving."""
    data = request.json or {}
    if not all(data.get(k) for k in ("host", "container", "cron")):
        return jsonify({"error": "host, container, cron required"}), 400
    try:
        _cron_kwargs(data["cron"])  # reject malformed cron up front
    except ValueError as e:
        return jsonify({"error": str(e)}), 400
    sched = {
        "id": str(uuid.uuid4())[:8],
        "host": data["host"],
        "container": data["container"],
        "cron": data["cron"],
        "pre_hook": data.get("pre_hook", ""),
        "save_image": data.get("save_image", False),
        "retention_count": data.get("retention_count") or None,
        "retention_days": data.get("retention_days") or None,
        "enabled": data.get("enabled", True),
        "last_run": None,
        "last_status": None,
    }
    cfg = load_config()
    cfg.setdefault("schedules", []).append(sched)
    save_config(cfg)
    if sched["enabled"]:
        _register_schedule(sched)
    return jsonify(sched), 201
@app.route("/api/schedules/<sid>", methods=["PUT"])
@login_required
def api_schedules_update(sid):
    """Update a schedule's fields, re-validate its cron, and re-register it."""
    data = request.json or {}
    cfg = load_config()
    sched = None
    for entry in cfg.get("schedules", []):
        if entry["id"] == sid:
            sched = entry
            break
    if sched is None:
        return jsonify({"error": "not found"}), 404
    for key in ("host", "container", "cron", "pre_hook", "save_image",
                "retention_count", "retention_days", "enabled"):
        if key in data:
            sched[key] = data[key]
    if "cron" in data:
        try:
            _cron_kwargs(sched["cron"])
        except ValueError as e:
            # Invalid cron: nothing has been persisted yet, so just bail.
            return jsonify({"error": str(e)}), 400
    save_config(cfg)
    # Re-register from scratch so cron/enabled changes take effect.
    _unregister_schedule(sid)
    if sched.get("enabled", True):
        _register_schedule(sched)
    return jsonify(sched)
@app.route("/api/schedules/<sid>", methods=["DELETE"])
@login_required
def api_schedules_delete(sid):
    """Delete a schedule and unregister its cron job."""
    cfg = load_config()
    kept = [s for s in cfg.get("schedules", []) if s["id"] != sid]
    cfg["schedules"] = kept
    save_config(cfg)
    _unregister_schedule(sid)
    return jsonify({"ok": True})
@app.route("/api/schedules/<sid>/run", methods=["POST"])
@login_required
def api_schedules_run_now(sid):
    """Trigger a schedule immediately, outside its cron cadence."""
    job_id = run_scheduled_backup(sid)
    if job_id:
        return jsonify({"job_id": job_id})
    return jsonify({"error": "schedule not found or disabled"}), 404
# ── Routes: jobs ──────────────────────────────────────────────────────────────
@app.route("/api/jobs")
@login_required
def api_jobs():
    # Serialize under jobs_lock so concurrent log appends from worker
    # threads can't race the JSON encoding of the job dicts.
    with jobs_lock:
        return jsonify(list(jobs.values()))
@app.route("/api/jobs/<jid>/stream")
def api_job_stream(jid):
    """Stream a job's log lines to the browser as Server-Sent Events.

    Polls the in-memory job record, emitting any new log lines, and ends
    the stream once the job reports done/error or its "[DONE]" sentinel.
    """
    # Auth check without decorator (SSE doesn't send JSON)
    if auth_enabled() and not session.get("authenticated"):
        return Response("data: unauthorized\n\n", mimetype="text/event-stream", status=401)
    def generate():
        last = 0
        # Fix: `line` was previously unbound when the first poll arrived
        # before the job produced any log output, raising NameError below.
        line = ""
        while True:
            with jobs_lock:
                job = jobs.get(jid)
            if not job:
                yield f"data: Job {jid} not found\n\n"
                break
            logs = job["logs"]
            while last < len(logs):
                line = logs[last].replace("\n", " ")
                yield f"data: {line}\n\n"
                last += 1
            if line == "[DONE]":
                return
            if job["status"] in ("done", "error"):
                break
            time.sleep(0.15)
    return Response(stream_with_context(generate()),
                    mimetype="text/event-stream",
                    headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"})
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", default="0.0.0.0")
    parser.add_argument("--port", type=int, default=5999)
    parser.add_argument("--debug", action="store_true")
    opts = parser.parse_args()
    print(f"Docker Backup UI → http://{opts.host}:{opts.port}")
    app.run(host=opts.host, port=opts.port, debug=opts.debug)

4
webapp/start.sh Executable file
View File

@@ -0,0 +1,4 @@
#!/bin/bash
# Start Docker Backup Web UI
# Run from the script's own directory so app.py and templates/ resolve.
# Guard the cd (SC2164): without it, a failed cd would exec app.py from
# whatever directory the caller happened to be in.
cd "$(dirname "$0")" || exit 1
exec python3 app.py "$@"

1109
webapp/templates/index.html Normal file

File diff suppressed because it is too large Load Diff