fix: remove dead code (toast stubs, unused Redis key, tunnel manager fields)
- Remove 7 no-op exported stubs from toast.tsx (ToastProvider, ToastViewport, Toast, ToastTitle, ToastDescription, ToastClose, useToasts) — nothing imports them
- Remove the fwFailKey variable and its Set() call from worker.go — the firmware:check-failed Redis key was never read anywhere
- Remove the unused deviceStore and credCache fields from the tunnel.Manager struct and drop the corresponding parameters from NewManager(); update the call site in main.go and all test usages

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -38,13 +38,3 @@ export function toast(options: ToastOptions) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Backward-compatible no-op exports for AppLayout migration
|
|
||||||
// These were used by the old Radix Toast implementation
|
|
||||||
export const ToastProvider = ({ children }: { children: React.ReactNode }) => <>{children}</>
|
|
||||||
export const ToastViewport = () => null
|
|
||||||
export const Toast = () => null
|
|
||||||
export const ToastTitle = () => null
|
|
||||||
export const ToastDescription = () => null
|
|
||||||
export const ToastClose = () => null
|
|
||||||
// eslint-disable-next-line react-refresh/only-export-components
|
|
||||||
export const useToasts = () => ({ toasts: [] as never[], dismiss: () => {} })
|
|
||||||
|
|||||||
@@ -196,8 +196,6 @@ func main() {
|
|||||||
cfg.TunnelPortMin,
|
cfg.TunnelPortMin,
|
||||||
cfg.TunnelPortMax,
|
cfg.TunnelPortMax,
|
||||||
time.Duration(cfg.TunnelIdleTimeout)*time.Second,
|
time.Duration(cfg.TunnelIdleTimeout)*time.Second,
|
||||||
deviceStore,
|
|
||||||
credentialCache,
|
|
||||||
)
|
)
|
||||||
defer tunnelMgr.Shutdown()
|
defer tunnelMgr.Shutdown()
|
||||||
slog.Info("tunnel manager initialized",
|
slog.Info("tunnel manager initialized",
|
||||||
|
|||||||
@@ -390,10 +390,6 @@ func PollDevice(
|
|||||||
slog.Warn("firmware check failed", "device_id", dev.ID, "error", fwErr)
|
slog.Warn("firmware check failed", "device_id", dev.ID, "error", fwErr)
|
||||||
// Set cooldown on failure too, but shorter (6h) so we retry sooner than success (24h).
|
// Set cooldown on failure too, but shorter (6h) so we retry sooner than success (24h).
|
||||||
// Prevents hammering devices that can't reach MikroTik update servers every poll cycle.
|
// Prevents hammering devices that can't reach MikroTik update servers every poll cycle.
|
||||||
fwFailKey := fmt.Sprintf("firmware:check-failed:%s", dev.ID)
|
|
||||||
if err := redisClientForFirmware.Set(ctx, fwFailKey, "1", 6*time.Hour).Err(); err != nil {
|
|
||||||
slog.Warn("Redis SET failed", "key", fwFailKey, "error", err)
|
|
||||||
}
|
|
||||||
// Also set the main checked key to prevent the success path from re-checking.
|
// Also set the main checked key to prevent the success path from re-checking.
|
||||||
if err := redisClientForFirmware.Set(ctx, fwCacheKey, "1", 6*time.Hour).Err(); err != nil {
|
if err := redisClientForFirmware.Set(ctx, fwCacheKey, "1", 6*time.Hour).Err(); err != nil {
|
||||||
slog.Warn("Redis SET failed", "key", fwCacheKey, "error", err)
|
slog.Warn("Redis SET failed", "key", fwCacheKey, "error", err)
|
||||||
|
|||||||
@@ -9,8 +9,6 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"github.com/staack/the-other-dude/poller/internal/store"
|
|
||||||
"github.com/staack/the-other-dude/poller/internal/vault"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// OpenTunnelResponse is returned by Manager.OpenTunnel.
|
// OpenTunnelResponse is returned by Manager.OpenTunnel.
|
||||||
@@ -32,26 +30,22 @@ type TunnelStatus struct {
|
|||||||
// Manager orchestrates the lifecycle of WinBox tunnels: open, close, idle
|
// Manager orchestrates the lifecycle of WinBox tunnels: open, close, idle
|
||||||
// cleanup, and status queries.
|
// cleanup, and status queries.
|
||||||
type Manager struct {
|
type Manager struct {
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
tunnels map[string]*Tunnel
|
tunnels map[string]*Tunnel
|
||||||
portPool *PortPool
|
portPool *PortPool
|
||||||
idleTime time.Duration
|
idleTime time.Duration
|
||||||
deviceStore *store.DeviceStore
|
cancel context.CancelFunc
|
||||||
credCache *vault.CredentialCache
|
|
||||||
cancel context.CancelFunc
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewManager creates a Manager with ports in [portMin, portMax] and an idle
|
// NewManager creates a Manager with ports in [portMin, portMax] and an idle
|
||||||
// timeout of idleTime. deviceStore and credCache may be nil for tests.
|
// timeout of idleTime.
|
||||||
func NewManager(portMin, portMax int, idleTime time.Duration, ds *store.DeviceStore, cc *vault.CredentialCache) *Manager {
|
func NewManager(portMin, portMax int, idleTime time.Duration) *Manager {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
m := &Manager{
|
m := &Manager{
|
||||||
tunnels: make(map[string]*Tunnel),
|
tunnels: make(map[string]*Tunnel),
|
||||||
portPool: NewPortPool(portMin, portMax),
|
portPool: NewPortPool(portMin, portMax),
|
||||||
idleTime: idleTime,
|
idleTime: idleTime,
|
||||||
deviceStore: ds,
|
cancel: cancel,
|
||||||
credCache: cc,
|
|
||||||
cancel: cancel,
|
|
||||||
}
|
}
|
||||||
go m.idleLoop(ctx)
|
go m.idleLoop(ctx)
|
||||||
return m
|
return m
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ func TestManager_OpenTunnel(t *testing.T) {
|
|||||||
routerAddr, cleanup := mockRouter(t)
|
routerAddr, cleanup := mockRouter(t)
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
mgr := NewManager(49000, 49010, 5*time.Minute, nil, nil)
|
mgr := NewManager(49000, 49010, 5*time.Minute)
|
||||||
defer mgr.Shutdown()
|
defer mgr.Shutdown()
|
||||||
|
|
||||||
resp, err := mgr.OpenTunnel("dev-1", "ten-1", "usr-1", routerAddr)
|
resp, err := mgr.OpenTunnel("dev-1", "ten-1", "usr-1", routerAddr)
|
||||||
@@ -26,7 +26,7 @@ func TestManager_CloseTunnel(t *testing.T) {
|
|||||||
routerAddr, cleanup := mockRouter(t)
|
routerAddr, cleanup := mockRouter(t)
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
mgr := NewManager(49000, 49010, 5*time.Minute, nil, nil)
|
mgr := NewManager(49000, 49010, 5*time.Minute)
|
||||||
defer mgr.Shutdown()
|
defer mgr.Shutdown()
|
||||||
|
|
||||||
resp, _ := mgr.OpenTunnel("dev-1", "ten-1", "usr-1", routerAddr)
|
resp, _ := mgr.OpenTunnel("dev-1", "ten-1", "usr-1", routerAddr)
|
||||||
@@ -43,7 +43,7 @@ func TestManager_PortExhaustion(t *testing.T) {
|
|||||||
routerAddr, cleanup := mockRouter(t)
|
routerAddr, cleanup := mockRouter(t)
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
mgr := NewManager(49000, 49001, 5*time.Minute, nil, nil) // 2 ports
|
mgr := NewManager(49000, 49001, 5*time.Minute) // 2 ports
|
||||||
defer mgr.Shutdown()
|
defer mgr.Shutdown()
|
||||||
|
|
||||||
_, err := mgr.OpenTunnel("dev-1", "ten-1", "usr-1", routerAddr)
|
_, err := mgr.OpenTunnel("dev-1", "ten-1", "usr-1", routerAddr)
|
||||||
@@ -58,7 +58,7 @@ func TestManager_IdleCleanup(t *testing.T) {
|
|||||||
routerAddr, cleanup := mockRouter(t)
|
routerAddr, cleanup := mockRouter(t)
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
mgr := NewManager(49000, 49010, 100*time.Millisecond, nil, nil) // very short idle
|
mgr := NewManager(49000, 49010, 100*time.Millisecond) // very short idle
|
||||||
defer mgr.Shutdown()
|
defer mgr.Shutdown()
|
||||||
|
|
||||||
resp, _ := mgr.OpenTunnel("dev-1", "ten-1", "usr-1", routerAddr)
|
resp, _ := mgr.OpenTunnel("dev-1", "ten-1", "usr-1", routerAddr)
|
||||||
@@ -73,7 +73,7 @@ func TestManager_StatusList(t *testing.T) {
|
|||||||
routerAddr, cleanup := mockRouter(t)
|
routerAddr, cleanup := mockRouter(t)
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
mgr := NewManager(49000, 49010, 5*time.Minute, nil, nil)
|
mgr := NewManager(49000, 49010, 5*time.Minute)
|
||||||
defer mgr.Shutdown()
|
defer mgr.Shutdown()
|
||||||
|
|
||||||
mgr.OpenTunnel("dev-1", "ten-1", "usr-1", routerAddr)
|
mgr.OpenTunnel("dev-1", "ten-1", "usr-1", routerAddr)
|
||||||
|
|||||||
Reference in New Issue
Block a user