MQTT-based live CPU/RAM metrics + remove auth from HA control

- Enrich server data with MQTT system topics (cpu_usage_percent,
  ram_usage_percent, temp, model etc.) published by Unraid MQTT Agent
- Works for both Daddelolymp and Adriahub topics
- MQTT overlay runs on every request (even cached) for fresh metrics
- Remove JWT auth from /api/ha/control — local dashboard doesn't need it
- Add cpu brand field to GraphQL query

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Sam 2026-03-02 22:41:16 +01:00
parent 94727ebe70
commit 5d3d4f4015
3 changed files with 97 additions and 6 deletions

View file

@ -5,10 +5,9 @@ from __future__ import annotations
import logging import logging
from typing import Any, Dict, Optional from typing import Any, Dict, Optional
from fastapi import APIRouter, Depends from fastapi import APIRouter
from pydantic import BaseModel from pydantic import BaseModel
from server.auth import require_admin
from server.cache import cache from server.cache import cache
from server.config import get_settings from server.config import get_settings
from server.services.ha_service import call_ha_service, fetch_ha_data from server.services.ha_service import call_ha_service, fetch_ha_data
@ -58,7 +57,6 @@ class HAControlRequest(BaseModel):
@router.post("/ha/control") @router.post("/ha/control")
async def control_ha( async def control_ha(
body: HAControlRequest, body: HAControlRequest,
admin_user: str = Depends(require_admin), # noqa: ARG001
) -> Dict[str, Any]: ) -> Dict[str, Any]:
"""Control a Home Assistant entity (toggle light, open cover, etc.).""" """Control a Home Assistant entity (toggle light, open cover, etc.)."""

View file

@ -9,6 +9,7 @@ from fastapi import APIRouter
from server.cache import cache from server.cache import cache
from server.config import get_settings from server.config import get_settings
from server.services.mqtt_service import mqtt_service
from server.services.unraid_service import ServerConfig, fetch_all_servers from server.services.unraid_service import ServerConfig, fetch_all_servers
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -18,6 +19,93 @@ router = APIRouter(prefix="/api", tags=["servers"])
CACHE_KEY = "servers" CACHE_KEY = "servers"
# ---------------------------------------------------------------------------
# MQTT enrichment — overlay live system metrics from MQTT topics
# ---------------------------------------------------------------------------
def _as_float(value: Any) -> "float | None":
    """Best-effort conversion of an MQTT payload value to ``float``.

    Returns ``None`` when the value is absent or not numeric, so callers
    can skip fields the agent did not publish (or published in an
    unexpected format) instead of raising mid-enrichment.
    """
    try:
        return float(value)
    except (TypeError, ValueError):
        return None


def _enrich_from_mqtt(servers: List[Dict[str, Any]]) -> None:
    """Merge live CPU/RAM data from MQTT ``<prefix>/system`` topics.

    The Unraid MQTT Agent plugin publishes JSON payloads to topics like
    ``unraid-daddelolymp/system`` or ``Adriahub/system`` every ~15 s.
    These contain live ``cpu_usage_percent``, ``ram_usage_percent``, etc.
    that the GraphQL API does not expose.

    Mutates each entry of *servers* in place; servers without a matching
    MQTT message (or with a non-dict payload) are left untouched.
    """
    store = mqtt_service.store
    for srv in servers:
        name = srv.get("name", "")
        if not name:
            continue

        # Try common topic patterns used by the agent on different hosts.
        system_data: Dict[str, Any] | None = None
        for pattern in (
            f"{name}/system",                 # "Adriahub/system"
            f"unraid-{name.lower()}/system",  # "unraid-daddelolymp/system"
            f"unraid-{name}/system",          # "unraid-Daddelolymp/system"
        ):
            msg = store.get(pattern)
            if msg is not None and isinstance(msg.payload, dict):
                system_data = msg.payload
                break

        if not system_data:
            continue

        # A sparse GraphQL result may lack these sub-dicts; create them so
        # the assignments below cannot raise KeyError.
        cpu = srv.setdefault("cpu", {})
        ram = srv.setdefault("ram", {})

        # --- CPU ---
        cpu_pct = _as_float(system_data.get("cpu_usage_percent"))
        if cpu_pct is not None:
            cpu["usage_pct"] = round(cpu_pct, 1)
        cpu_model = system_data.get("cpu_model")
        if cpu_model:
            cpu["brand"] = cpu_model
        cpu_temp = _as_float(system_data.get("cpu_temp_celsius"))
        if cpu_temp is not None:
            cpu["temp_c"] = cpu_temp
        cores = system_data.get("cpu_cores")
        if cores:
            cpu["cores"] = cores
        threads = system_data.get("cpu_threads")
        if threads:
            cpu["threads"] = threads

        # --- RAM ---
        ram_pct = _as_float(system_data.get("ram_usage_percent"))
        if ram_pct is not None:
            ram["pct"] = round(ram_pct, 1)
        ram_total = _as_float(system_data.get("ram_total_bytes"))
        if ram_total:
            ram["total_gb"] = round(ram_total / (1024 ** 3), 1)
        ram_used = _as_float(system_data.get("ram_used_bytes"))
        if ram_used:
            ram["used_gb"] = round(ram_used / (1024 ** 3), 1)

        # --- Uptime ---
        uptime_secs = _as_float(system_data.get("uptime_seconds"))
        if uptime_secs:
            total = int(uptime_secs)  # avoid "1.0d 2.0h" for float payloads
            days, rem = divmod(total, 86400)
            srv["uptime"] = f"{days}d {rem // 3600}h"

        # Receiving a system payload means the box is up right now.
        srv["online"] = True
        logger.debug(
            "[UNRAID] %s: MQTT enriched — CPU %.1f%% %.0f°C, RAM %.1f%%",
            name,
            cpu.get("usage_pct", 0),
            cpu.get("temp_c", 0) or 0,
            ram.get("pct", 0),
        )
@router.get("/servers") @router.get("/servers")
async def get_servers() -> Dict[str, Any]: async def get_servers() -> Dict[str, Any]:
"""Return status information for all configured Unraid servers. """Return status information for all configured Unraid servers.
@ -32,6 +120,8 @@ async def get_servers() -> Dict[str, Any]:
# --- cache hit? ----------------------------------------------------------- # --- cache hit? -----------------------------------------------------------
cached = await cache.get(CACHE_KEY) cached = await cache.get(CACHE_KEY)
if cached is not None: if cached is not None:
# Always overlay fresh MQTT data even on cache hits
_enrich_from_mqtt(cached.get("servers", []))
return cached return cached
# --- cache miss ----------------------------------------------------------- # --- cache miss -----------------------------------------------------------
@ -56,6 +146,9 @@ async def get_servers() -> Dict[str, Any]:
"message": str(exc), "message": str(exc),
} }
# Overlay live MQTT system metrics
_enrich_from_mqtt(servers_data)
payload: Dict[str, Any] = { payload: Dict[str, Any] = {
"servers": servers_data, "servers": servers_data,
} }

View file

@ -17,7 +17,7 @@ _GRAPHQL_QUERY = """
online online
info { info {
os { hostname uptime } os { hostname uptime }
cpu { model cores threads } cpu { model cores threads manufacturer brand }
memory { layout { size type } } memory { layout { size type } }
} }
docker { docker {
@ -72,9 +72,9 @@ def _parse_graphql_response(data: Dict[str, Any], result: Dict[str, Any]) -> Non
cpu_info = info.get("cpu", {}) cpu_info = info.get("cpu", {})
result["cpu"]["cores"] = cpu_info.get("cores", 0) result["cpu"]["cores"] = cpu_info.get("cores", 0)
# GraphQL API doesn't expose CPU usage % — keep 0
# Store threads for the frontend to show
result["cpu"]["threads"] = cpu_info.get("threads", 0) result["cpu"]["threads"] = cpu_info.get("threads", 0)
result["cpu"]["brand"] = cpu_info.get("brand", "")
# GraphQL API doesn't expose CPU usage % — keep 0
# Memory: sum layout slots for total GB # Memory: sum layout slots for total GB
mem_layout = info.get("memory", {}).get("layout", []) mem_layout = info.get("memory", {}).get("layout", [])