Compare commits

...

5 Commits
main ... dev

230 changed files with 47630 additions and 18284 deletions

View File

@ -3,12 +3,9 @@ DATA_ROOT=../data
BOTS_WORKSPACE_ROOT=../workspace/bots BOTS_WORKSPACE_ROOT=../workspace/bots
# Database # Database
# SQLite (recommended): leave DATABASE_URL unset, backend will use: # PostgreSQL:
# sqlite:///{DATA_ROOT}/nanobot_dashboard.db DATABASE_URL=postgresql+psycopg://user:password@127.0.0.1:5432/nanobot_dashboard
# DATABASE_URL=sqlite:///../data/nanobot_dashboard.db # MySQL:
# PostgreSQL example:
# DATABASE_URL=postgresql+psycopg://user:password@127.0.0.1:5432/nanobot_dashboard
# MySQL example:
# DATABASE_URL=mysql+pymysql://user:password@127.0.0.1:3306/nanobot_dashboard # DATABASE_URL=mysql+pymysql://user:password@127.0.0.1:3306/nanobot_dashboard
# Show SQL statements in backend logs (debug only). # Show SQL statements in backend logs (debug only).
DATABASE_ECHO=true DATABASE_ECHO=true
@ -50,6 +47,8 @@ STT_DEVICE=cpu
APP_HOST=0.0.0.0 APP_HOST=0.0.0.0
APP_PORT=8000 APP_PORT=8000
APP_RELOAD=true APP_RELOAD=true
APP_LOG_LEVEL=warning
APP_ACCESS_LOG=false
# Optional overrides (fallback only; usually keep empty when using template files) # Optional overrides (fallback only; usually keep empty when using template files)
DEFAULT_AGENTS_MD= DEFAULT_AGENTS_MD=

View File

@ -0,0 +1,124 @@
from typing import List, Optional
from fastapi import APIRouter, Depends, File, Form, UploadFile
from sqlmodel import Session
from core.database import get_session
from models.bot import NanobotImage
from api.dashboard_router_support import DashboardRouterDeps
def build_dashboard_assets_router(*, deps: DashboardRouterDeps) -> APIRouter:
    """Build the dashboard router for images, provider tests and skills.

    Every handler is a thin delegation to a service object held by *deps*;
    this function only binds HTTP routes to those service methods.
    """
    router = APIRouter()

    @router.get("/api/images", response_model=List[NanobotImage])
    def list_images(session: Session = Depends(get_session)):
        # Registered nanobot images stored in the database.
        return deps.image_service.list_images(session=session)

    @router.delete("/api/images/{tag:path}")
    def delete_image(tag: str, session: Session = Depends(get_session)):
        # The ``:path`` converter lets image tags contain slashes.
        return deps.image_service.delete_image(session=session, tag=tag)

    @router.get("/api/docker-images")
    def list_docker_images(repository: str = "nanobot-base"):
        return deps.image_service.list_docker_images(repository=repository)

    @router.post("/api/images/register")
    def register_image(payload: dict, session: Session = Depends(get_session)):
        # Payload schema is validated inside the service, not by FastAPI.
        return deps.image_service.register_image(session=session, payload=payload)

    @router.post("/api/providers/test")
    async def test_provider(payload: dict):
        return await deps.provider_test_service.test_provider(payload=payload)

    @router.get("/api/platform/skills")
    def list_skill_market(session: Session = Depends(get_session)):
        return deps.skill_service.list_market_items(session=session)

    @router.post("/api/platform/skills")
    async def create_skill_market_item(
        skill_key: str = Form(""),
        display_name: str = Form(""),
        description: str = Form(""),
        file: UploadFile = File(...),
        session: Session = Depends(get_session),
    ):
        # Multipart form: metadata fields plus the mandatory skill archive.
        return await deps.skill_service.create_market_item(
            session=session,
            skill_key=skill_key,
            display_name=display_name,
            description=description,
            file=file,
        )

    @router.put("/api/platform/skills/{skill_id}")
    async def update_skill_market_item(
        skill_id: int,
        skill_key: str = Form(""),
        display_name: str = Form(""),
        description: str = Form(""),
        file: Optional[UploadFile] = File(None),
        session: Session = Depends(get_session),
    ):
        # ``file`` is optional on update: metadata may change without re-upload.
        return await deps.skill_service.update_market_item(
            session=session,
            skill_id=skill_id,
            skill_key=skill_key,
            display_name=display_name,
            description=description,
            file=file,
        )

    @router.delete("/api/platform/skills/{skill_id}")
    def delete_skill_market_item(skill_id: int, session: Session = Depends(get_session)):
        return deps.skill_service.delete_market_item(session=session, skill_id=skill_id)

    @router.get("/api/bots/{bot_id}/skills")
    def list_bot_skills(bot_id: str, session: Session = Depends(get_session)):
        # Bot-scoped skill endpoints also pass the edge-state resolver so the
        # service can reach skills living on a remote (edge) workspace.
        return deps.skill_service.list_workspace_skills_for_bot(
            session=session,
            bot_id=bot_id,
            resolve_edge_state_context=deps.resolve_edge_state_context,
            logger=deps.logger,
        )

    @router.get("/api/bots/{bot_id}/skill-market")
    def list_bot_skill_market(bot_id: str, session: Session = Depends(get_session)):
        return deps.skill_service.list_bot_market_items_for_bot(
            session=session,
            bot_id=bot_id,
            resolve_edge_state_context=deps.resolve_edge_state_context,
            logger=deps.logger,
        )

    @router.post("/api/bots/{bot_id}/skill-market/{skill_id}/install")
    def install_bot_skill_from_market(bot_id: str, skill_id: int, session: Session = Depends(get_session)):
        return deps.skill_service.install_market_item_for_bot_checked(
            session=session,
            bot_id=bot_id,
            skill_id=skill_id,
            resolve_edge_state_context=deps.resolve_edge_state_context,
            logger=deps.logger,
        )

    @router.post("/api/bots/{bot_id}/skills/upload")
    async def upload_bot_skill_zip(bot_id: str, file: UploadFile = File(...), session: Session = Depends(get_session)):
        return await deps.skill_service.upload_bot_skill_zip_for_bot(
            session=session,
            bot_id=bot_id,
            file=file,
            resolve_edge_state_context=deps.resolve_edge_state_context,
            logger=deps.logger,
        )

    @router.delete("/api/bots/{bot_id}/skills/{skill_name}")
    def delete_bot_skill(bot_id: str, skill_name: str, session: Session = Depends(get_session)):
        return deps.skill_service.delete_workspace_skill_for_bot(
            session=session,
            bot_id=bot_id,
            skill_name=skill_name,
            resolve_edge_state_context=deps.resolve_edge_state_context,
        )

    return router

View File

@ -0,0 +1,153 @@
from fastapi import APIRouter, Depends, Request
from sqlmodel import Session
from core.database import get_session
from schemas.dashboard import (
BotCreateRequest,
BotDeployRequest,
BotEnvParamsUpdateRequest,
BotMcpConfigUpdateRequest,
BotToolsConfigUpdateRequest,
BotUpdateRequest,
ChannelConfigRequest,
ChannelConfigUpdateRequest,
)
from api.dashboard_router_support import DashboardRouterDeps
def build_dashboard_bot_admin_router(*, deps: DashboardRouterDeps) -> APIRouter:
    """Build the dashboard router for bot lifecycle, channels and config.

    Handlers delegate to the lifecycle/query/channel/config services carried
    by *deps*; endpoints that touch runtime state also forward
    ``request.app.state`` so the services can reach app-level singletons.
    """
    router = APIRouter()

    @router.post("/api/bots")
    def create_bot(payload: BotCreateRequest, session: Session = Depends(get_session)):
        return deps.bot_lifecycle_service.create_bot(session=session, payload=payload)

    @router.get("/api/bots")
    def list_bots(request: Request, session: Session = Depends(get_session)):
        # ``sys_user_id`` is read from request state (presumably set by auth
        # middleware — confirm); 0 means "no user context".
        current_user_id = int(getattr(request.state, "sys_user_id", 0) or 0)
        return deps.bot_query_service.list_bots(app_state=request.app.state, session=session, current_user_id=current_user_id)

    @router.get("/api/bots/{bot_id}")
    def get_bot_detail(bot_id: str, request: Request, session: Session = Depends(get_session)):
        return deps.bot_query_service.get_bot_detail(app_state=request.app.state, session=session, bot_id=bot_id)

    @router.get("/api/bots/{bot_id}/resources")
    def get_bot_resources(bot_id: str, request: Request, session: Session = Depends(get_session)):
        return deps.bot_query_service.get_bot_resources(app_state=request.app.state, session=session, bot_id=bot_id)

    @router.put("/api/bots/{bot_id}")
    def update_bot(bot_id: str, payload: BotUpdateRequest, session: Session = Depends(get_session)):
        return deps.bot_lifecycle_service.update_bot(session=session, bot_id=bot_id, payload=payload)

    @router.post("/api/bots/{bot_id}/deploy")
    async def deploy_bot(bot_id: str, payload: BotDeployRequest, request: Request, session: Session = Depends(get_session)):
        return await deps.bot_lifecycle_service.deploy_bot(
            app_state=request.app.state,
            session=session,
            bot_id=bot_id,
            node_id=payload.node_id,
            runtime_kind=payload.runtime_kind,
            image_tag=payload.image_tag,
            auto_start=bool(payload.auto_start),
        )

    @router.post("/api/bots/{bot_id}/start")
    async def start_bot(bot_id: str, request: Request, session: Session = Depends(get_session)):
        return await deps.bot_lifecycle_service.start_bot(app_state=request.app.state, session=session, bot_id=bot_id)

    @router.post("/api/bots/{bot_id}/stop")
    def stop_bot(bot_id: str, request: Request, session: Session = Depends(get_session)):
        return deps.bot_lifecycle_service.stop_bot(app_state=request.app.state, session=session, bot_id=bot_id)

    @router.post("/api/bots/{bot_id}/enable")
    def enable_bot(bot_id: str, session: Session = Depends(get_session)):
        return deps.bot_lifecycle_service.enable_bot(session=session, bot_id=bot_id)

    @router.post("/api/bots/{bot_id}/disable")
    def disable_bot(bot_id: str, request: Request, session: Session = Depends(get_session)):
        return deps.bot_lifecycle_service.disable_bot(app_state=request.app.state, session=session, bot_id=bot_id)

    @router.post("/api/bots/{bot_id}/deactivate")
    def deactivate_bot(bot_id: str, request: Request, session: Session = Depends(get_session)):
        return deps.bot_lifecycle_service.deactivate_bot(app_state=request.app.state, session=session, bot_id=bot_id)

    @router.delete("/api/bots/{bot_id}")
    def delete_bot(bot_id: str, request: Request, delete_workspace: bool = True, session: Session = Depends(get_session)):
        # By default the bot's workspace files are removed together with the row.
        return deps.bot_lifecycle_service.delete_bot(
            app_state=request.app.state,
            session=session,
            bot_id=bot_id,
            delete_workspace=delete_workspace,
        )

    @router.get("/api/bots/{bot_id}/channels")
    def list_bot_channels(bot_id: str, session: Session = Depends(get_session)):
        return deps.bot_channel_service.list_channels(session=session, bot_id=bot_id)

    @router.post("/api/bots/{bot_id}/channels")
    def create_bot_channel(bot_id: str, payload: ChannelConfigRequest, session: Session = Depends(get_session)):
        return deps.bot_channel_service.create_channel(session=session, bot_id=bot_id, payload=payload)

    @router.put("/api/bots/{bot_id}/channels/{channel_id}")
    def update_bot_channel(bot_id: str, channel_id: str, payload: ChannelConfigUpdateRequest, session: Session = Depends(get_session)):
        return deps.bot_channel_service.update_channel(
            session=session,
            bot_id=bot_id,
            channel_id=channel_id,
            payload=payload,
        )

    @router.delete("/api/bots/{bot_id}/channels/{channel_id}")
    def delete_bot_channel(bot_id: str, channel_id: str, session: Session = Depends(get_session)):
        return deps.bot_channel_service.delete_channel(session=session, bot_id=bot_id, channel_id=channel_id)

    @router.get("/api/bots/{bot_id}/tools-config")
    def get_bot_tools_config(bot_id: str, session: Session = Depends(get_session)):
        return deps.bot_query_service.get_tools_config(session=session, bot_id=bot_id)

    @router.put("/api/bots/{bot_id}/tools-config")
    def update_bot_tools_config(bot_id: str, payload: BotToolsConfigUpdateRequest, session: Session = Depends(get_session)):
        return deps.bot_query_service.update_tools_config(session=session, bot_id=bot_id, payload=payload)

    @router.get("/api/bots/{bot_id}/mcp-config")
    def get_bot_mcp_config(bot_id: str, session: Session = Depends(get_session)):
        return deps.bot_config_state_service.get_mcp_config_for_bot(session=session, bot_id=bot_id)

    @router.put("/api/bots/{bot_id}/mcp-config")
    def update_bot_mcp_config(bot_id: str, payload: BotMcpConfigUpdateRequest, session: Session = Depends(get_session)):
        return deps.bot_config_state_service.update_mcp_config_for_bot(
            session=session,
            bot_id=bot_id,
            mcp_servers=payload.mcp_servers,
        )

    @router.get("/api/bots/{bot_id}/env-params")
    def get_bot_env_params(bot_id: str, session: Session = Depends(get_session)):
        return deps.bot_config_state_service.get_env_params_for_bot(session=session, bot_id=bot_id)

    @router.put("/api/bots/{bot_id}/env-params")
    def update_bot_env_params(bot_id: str, payload: BotEnvParamsUpdateRequest, session: Session = Depends(get_session)):
        return deps.bot_config_state_service.update_env_params_for_bot(
            session=session,
            bot_id=bot_id,
            env_params=payload.env_params,
        )

    @router.get("/api/bots/{bot_id}/cron/jobs")
    def list_cron_jobs(bot_id: str, include_disabled: bool = True, session: Session = Depends(get_session)):
        return deps.bot_config_state_service.list_cron_jobs_for_bot(
            session=session,
            bot_id=bot_id,
            include_disabled=include_disabled,
        )

    @router.post("/api/bots/{bot_id}/cron/jobs/{job_id}/stop")
    def stop_cron_job(bot_id: str, job_id: str, session: Session = Depends(get_session)):
        return deps.bot_config_state_service.stop_cron_job_for_bot(session=session, bot_id=bot_id, job_id=job_id)

    @router.delete("/api/bots/{bot_id}/cron/jobs/{job_id}")
    def delete_cron_job(bot_id: str, job_id: str, session: Session = Depends(get_session)):
        return deps.bot_config_state_service.delete_cron_job_for_bot(session=session, bot_id=bot_id, job_id=job_id)

    return router

View File

@ -0,0 +1,197 @@
from typing import List, Optional
from fastapi import APIRouter, Depends, File, Form, Request, UploadFile, WebSocket
from sqlmodel import Session
from core.database import get_session
from schemas.dashboard import (
CommandRequest,
MessageFeedbackRequest,
WorkspaceFileUpdateRequest,
)
from api.dashboard_router_support import DashboardRouterDeps
def build_dashboard_bot_io_router(*, deps: DashboardRouterDeps) -> APIRouter:
    """Build the dashboard router for bot I/O: commands, messages, workspace
    files, speech transcription and the live monitor websocket.

    NOTE(review): several handlers declare ``request: Request = None``; they
    still dereference ``request.app.state`` unconditionally, so they rely on
    FastAPI always injecting the request — the ``None`` default is presumably
    vestigial (confirm before removing).
    """
    router = APIRouter()

    @router.post("/api/bots/{bot_id}/command")
    def send_command(bot_id: str, payload: CommandRequest, request: Request, session: Session = Depends(get_session)):
        return deps.runtime_service.send_command_for_bot(
            app_state=request.app.state,
            session=session,
            bot_id=bot_id,
            payload=payload,
        )

    @router.get("/api/bots/{bot_id}/messages")
    def list_bot_messages(bot_id: str, limit: int = 200, session: Session = Depends(get_session)):
        return deps.bot_message_service.list_messages(session=session, bot_id=bot_id, limit=limit)

    @router.get("/api/bots/{bot_id}/messages/page")
    def list_bot_messages_page(bot_id: str, limit: Optional[int] = None, before_id: Optional[int] = None, session: Session = Depends(get_session)):
        # Cursor pagination: ``before_id`` is the id to page backwards from.
        return deps.bot_message_service.list_messages_page(
            session=session,
            bot_id=bot_id,
            limit=limit,
            before_id=before_id,
        )

    @router.get("/api/bots/{bot_id}/messages/by-date")
    def list_bot_messages_by_date(
        bot_id: str,
        date: str,
        tz_offset_minutes: Optional[int] = None,
        limit: Optional[int] = None,
        session: Session = Depends(get_session),
    ):
        # ``tz_offset_minutes`` lets the client map its local day onto
        # server-side timestamps.
        return deps.bot_message_service.list_messages_by_date(
            session=session,
            bot_id=bot_id,
            date=date,
            tz_offset_minutes=tz_offset_minutes,
            limit=limit,
        )

    @router.put("/api/bots/{bot_id}/messages/{message_id}/feedback")
    def update_bot_message_feedback(bot_id: str, message_id: int, payload: MessageFeedbackRequest, session: Session = Depends(get_session)):
        return deps.bot_message_service.update_feedback(
            session=session,
            bot_id=bot_id,
            message_id=message_id,
            feedback=payload.feedback,
        )

    @router.delete("/api/bots/{bot_id}/messages")
    def clear_bot_messages(bot_id: str, request: Request, session: Session = Depends(get_session)):
        return deps.runtime_service.clear_messages_for_bot(app_state=request.app.state, session=session, bot_id=bot_id)

    @router.post("/api/bots/{bot_id}/sessions/dashboard-direct/clear")
    def clear_bot_dashboard_direct_session(bot_id: str, request: Request, session: Session = Depends(get_session)):
        return deps.runtime_service.clear_dashboard_direct_session_for_bot(
            app_state=request.app.state,
            session=session,
            bot_id=bot_id,
        )

    @router.get("/api/bots/{bot_id}/logs")
    def get_bot_logs(bot_id: str, tail: int = 300, request: Request = None, session: Session = Depends(get_session)):
        return deps.runtime_service.get_logs_for_bot(
            app_state=request.app.state,
            session=session,
            bot_id=bot_id,
            tail=tail,
        )

    @router.get("/api/bots/{bot_id}/workspace/tree")
    def get_workspace_tree(bot_id: str, path: Optional[str] = None, recursive: bool = False, request: Request = None, session: Session = Depends(get_session)):
        return deps.workspace_service.list_tree_for_bot(
            app_state=request.app.state,
            session=session,
            bot_id=bot_id,
            path=path,
            recursive=recursive,
        )

    @router.get("/api/bots/{bot_id}/workspace/file")
    def read_workspace_file(bot_id: str, path: str, max_bytes: int = 200000, request: Request = None, session: Session = Depends(get_session)):
        # ``max_bytes`` caps the inline read; larger files go via /download.
        return deps.workspace_service.read_file_for_bot(
            app_state=request.app.state,
            session=session,
            bot_id=bot_id,
            path=path,
            max_bytes=max_bytes,
        )

    @router.put("/api/bots/{bot_id}/workspace/file")
    def update_workspace_file(bot_id: str, path: str, payload: WorkspaceFileUpdateRequest, request: Request = None, session: Session = Depends(get_session)):
        return deps.workspace_service.write_markdown_for_bot(
            app_state=request.app.state,
            session=session,
            bot_id=bot_id,
            path=path,
            content=str(payload.content or ""),
        )

    # The four serve endpoints below share one service call and differ only in
    # the ``public`` flag (auth-free /public/ prefix) and whether HTML files
    # are redirected to the /raw/ variant.
    @router.get("/api/bots/{bot_id}/workspace/download")
    def download_workspace_file(bot_id: str, path: str, download: bool = False, request: Request = None, session: Session = Depends(get_session)):
        return deps.workspace_service.serve_file_for_bot(
            app_state=request.app.state,
            session=session,
            bot_id=bot_id,
            path=path,
            download=download,
            request=request,
            public=False,
            redirect_html_to_raw=True,
        )

    @router.get("/public/bots/{bot_id}/workspace/download")
    def public_download_workspace_file(bot_id: str, path: str, download: bool = False, request: Request = None, session: Session = Depends(get_session)):
        return deps.workspace_service.serve_file_for_bot(
            app_state=request.app.state,
            session=session,
            bot_id=bot_id,
            path=path,
            download=download,
            request=request,
            public=True,
            redirect_html_to_raw=True,
        )

    @router.get("/api/bots/{bot_id}/workspace/raw/{path:path}")
    def raw_workspace_file(bot_id: str, path: str, download: bool = False, request: Request = None, session: Session = Depends(get_session)):
        return deps.workspace_service.serve_file_for_bot(
            app_state=request.app.state,
            session=session,
            bot_id=bot_id,
            path=path,
            download=download,
            request=request,
            public=False,
            redirect_html_to_raw=False,
        )

    @router.get("/public/bots/{bot_id}/workspace/raw/{path:path}")
    def public_raw_workspace_file(bot_id: str, path: str, download: bool = False, request: Request = None, session: Session = Depends(get_session)):
        return deps.workspace_service.serve_file_for_bot(
            app_state=request.app.state,
            session=session,
            bot_id=bot_id,
            path=path,
            download=download,
            request=request,
            public=True,
            redirect_html_to_raw=False,
        )

    @router.post("/api/bots/{bot_id}/workspace/upload")
    async def upload_workspace_files(bot_id: str, files: List[UploadFile] = File(...), path: Optional[str] = None, request: Request = None, session: Session = Depends(get_session)):
        return await deps.workspace_service.upload_files_for_bot(
            app_state=request.app.state,
            session=session,
            bot_id=bot_id,
            files=files,
            path=path,
        )

    @router.post("/api/bots/{bot_id}/speech/transcribe")
    async def transcribe_bot_speech(
        bot_id: str,
        file: UploadFile = File(...),
        language: Optional[str] = Form(None),
        session: Session = Depends(get_session),
    ):
        return await deps.speech_transcription_service.transcribe(
            session=session,
            bot_id=bot_id,
            file=file,
            language=language,
        )

    @router.websocket("/ws/monitor/{bot_id}")
    async def websocket_endpoint(websocket: WebSocket, bot_id: str):
        # Live monitor stream; lifecycle is fully managed by the service.
        await deps.app_lifecycle_service.handle_websocket(websocket, bot_id)

    return router

View File

@ -0,0 +1,46 @@
from fastapi import APIRouter
from api.dashboard_assets_router import build_dashboard_assets_router
from api.dashboard_bot_admin_router import build_dashboard_bot_admin_router
from api.dashboard_bot_io_router import build_dashboard_bot_io_router
from api.dashboard_router_support import DashboardRouterDeps
def build_dashboard_router(
    *,
    image_service,
    provider_test_service,
    bot_lifecycle_service,
    bot_query_service,
    bot_channel_service,
    skill_service,
    bot_config_state_service,
    runtime_service,
    bot_message_service,
    workspace_service,
    speech_transcription_service,
    app_lifecycle_service,
    resolve_edge_state_context,
    logger,
) -> APIRouter:
    """Assemble the complete dashboard API router.

    Bundles all injected services into an immutable ``DashboardRouterDeps``
    and mounts the three dashboard sub-routers (assets, bot admin, bot I/O)
    onto a single ``APIRouter``.
    """
    shared_deps = DashboardRouterDeps(
        image_service=image_service,
        provider_test_service=provider_test_service,
        bot_lifecycle_service=bot_lifecycle_service,
        bot_query_service=bot_query_service,
        bot_channel_service=bot_channel_service,
        skill_service=skill_service,
        bot_config_state_service=bot_config_state_service,
        runtime_service=runtime_service,
        bot_message_service=bot_message_service,
        workspace_service=workspace_service,
        speech_transcription_service=speech_transcription_service,
        app_lifecycle_service=app_lifecycle_service,
        resolve_edge_state_context=resolve_edge_state_context,
        logger=logger,
    )
    combined = APIRouter()
    # Mount each sub-router built from the same shared dependency bundle.
    for build_sub_router in (
        build_dashboard_assets_router,
        build_dashboard_bot_admin_router,
        build_dashboard_bot_io_router,
    ):
        combined.include_router(build_sub_router(deps=shared_deps))
    return combined

View File

@ -0,0 +1,20 @@
from dataclasses import dataclass
from typing import Any, Callable
@dataclass(frozen=True)
class DashboardRouterDeps:
    """Immutable bundle of the services shared by all dashboard sub-routers.

    Fields are annotated ``Any`` because the concrete service classes are
    declared elsewhere in the project; each router calls only a small slice
    of each service's API. Field order is part of the positional-constructor
    interface and must not be changed.
    """
    image_service: Any                 # image registry CRUD + docker image listing
    provider_test_service: Any         # LLM/provider connectivity testing
    bot_lifecycle_service: Any         # create/update/deploy/start/stop/delete bots
    bot_query_service: Any             # read-side bot listings and configs
    bot_channel_service: Any           # per-bot channel configuration CRUD
    skill_service: Any                 # skill market + workspace skill management
    bot_config_state_service: Any      # MCP/env/cron configuration state
    runtime_service: Any               # commands, logs, session/message clearing
    bot_message_service: Any           # message history queries and feedback
    workspace_service: Any             # workspace file tree/read/write/serve
    speech_transcription_service: Any  # audio upload -> text transcription
    app_lifecycle_service: Any         # websocket monitor handling
    resolve_edge_state_context: Callable[[str], Any]  # bot_id -> edge state context
    logger: Any                        # shared logger for edge/skill failures

View File

@ -0,0 +1,8 @@
from fastapi import APIRouter

from api.platform_overview_router import router as platform_overview_router
from api.platform_settings_router import router as platform_settings_router

# Aggregate the platform sub-routers into a single router so the application
# can include all platform endpoints with one ``include_router`` call.
router = APIRouter()
router.include_router(platform_overview_router)
router.include_router(platform_settings_router)

View File

@ -0,0 +1,159 @@
from fastapi import APIRouter, Depends, HTTPException, Request
from sqlmodel import Session, select
from core.database import get_session
from models.bot import BotInstance
from providers.target import ProviderTarget
from services.node_registry_service import ManagedNode
from api.platform_node_support import (
edge_node_self_with_native_preflight,
managed_node_from_payload,
normalize_node_payload,
serialize_node,
)
from api.platform_shared import (
cached_platform_nodes_payload,
invalidate_platform_nodes_cache,
invalidate_platform_overview_cache,
logger,
store_platform_nodes_payload,
)
from clients.edge.errors import log_edge_failure
from schemas.platform import ManagedNodePayload
router = APIRouter()
@router.get("/api/platform/nodes")
def list_platform_nodes(request: Request, session: Session = Depends(get_session)):
    """List managed nodes, refreshing edge-node metadata on a cache miss.

    Returns the cached payload when present; otherwise probes every enabled
    node whose transport is "edge", persists what the node reports about
    itself via ``mark_node_seen``, and stores the rebuilt payload in the
    cache before returning it.
    """
    cached_payload = cached_platform_nodes_payload()
    if cached_payload is not None:
        return cached_payload
    node_registry = getattr(request.app.state, "node_registry_service", None)
    if node_registry is None or not hasattr(node_registry, "list_nodes"):
        # Registry not wired up (e.g. partial startup): report an empty list.
        return {"items": []}
    resolve_edge_client = getattr(request.app.state, "resolve_edge_client", None)
    refreshed_items = []
    for node in node_registry.list_nodes():
        metadata = dict(node.metadata or {})
        refresh_failed = False
        # Only enabled nodes with the "edge" transport are probed live.
        if callable(resolve_edge_client) and str(metadata.get("transport_kind") or "").strip().lower() == "edge" and bool(node.enabled):
            try:
                client = resolve_edge_client(
                    ProviderTarget(
                        node_id=node.node_id,
                        transport_kind="edge",
                        runtime_kind=str(metadata.get("runtime_kind") or "docker"),
                        core_adapter=str(metadata.get("core_adapter") or "nanobot"),
                    )
                )
                node_self = edge_node_self_with_native_preflight(client=client, node=node)
                # Persist the node's self-report; the returned record replaces
                # the loop variable so the response reflects the fresh data.
                node = node_registry.mark_node_seen(
                    session,
                    node_id=node.node_id,
                    display_name=str(node.display_name or node_self.get("display_name") or node.node_id),
                    capabilities=dict(node_self.get("capabilities") or {}),
                    resources=dict(node_self.get("resources") or {}),
                )
            except Exception as exc:
                # Best-effort refresh: keep listing the node, just flag the failure.
                refresh_failed = True
                log_edge_failure(
                    logger,
                    key=f"platform-node-refresh:{node.node_id}",
                    exc=exc,
                    message=f"Failed to refresh edge node metadata for node_id={node.node_id}",
                )
        refreshed_items.append((node, refresh_failed))
    return store_platform_nodes_payload([
        serialize_node(node, refresh_failed=refresh_failed)
        for node, refresh_failed in refreshed_items
    ])
@router.get("/api/platform/nodes/{node_id}")
def get_platform_node(node_id: str, request: Request, session: Session = Depends(get_session)):
    """Return a single managed node by id (case-insensitive), or 404."""
    normalized_node_id = str(node_id or "").strip().lower()
    registry = getattr(request.app.state, "node_registry_service", None)
    registry_usable = registry is not None and hasattr(registry, "get_node")
    if not registry_usable:
        raise HTTPException(status_code=500, detail="node registry is unavailable")
    found = registry.get_node(normalized_node_id)
    if found is None:
        raise HTTPException(status_code=404, detail=f"Managed node not found: {normalized_node_id}")
    return serialize_node(found)
@router.post("/api/platform/nodes")
def create_platform_node(payload: ManagedNodePayload, request: Request, session: Session = Depends(get_session)):
    """Create a new managed node; 409 if the normalized id already exists.

    Invalidates both the overview and the nodes caches so subsequent list
    calls see the new node.
    """
    node_registry = getattr(request.app.state, "node_registry_service", None)
    if node_registry is None or not hasattr(node_registry, "get_node"):
        raise HTTPException(status_code=500, detail="node registry is unavailable")
    normalized = normalize_node_payload(payload)
    if node_registry.get_node(normalized.node_id) is not None:
        raise HTTPException(status_code=409, detail=f"Node already exists: {normalized.node_id}")
    node = node_registry.upsert_node(session, managed_node_from_payload(normalized))
    invalidate_platform_overview_cache()
    invalidate_platform_nodes_cache()
    return serialize_node(node)
@router.put("/api/platform/nodes/{node_id}")
def update_platform_node(node_id: str, payload: ManagedNodePayload, request: Request, session: Session = Depends(get_session)):
    """Update an existing managed node in place.

    The node id itself is immutable (400 if the payload normalizes to a
    different id). Capabilities, resources and last-seen time are carried
    over from the stored record; the auth token is kept when the payload
    leaves it empty.

    NOTE(review): ``metadata`` is rebuilt entirely from the payload, so any
    extra metadata keys on the stored node would be dropped — confirm that
    these seven keys are the full metadata schema.
    """
    normalized_node_id = str(node_id or "").strip().lower()
    node_registry = getattr(request.app.state, "node_registry_service", None)
    if node_registry is None or not hasattr(node_registry, "get_node"):
        raise HTTPException(status_code=500, detail="node registry is unavailable")
    existing = node_registry.get_node(normalized_node_id)
    if existing is None:
        raise HTTPException(status_code=404, detail=f"Managed node not found: {normalized_node_id}")
    normalized = normalize_node_payload(payload)
    if normalized.node_id != normalized_node_id:
        raise HTTPException(status_code=400, detail="node_id cannot be changed")
    node = node_registry.upsert_node(
        session,
        ManagedNode(
            node_id=normalized_node_id,
            display_name=normalized.display_name,
            base_url=normalized.base_url,
            enabled=bool(normalized.enabled),
            # Empty/None token in the payload keeps the previously stored one.
            auth_token=normalized.auth_token or existing.auth_token,
            metadata={
                "transport_kind": normalized.transport_kind,
                "runtime_kind": normalized.runtime_kind,
                "core_adapter": normalized.core_adapter,
                "workspace_root": normalized.workspace_root,
                "native_command": normalized.native_command,
                "native_workdir": normalized.native_workdir,
                "native_sandbox_mode": normalized.native_sandbox_mode,
            },
            capabilities=dict(existing.capabilities or {}),
            resources=dict(existing.resources or {}),
            last_seen_at=existing.last_seen_at,
        ),
    )
    invalidate_platform_overview_cache()
    invalidate_platform_nodes_cache()
    return serialize_node(node)
@router.delete("/api/platform/nodes/{node_id}")
def delete_platform_node(node_id: str, request: Request, session: Session = Depends(get_session)):
    """Delete a managed node unless it is "local" or still has bots assigned.

    Refusing deletion while ``BotInstance`` rows reference the node prevents
    orphaned deployments; the error lists at most five offending bot ids.
    """
    normalized_node_id = str(node_id or "").strip().lower()
    if normalized_node_id == "local":
        # The built-in local node must always exist.
        raise HTTPException(status_code=400, detail="Local node cannot be deleted")
    node_registry = getattr(request.app.state, "node_registry_service", None)
    if node_registry is None or not hasattr(node_registry, "get_node"):
        raise HTTPException(status_code=500, detail="node registry is unavailable")
    if node_registry.get_node(normalized_node_id) is None:
        raise HTTPException(status_code=404, detail=f"Managed node not found: {normalized_node_id}")
    attached_bot_ids = session.exec(select(BotInstance.id).where(BotInstance.node_id == normalized_node_id)).all()
    if attached_bot_ids:
        raise HTTPException(
            status_code=400,
            detail=f"Node {normalized_node_id} still has bots assigned: {', '.join(str(item) for item in attached_bot_ids[:5])}",
        )
    node_registry.delete_node(session, normalized_node_id)
    invalidate_platform_overview_cache()
    invalidate_platform_nodes_cache()
    return {"status": "deleted", "node_id": normalized_node_id}

View File

@ -0,0 +1,119 @@
import httpx
from fastapi import APIRouter, Depends, HTTPException, Request
from sqlmodel import Session
from clients.edge.http import HttpEdgeClient
from core.database import get_session
from schemas.platform import ManagedNodePayload
from api.platform_node_support import (
managed_node_from_payload,
normalize_node_payload,
test_edge_connectivity,
test_edge_native_preflight,
)
from api.platform_shared import invalidate_platform_nodes_cache
router = APIRouter()
@router.post("/api/platform/nodes/test")
def test_platform_node(payload: ManagedNodePayload, request: Request):
    """Probe edge connectivity for a node definition that is not saved yet."""
    spec = normalize_node_payload(payload)
    candidate = managed_node_from_payload(spec)

    def _make_client(_target):
        # Short-timeout ad-hoc HTTP clients; proxies are ignored on purpose.
        return HttpEdgeClient(
            node=candidate,
            http_client_factory=lambda: httpx.Client(timeout=10.0, trust_env=False),
            async_http_client_factory=lambda: httpx.AsyncClient(timeout=10.0, trust_env=False),
        )

    outcome = test_edge_connectivity(_make_client, candidate)
    return outcome.model_dump()
@router.post("/api/platform/nodes/native/preflight")
def test_platform_node_native_preflight(payload: ManagedNodePayload, request: Request):
    """Run the native-runtime preflight against an unsaved node definition.

    Builds a throwaway ``HttpEdgeClient`` with short timeouts (``trust_env``
    disabled so local proxy settings are ignored) and forwards the optional
    native command/workdir, normalizing blanks to ``None``.
    """
    normalized = normalize_node_payload(payload)
    temp_node = managed_node_from_payload(normalized)
    result = test_edge_native_preflight(
        lambda _target: HttpEdgeClient(
            node=temp_node,
            http_client_factory=lambda: httpx.Client(timeout=10.0, trust_env=False),
            async_http_client_factory=lambda: httpx.AsyncClient(timeout=10.0, trust_env=False),
        ),
        temp_node,
        native_command=str(normalized.native_command or "").strip() or None,
        native_workdir=str(normalized.native_workdir or "").strip() or None,
    )
    return result.model_dump()
@router.post("/api/platform/nodes/{node_id}/test")
def test_saved_platform_node(node_id: str, request: Request, session: Session = Depends(get_session)):
    """Test connectivity of an already-saved node and record a success.

    Only "edge" transport nodes are testable (400 otherwise). On success the
    node's self-reported display name, capabilities and resources are saved
    via ``mark_node_seen``. The nodes cache is invalidated on every exit path
    so stale listings never survive a test.
    """
    normalized_node_id = str(node_id or "").strip().lower()
    node_registry = getattr(request.app.state, "node_registry_service", None)
    if node_registry is None or not hasattr(node_registry, "get_node"):
        raise HTTPException(status_code=500, detail="node registry is unavailable")
    node = node_registry.get_node(normalized_node_id)
    if node is None:
        raise HTTPException(status_code=404, detail=f"Managed node not found: {normalized_node_id}")
    transport_kind = str((node.metadata or {}).get("transport_kind") or "edge").strip().lower()
    if transport_kind != "edge":
        invalidate_platform_nodes_cache()
        raise HTTPException(status_code=400, detail="Only edge transport is supported")
    result = test_edge_connectivity(
        lambda _target: HttpEdgeClient(
            node=node,
            http_client_factory=lambda: httpx.Client(timeout=10.0, trust_env=False),
            async_http_client_factory=lambda: httpx.AsyncClient(timeout=10.0, trust_env=False),
        ),
        node,
    )
    if result.ok:
        # Fall back to the stored values for any field the node did not report.
        node_registry.mark_node_seen(
            session,
            node_id=node.node_id,
            display_name=str(node.display_name or result.node_self.get("display_name") or node.node_id) if result.node_self else node.display_name,
            capabilities=dict(result.node_self.get("capabilities") or {}) if result.node_self else dict(node.capabilities or {}),
            resources=dict(result.node_self.get("resources") or {}) if result.node_self else dict(getattr(node, "resources", {}) or {}),
        )
    invalidate_platform_nodes_cache()
    return result.model_dump()
@router.post("/api/platform/nodes/{node_id}/native/preflight")
def test_saved_platform_node_native_preflight(node_id: str, request: Request, session: Session = Depends(get_session)):
    """Run the native-runtime preflight against an already-saved edge node.

    Mirrors ``test_saved_platform_node`` but uses the stored metadata's
    ``native_command``/``native_workdir`` and only records ``mark_node_seen``
    when the result is fully online with a self-report.

    NOTE(review): the success check here is ``result.status == "online"``
    while the connectivity test above checks ``result.ok`` — presumably both
    result types expose different fields; confirm the intent.
    """
    normalized_node_id = str(node_id or "").strip().lower()
    node_registry = getattr(request.app.state, "node_registry_service", None)
    if node_registry is None or not hasattr(node_registry, "get_node"):
        raise HTTPException(status_code=500, detail="node registry is unavailable")
    node = node_registry.get_node(normalized_node_id)
    if node is None:
        raise HTTPException(status_code=404, detail=f"Managed node not found: {normalized_node_id}")
    transport_kind = str((node.metadata or {}).get("transport_kind") or "edge").strip().lower()
    if transport_kind != "edge":
        invalidate_platform_nodes_cache()
        raise HTTPException(status_code=400, detail="Only edge transport is supported")
    metadata = dict(node.metadata or {})
    result = test_edge_native_preflight(
        lambda _target: HttpEdgeClient(
            node=node,
            http_client_factory=lambda: httpx.Client(timeout=10.0, trust_env=False),
            async_http_client_factory=lambda: httpx.AsyncClient(timeout=10.0, trust_env=False),
        ),
        node,
        native_command=str(metadata.get("native_command") or "").strip() or None,
        native_workdir=str(metadata.get("native_workdir") or "").strip() or None,
    )
    if result.status == "online" and result.node_self:
        node_registry.mark_node_seen(
            session,
            node_id=node.node_id,
            display_name=str(node.display_name or result.node_self.get("display_name") or node.node_id),
            capabilities=dict(result.node_self.get("capabilities") or {}),
            resources=dict(result.node_self.get("resources") or {}),
        )
    invalidate_platform_nodes_cache()
    return result.model_dump()

View File

@ -0,0 +1,57 @@
from fastapi import APIRouter, Depends, Request
from sqlmodel import Session
from clients.edge.errors import log_edge_failure
from core.database import get_session
from providers.selector import get_runtime_provider
from providers.target import ProviderTarget
from services.platform_overview_service import build_node_resource_overview
from api.platform_shared import logger
router = APIRouter()
@router.get("/api/platform/nodes/{node_id}/resources")
def get_platform_node_resources(node_id: str, request: Request, session: Session = Depends(get_session)):
    """Return the resource overview for one node.

    For registered edge nodes, the base overview (built without runtime reads)
    is enriched with the live resource report fetched from the edge agent; on
    fetch failure the base overview is returned unchanged. For everything else
    the overview is built locally via each bot's runtime provider.
    """
    normalized_node_id = str(node_id or "").strip().lower()
    node_registry = getattr(request.app.state, "node_registry_service", None)
    if node_registry is not None and hasattr(node_registry, "get_node"):
        node = node_registry.get_node(normalized_node_id)
        if node is not None:
            metadata = dict(getattr(node, "metadata", {}) or {})
            if str(metadata.get("transport_kind") or "").strip().lower() == "edge":
                resolve_edge_client = getattr(request.app.state, "resolve_edge_client", None)
                if callable(resolve_edge_client):
                    # read_runtime=None: local runtime reads are skipped for edge nodes.
                    base = build_node_resource_overview(session, node_id=normalized_node_id, read_runtime=None)
                    client = resolve_edge_client(
                        ProviderTarget(
                            node_id=normalized_node_id,
                            transport_kind="edge",
                            runtime_kind=str(metadata.get("runtime_kind") or "docker"),
                            core_adapter=str(metadata.get("core_adapter") or "nanobot"),
                        )
                    )
                    try:
                        resource_report = dict(client.get_node_resources() or {})
                    except Exception as exc:
                        # Edge fetch failures are logged (de-duplicated by key)
                        # and degrade gracefully to the base overview.
                        log_edge_failure(
                            logger,
                            key=f"platform-node-resources:{normalized_node_id}",
                            exc=exc,
                            message=f"Failed to load edge node resources for node_id={normalized_node_id}",
                        )
                        return base
                    base["resources"] = dict(resource_report.get("resources") or resource_report)
                    if resource_report:
                        base["node_report"] = resource_report
                    return base

    def _read_runtime(bot):
        # Snapshot a bot's runtime status + resources via its provider.
        provider = get_runtime_provider(request.app.state, bot)
        status = str(provider.get_runtime_status(bot_id=str(bot.id or "")) or "STOPPED").upper()
        runtime = dict(provider.get_resource_snapshot(bot_id=str(bot.id or "")) or {})
        runtime.setdefault("docker_status", status)
        return status, runtime

    return build_node_resource_overview(session, node_id=normalized_node_id, read_runtime=_read_runtime)

View File

@ -0,0 +1,251 @@
import shlex
import time
from typing import Any, Dict, Optional
import httpx
from fastapi import HTTPException
from clients.edge.errors import log_edge_failure, summarize_edge_exception
from clients.edge.http import HttpEdgeClient
from providers.target import ProviderTarget
from schemas.platform import (
ManagedNodeConnectivityResult,
ManagedNodeNativePreflightResult,
ManagedNodePayload,
)
from services.node_registry_service import ManagedNode
from api.platform_shared import logger
def normalize_native_sandbox_mode(raw_value: Any) -> str:
    """Map a user-supplied sandbox-mode value onto one of the canonical modes.

    Accepts several aliases for "workspace" and "full_access"; anything else
    (including None/empty) resolves to "inherit".
    """
    mode = str(raw_value or "").strip().lower()
    workspace_aliases = ("workspace", "sandbox", "strict")
    full_access_aliases = ("full_access", "full-access", "danger-full-access", "escape")
    if mode in workspace_aliases:
        return "workspace"
    if mode in full_access_aliases:
        return "full_access"
    return "inherit"
def normalize_node_payload(payload: ManagedNodePayload) -> ManagedNodePayload:
    """Validate and canonicalize a managed-node payload.

    Lowercases/trims identifiers and kinds, normalizes the sandbox mode, and
    raises HTTP 400 for a missing node_id, a non-edge transport, or a missing
    base_url on an edge node. Returns an updated copy of the payload.
    """
    node_key = str(payload.node_id or "").strip().lower()
    if not node_key:
        raise HTTPException(status_code=400, detail="node_id is required")
    transport = str(payload.transport_kind or "edge").strip().lower() or "edge"
    if transport != "edge":
        raise HTTPException(status_code=400, detail="Only edge transport is supported")
    runtime = str(payload.runtime_kind or "docker").strip().lower() or "docker"
    adapter = str(payload.core_adapter or "nanobot").strip().lower() or "nanobot"
    sandbox_mode = normalize_native_sandbox_mode(payload.native_sandbox_mode)
    endpoint = str(payload.base_url or "").strip()
    if transport == "edge" and not endpoint:
        raise HTTPException(status_code=400, detail="base_url is required for edge nodes")
    label = str(payload.display_name or node_key).strip() or node_key
    return payload.model_copy(
        update={
            "node_id": node_key,
            "display_name": label,
            "base_url": endpoint,
            "auth_token": str(payload.auth_token or "").strip(),
            "transport_kind": transport,
            "runtime_kind": runtime,
            "core_adapter": adapter,
            "workspace_root": str(payload.workspace_root or "").strip(),
            "native_command": str(payload.native_command or "").strip(),
            "native_workdir": str(payload.native_workdir or "").strip(),
            "native_sandbox_mode": sandbox_mode,
        }
    )
def managed_node_from_payload(payload: ManagedNodePayload) -> ManagedNode:
    """Normalize the payload and materialize it as a ManagedNode record."""
    data = normalize_node_payload(payload)
    # Transport/runtime details live in the node's free-form metadata dict.
    node_metadata = {
        "transport_kind": data.transport_kind,
        "runtime_kind": data.runtime_kind,
        "core_adapter": data.core_adapter,
        "workspace_root": data.workspace_root,
        "native_command": data.native_command,
        "native_workdir": data.native_workdir,
        "native_sandbox_mode": data.native_sandbox_mode,
    }
    return ManagedNode(
        node_id=data.node_id,
        display_name=data.display_name,
        base_url=data.base_url,
        enabled=bool(data.enabled),
        auth_token=data.auth_token,
        metadata=node_metadata,
    )
def node_status(node: ManagedNode, *, refresh_failed: bool = False) -> str:
    """Derive a coarse UI status for a managed node.

    disabled > unknown (non-edge transport) > offline (refresh failed) >
    online (has been seen) > unknown (never seen).
    """
    if not node.enabled:
        return "disabled"
    transport = str((node.metadata or {}).get("transport_kind") or "edge").strip().lower()
    if transport != "edge":
        return "unknown"
    if refresh_failed:
        return "offline"
    if node.last_seen_at:
        return "online"
    return "unknown"
def serialize_node(node: ManagedNode, *, refresh_failed: bool = False) -> Dict[str, Any]:
    """Flatten a ManagedNode (plus its derived status) into a JSON-safe dict."""
    metadata = dict(node.metadata or {})

    def _meta_str(key: str, default: str = "") -> str:
        # Coerce metadata values to trimmed-falsy-aware strings.
        return str(metadata.get(key) or default)

    return {
        "node_id": node.node_id,
        "display_name": node.display_name,
        "base_url": node.base_url,
        "enabled": bool(node.enabled),
        "transport_kind": _meta_str("transport_kind"),
        "runtime_kind": _meta_str("runtime_kind"),
        "core_adapter": _meta_str("core_adapter"),
        "workspace_root": _meta_str("workspace_root"),
        "native_command": _meta_str("native_command"),
        "native_workdir": _meta_str("native_workdir"),
        "native_sandbox_mode": _meta_str("native_sandbox_mode", "inherit"),
        "metadata": metadata,
        "capabilities": dict(node.capabilities or {}),
        "resources": dict(getattr(node, "resources", {}) or {}),
        "last_seen_at": node.last_seen_at,
        "status": node_status(node, refresh_failed=refresh_failed),
    }
def split_native_command(raw_command: Optional[str]) -> list[str]:
    """Split a shell-style command string into argv-like tokens.

    Empty/None input yields []. If shlex cannot parse the string (it raises
    ValueError on unbalanced quotes), the raw text is returned as a single
    token so the caller still has something to execute/inspect.
    """
    text = str(raw_command or "").strip()
    if not text:
        return []
    try:
        tokens = shlex.split(text)
    except ValueError:
        # Only ValueError is expected from shlex.split; catching it narrowly
        # (instead of the original broad `except Exception`) keeps genuine
        # programming errors visible. The token-cleanup comprehension is kept
        # outside the try for the same reason.
        return [text]
    return [str(item or "").strip() for item in tokens if str(item or "").strip()]
def runtime_native_supported(node_self: Dict[str, Any]) -> bool:
    """Return True iff the node's self-report has capabilities.runtime.native == True.

    Strict identity check: truthy-but-not-True values (e.g. "yes", 1) count
    as unsupported, matching the original behavior.
    """
    capability_map = dict(node_self.get("capabilities") or {})
    runtime_section = dict(capability_map.get("runtime") or {})
    return runtime_section.get("native") is True
def edge_node_self_with_native_preflight(*, client: HttpEdgeClient, node: ManagedNode) -> Dict[str, Any]:
    """Fetch a node's heartbeat self-description, enriched with native preflight.

    The native probe runs only when native execution is plausibly configured:
    a native_command or native_workdir is set, or runtime_kind == "native".
    Probe failures are logged and non-fatal (the plain heartbeat is returned).
    """
    node_self = dict(client.heartbeat_node() or {})
    metadata = dict(node.metadata or {})
    native_command = str(metadata.get("native_command") or "").strip() or None
    native_workdir = str(metadata.get("native_workdir") or "").strip() or None
    runtime_kind = str(metadata.get("runtime_kind") or "docker").strip().lower()
    should_probe = bool(native_command or native_workdir or runtime_kind == "native")
    if not should_probe:
        return node_self
    try:
        preflight = dict(client.preflight_native(native_command=native_command, native_workdir=native_workdir) or {})
    except Exception as exc:
        # Best-effort: a failed preflight must not break the heartbeat path.
        log_edge_failure(
            logger,
            key=f"platform-node-native-preflight:{node.node_id}",
            exc=exc,
            message=f"Failed to run native preflight for node_id={node.node_id}",
        )
        return node_self
    # Fold the preflight outcome into capabilities.process for consumers,
    # and attach the raw report under "native_preflight".
    caps = dict(node_self.get("capabilities") or {})
    process_caps = dict(caps.get("process") or {})
    if preflight.get("command"):
        process_caps["command"] = list(preflight.get("command") or [])
    process_caps["available"] = bool(preflight.get("ok"))
    process_caps["command_available"] = bool(preflight.get("command_available"))
    process_caps["workdir_exists"] = bool(preflight.get("workdir_exists"))
    process_caps["workdir"] = str(preflight.get("workdir") or "")
    process_caps["detail"] = str(preflight.get("detail") or "")
    caps["process"] = process_caps
    node_self["capabilities"] = caps
    node_self["native_preflight"] = preflight
    return node_self
def test_edge_connectivity(resolve_edge_client, node: ManagedNode) -> ManagedNodeConnectivityResult:
    """Probe an edge node's heartbeat (with optional native preflight) and
    report online/offline plus a wall-clock latency in milliseconds.

    Never raises: any transport/parse failure is summarized into an offline
    result via summarize_edge_exception.
    """
    started = time.perf_counter()
    try:
        client = resolve_edge_client(
            ProviderTarget(
                node_id=node.node_id,
                transport_kind="edge",
                runtime_kind=str((node.metadata or {}).get("runtime_kind") or "docker"),
                core_adapter=str((node.metadata or {}).get("core_adapter") or "nanobot"),
            )
        )
        node_self = edge_node_self_with_native_preflight(client=client, node=node)
        # Clamp to >= 1ms so the UI never shows a zero latency.
        latency_ms = max(1, int((time.perf_counter() - started) * 1000))
        return ManagedNodeConnectivityResult(
            ok=True,
            status="online",
            latency_ms=latency_ms,
            detail="dashboard-edge reachable",
            node_self=node_self,
        )
    except Exception as exc:
        latency_ms = max(1, int((time.perf_counter() - started) * 1000))
        return ManagedNodeConnectivityResult(
            ok=False,
            status="offline",
            latency_ms=latency_ms,
            detail=summarize_edge_exception(exc),
            node_self=None,
        )
def test_edge_native_preflight(
    resolve_edge_client,
    node: ManagedNode,
    *,
    native_command: Optional[str] = None,
    native_workdir: Optional[str] = None,
) -> ManagedNodeNativePreflightResult:
    """Heartbeat the node, run the native-launcher preflight, and fold both
    into one result; transport failures come back as status="offline".

    `native_command`/`native_workdir` override what the edge agent would
    otherwise use. Never raises.
    """
    started = time.perf_counter()
    # Pre-computed hints so a failed probe can still echo what was requested.
    command_hint = split_native_command(native_command)
    workdir_hint = str(native_workdir or "").strip()
    try:
        client = resolve_edge_client(
            ProviderTarget(
                node_id=node.node_id,
                transport_kind="edge",
                runtime_kind=str((node.metadata or {}).get("runtime_kind") or "docker"),
                core_adapter=str((node.metadata or {}).get("core_adapter") or "nanobot"),
            )
        )
        node_self = dict(client.heartbeat_node() or {})
        preflight = dict(
            client.preflight_native(
                native_command=native_command,
                native_workdir=native_workdir,
            ) or {}
        )
        # Clamp to >= 1ms so the UI never renders a zero latency.
        latency_ms = max(1, int((time.perf_counter() - started) * 1000))
        command = [str(item or "").strip() for item in list(preflight.get("command") or []) if str(item or "").strip()]
        workdir = str(preflight.get("workdir") or "")
        detail = str(preflight.get("detail") or "")
        if not detail:
            detail = "native launcher ready" if bool(preflight.get("ok")) else "native launcher not ready"
        return ManagedNodeNativePreflightResult(
            ok=bool(preflight.get("ok")),
            status="online",
            latency_ms=latency_ms,
            detail=detail,
            command=command,
            workdir=workdir,
            command_available=bool(preflight.get("command_available")),
            workdir_exists=bool(preflight.get("workdir_exists")),
            runtime_native_supported=runtime_native_supported(node_self),
            node_self=node_self,
        )
    except Exception as exc:
        latency_ms = max(1, int((time.perf_counter() - started) * 1000))
        return ManagedNodeNativePreflightResult(
            ok=False,
            status="offline",
            latency_ms=latency_ms,
            detail=summarize_edge_exception(exc),
            command=command_hint,
            workdir=workdir_hint,
            command_available=False,
            # With no requested workdir there is nothing to disprove, so
            # workdir_exists defaults to True.
            workdir_exists=False if workdir_hint else True,
            runtime_native_supported=False,
            node_self=None,
        )

View File

@ -0,0 +1,11 @@
from fastapi import APIRouter
from api.platform_node_catalog_router import router as platform_node_catalog_router
from api.platform_node_probe_router import router as platform_node_probe_router
from api.platform_node_resource_router import router as platform_node_resource_router
# Aggregate router exposing all /api/platform/nodes/* endpoints
# (catalog, connectivity/native probes, and resource overviews).
router = APIRouter()
router.include_router(platform_node_catalog_router)
router.include_router(platform_node_probe_router)
router.include_router(platform_node_resource_router)

View File

@ -0,0 +1,79 @@
from typing import Optional
from fastapi import APIRouter, Depends, Request
from sqlmodel import Session
from api.platform_shared import (
apply_platform_runtime_changes,
cached_platform_overview_payload,
invalidate_platform_nodes_cache,
invalidate_platform_overview_cache,
store_platform_overview_payload,
)
from core.database import get_session
from providers.selector import get_runtime_provider
from services.platform_activity_service import list_activity_events
from services.platform_analytics_service import build_dashboard_analytics
from services.platform_overview_service import build_platform_overview
from services.platform_usage_service import list_usage
router = APIRouter()
@router.get("/api/platform/overview")
def get_platform_overview(request: Request, session: Session = Depends(get_session)):
    """Return the platform overview, served from a short-TTL cache when warm."""
    cached_payload = cached_platform_overview_payload()
    if cached_payload is not None:
        return cached_payload

    def _read_runtime(bot):
        # Snapshot a bot's runtime status + resources via its provider.
        provider = get_runtime_provider(request.app.state, bot)
        status = str(provider.get_runtime_status(bot_id=str(bot.id or "")) or "STOPPED").upper()
        runtime = dict(provider.get_resource_snapshot(bot_id=str(bot.id or "")) or {})
        runtime.setdefault("docker_status", status)
        return status, runtime

    payload = build_platform_overview(session, read_runtime=_read_runtime)
    # Cache the freshly built payload and return it.
    return store_platform_overview_payload(payload)
@router.post("/api/platform/cache/clear")
def clear_platform_cache():
    """Explicitly drop the cached overview and node-list payloads."""
    invalidate_platform_overview_cache()
    invalidate_platform_nodes_cache()
    return {"status": "cleared"}
@router.post("/api/platform/reload")
def reload_platform_runtime(request: Request):
    """Invalidate caches and reset runtime services after out-of-band changes."""
    apply_platform_runtime_changes(request)
    return {"status": "reloaded"}
@router.get("/api/platform/usage")
def get_platform_usage(
    bot_id: Optional[str] = None,
    limit: int = 100,
    offset: int = 0,
    session: Session = Depends(get_session),
):
    """List usage records, optionally filtered by bot, with limit/offset paging."""
    return list_usage(session, bot_id=bot_id, limit=limit, offset=offset)
@router.get("/api/platform/dashboard-analytics")
def get_platform_dashboard_analytics(
    since_days: int = 7,
    events_limit: int = 20,
    session: Session = Depends(get_session),
):
    """Build dashboard analytics over the trailing window of since_days days."""
    return build_dashboard_analytics(session, since_days=since_days, events_limit=events_limit)
@router.get("/api/platform/events")
def get_platform_events(
    bot_id: Optional[str] = None,
    limit: int = 100,
    offset: int = 0,
    session: Session = Depends(get_session),
):
    """List activity events, optionally filtered by bot, with limit/offset paging."""
    return list_activity_events(session, bot_id=bot_id, limit=limit, offset=offset)

View File

@ -1,107 +1,9 @@
from typing import Optional from fastapi import APIRouter
from fastapi import APIRouter, Depends, HTTPException, Request from api.platform_admin_router import router as platform_admin_router
from sqlmodel import Session from api.platform_nodes_router import router as platform_nodes_router
from core.cache import cache
from core.database import get_session
from schemas.platform import PlatformSettingsPayload, SystemSettingPayload
from services.platform_service import (
build_platform_overview,
create_or_update_system_setting,
delete_system_setting,
get_platform_settings,
list_system_settings,
list_activity_events,
list_usage,
save_platform_settings,
)
router = APIRouter() router = APIRouter()
router.include_router(platform_admin_router)
router.include_router(platform_nodes_router)
def _apply_platform_runtime_changes(request: Request) -> None:
cache.delete_prefix("")
speech_service = getattr(request.app.state, "speech_service", None)
if speech_service is not None and hasattr(speech_service, "reset_runtime"):
speech_service.reset_runtime()
@router.get("/api/platform/overview")
def get_platform_overview(request: Request, session: Session = Depends(get_session)):
docker_manager = getattr(request.app.state, "docker_manager", None)
return build_platform_overview(session, docker_manager)
@router.get("/api/platform/settings")
def get_platform_settings_api(session: Session = Depends(get_session)):
return get_platform_settings(session).model_dump()
@router.put("/api/platform/settings")
def update_platform_settings_api(payload: PlatformSettingsPayload, request: Request, session: Session = Depends(get_session)):
result = save_platform_settings(session, payload).model_dump()
_apply_platform_runtime_changes(request)
return result
@router.post("/api/platform/cache/clear")
def clear_platform_cache():
cache.delete_prefix("")
return {"status": "cleared"}
@router.post("/api/platform/reload")
def reload_platform_runtime(request: Request):
_apply_platform_runtime_changes(request)
return {"status": "reloaded"}
@router.get("/api/platform/usage")
def get_platform_usage(
bot_id: Optional[str] = None,
limit: int = 100,
offset: int = 0,
session: Session = Depends(get_session),
):
return list_usage(session, bot_id=bot_id, limit=limit, offset=offset)
@router.get("/api/platform/events")
def get_platform_events(bot_id: Optional[str] = None, limit: int = 100, session: Session = Depends(get_session)):
return {"items": list_activity_events(session, bot_id=bot_id, limit=limit)}
@router.get("/api/platform/system-settings")
def get_system_settings(search: str = "", session: Session = Depends(get_session)):
return {"items": list_system_settings(session, search=search)}
@router.post("/api/platform/system-settings")
def create_system_setting(payload: SystemSettingPayload, request: Request, session: Session = Depends(get_session)):
try:
result = create_or_update_system_setting(session, payload)
_apply_platform_runtime_changes(request)
return result
except ValueError as exc:
raise HTTPException(status_code=400, detail=str(exc)) from exc
@router.put("/api/platform/system-settings/{key}")
def update_system_setting(key: str, payload: SystemSettingPayload, request: Request, session: Session = Depends(get_session)):
try:
result = create_or_update_system_setting(session, payload.model_copy(update={"key": key}))
_apply_platform_runtime_changes(request)
return result
except ValueError as exc:
raise HTTPException(status_code=400, detail=str(exc)) from exc
@router.delete("/api/platform/system-settings/{key}")
def remove_system_setting(key: str, request: Request, session: Session = Depends(get_session)):
try:
delete_system_setting(session, key)
_apply_platform_runtime_changes(request)
except ValueError as exc:
raise HTTPException(status_code=400, detail=str(exc)) from exc
return {"status": "deleted", "key": key}

View File

@ -0,0 +1,71 @@
from fastapi import APIRouter, Depends, HTTPException, Request
from sqlmodel import Session
from api.platform_shared import apply_platform_runtime_changes
from core.database import get_session
from schemas.platform import PlatformSettingsPayload, SystemSettingPayload
from services.platform_settings_service import (
create_or_update_system_setting,
delete_system_setting,
get_platform_settings,
list_system_settings,
save_platform_settings,
)
router = APIRouter()
@router.get("/api/platform/settings")
def get_platform_settings_api(session: Session = Depends(get_session)):
    """Return the current platform settings as a plain dict."""
    return get_platform_settings(session).model_dump()
@router.put("/api/platform/settings")
def update_platform_settings_api(
    payload: PlatformSettingsPayload,
    request: Request,
    session: Session = Depends(get_session),
):
    """Persist platform settings, then invalidate caches / reset runtimes."""
    result = save_platform_settings(session, payload).model_dump()
    apply_platform_runtime_changes(request)
    return result
@router.get("/api/platform/system-settings")
def get_system_settings(search: str = "", session: Session = Depends(get_session)):
    """List system settings, optionally filtered by a search string."""
    return {"items": list_system_settings(session, search=search)}
@router.post("/api/platform/system-settings")
def create_system_setting(payload: SystemSettingPayload, request: Request, session: Session = Depends(get_session)):
    """Create (or upsert) a system setting; validation errors map to HTTP 400."""
    try:
        result = create_or_update_system_setting(session, payload)
        apply_platform_runtime_changes(request)
        return result
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
@router.put("/api/platform/system-settings/{key}")
def update_system_setting(
    key: str,
    payload: SystemSettingPayload,
    request: Request,
    session: Session = Depends(get_session),
):
    """Update a system setting; the path key overrides any key in the payload."""
    try:
        result = create_or_update_system_setting(session, payload.model_copy(update={"key": key}))
        apply_platform_runtime_changes(request)
        return result
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
@router.delete("/api/platform/system-settings/{key}")
def remove_system_setting(key: str, request: Request, session: Session = Depends(get_session)):
    """Delete a system setting by key; unknown/invalid keys map to HTTP 400."""
    try:
        delete_system_setting(session, key)
        apply_platform_runtime_changes(request)
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    return {"status": "deleted", "key": key}

View File

@ -0,0 +1,54 @@
import logging
from typing import Any, Dict, Optional
from fastapi import Request
from core.cache import cache
logger = logging.getLogger(__name__)

# Cache keys/TTLs for the platform overview and node-list payloads. TTLs are
# deliberately short so the dashboard stays responsive without serving very
# stale runtime state.
PLATFORM_OVERVIEW_CACHE_KEY = "platform:overview"
PLATFORM_OVERVIEW_CACHE_TTL_SECONDS = 15
PLATFORM_NODES_CACHE_KEY = "platform:nodes:list"
PLATFORM_NODES_CACHE_TTL_SECONDS = 20
def cached_platform_overview_payload() -> Optional[Dict[str, Any]]:
    """Return the cached platform-overview dict, or None when absent/invalid."""
    payload = cache.get_json(PLATFORM_OVERVIEW_CACHE_KEY)
    if isinstance(payload, dict):
        return payload
    return None
def store_platform_overview_payload(payload: Dict[str, Any]) -> Dict[str, Any]:
    """Cache the overview payload under its short TTL and hand it back."""
    cache.set_json(
        PLATFORM_OVERVIEW_CACHE_KEY,
        payload,
        ttl=PLATFORM_OVERVIEW_CACHE_TTL_SECONDS,
    )
    return payload
def invalidate_platform_overview_cache() -> None:
    """Drop the cached platform-overview payload."""
    cache.delete(PLATFORM_OVERVIEW_CACHE_KEY)
def cached_platform_nodes_payload() -> Optional[Dict[str, Any]]:
    """Return a validated {"items": [...]} node-list payload from cache.

    Anything that is not a dict holding a list under "items" is treated as a
    cache miss (None).
    """
    payload = cache.get_json(PLATFORM_NODES_CACHE_KEY)
    if not isinstance(payload, dict):
        return None
    cached_items = payload.get("items")
    return {"items": cached_items} if isinstance(cached_items, list) else None
def store_platform_nodes_payload(items: list[Dict[str, Any]]) -> Dict[str, Any]:
    """Wrap the node list as {"items": ...}, cache it, and return the wrapper."""
    wrapped = {"items": items}
    cache.set_json(PLATFORM_NODES_CACHE_KEY, wrapped, ttl=PLATFORM_NODES_CACHE_TTL_SECONDS)
    return wrapped
def invalidate_platform_nodes_cache() -> None:
    """Drop the cached node-list payload."""
    cache.delete(PLATFORM_NODES_CACHE_KEY)
def apply_platform_runtime_changes(request: Request) -> None:
    """Invalidate platform caches and reset the speech runtime (when present)
    after a settings change."""
    invalidate_platform_overview_cache()
    invalidate_platform_nodes_cache()
    # Duck-typed lookup: only reset when the service exists and supports it.
    speech_service = getattr(request.app.state, "speech_service", None)
    if speech_service is not None and hasattr(speech_service, "reset_runtime"):
        speech_service.reset_runtime()

View File

@ -0,0 +1,230 @@
from fastapi import APIRouter, Depends, HTTPException, Request
from sqlmodel import Session, select
from core.database import get_session
from models.sys_auth import SysUser
from schemas.sys_auth import (
SysAuthBootstrapResponse,
SysAuthLoginRequest,
SysProfileUpdateRequest,
SysAuthStatusResponse,
SysRoleGrantBootstrapResponse,
SysRoleListResponse,
SysRoleSummaryResponse,
SysRoleUpsertRequest,
SysUserCreateRequest,
SysUserListResponse,
SysUserSummaryResponse,
SysUserUpdateRequest,
)
from services.sys_auth_service import (
DEFAULT_ADMIN_USERNAME,
authenticate_user,
build_user_bootstrap,
create_sys_role,
create_sys_user,
delete_sys_role,
delete_sys_user,
issue_user_token,
list_role_grant_bootstrap,
list_sys_roles,
list_sys_users,
resolve_user_by_token,
revoke_user_token,
update_sys_role,
update_sys_user,
update_current_sys_user_profile,
)
router = APIRouter()
def _extract_auth_token(request: Request) -> str:
    """Extract the auth token from a request.

    Precedence: `Authorization: Bearer <token>` header, then the
    `X-Auth-Token` header, then the `auth_token` query parameter. Returns an
    empty string when none is present.
    """
    header_value = str(request.headers.get("authorization") or "").strip()
    if header_value.lower().startswith("bearer "):
        return header_value[len("bearer "):].strip()
    fallback = request.headers.get("x-auth-token") or request.query_params.get("auth_token")
    return str(fallback or "").strip()
def _require_current_user(request: Request, session: Session) -> SysUser:
    """Resolve the authenticated, active user or raise HTTP 401.

    Prefers a user id stashed on request.state (presumably by upstream
    middleware -- verify against the auth guard); falls back to resolving the
    bearer/header/query token directly.
    """
    state_user_id = getattr(request.state, "sys_user_id", None)
    if state_user_id:
        user = session.get(SysUser, state_user_id)
        if user is not None and bool(user.is_active):
            return user
    token = _extract_auth_token(request)
    user = resolve_user_by_token(session, token)
    if user is None:
        raise HTTPException(status_code=401, detail="Authentication required")
    return user
@router.get("/api/sys/auth/status", response_model=SysAuthStatusResponse)
def get_sys_auth_status(session: Session = Depends(get_session)):
    """Report auth availability, the user count, and the default admin name."""
    # NOTE(review): loads every user row just to count them; a
    # SELECT COUNT(*) would be cheaper once user counts grow.
    user_count = len(session.exec(select(SysUser)).all())
    return SysAuthStatusResponse(
        enabled=True,
        user_count=user_count,
        default_username=DEFAULT_ADMIN_USERNAME,
    )
@router.post("/api/sys/auth/login", response_model=SysAuthBootstrapResponse)
def login_sys_user(payload: SysAuthLoginRequest, session: Session = Depends(get_session)):
    """Authenticate by username/password and issue a session token.

    Usernames are case-insensitive (lowercased before lookup). Token-backend
    failures (RuntimeError) surface as HTTP 503.
    """
    username = str(payload.username or "").strip().lower()
    password = str(payload.password or "")
    user = authenticate_user(session, username, password)
    if user is None:
        raise HTTPException(status_code=401, detail="Invalid username or password")
    try:
        token, expires_at = issue_user_token(session, user)
    except RuntimeError as exc:
        raise HTTPException(status_code=503, detail=str(exc)) from exc
    return SysAuthBootstrapResponse.model_validate(build_user_bootstrap(session, user, token=token, expires_at=expires_at))
@router.post("/api/sys/auth/logout")
def logout_sys_user(request: Request, session: Session = Depends(get_session)):
    """Best-effort logout: revoke the presented token when it maps to a user.

    Always returns success, even for unknown/missing tokens.
    """
    token = _extract_auth_token(request)
    user = resolve_user_by_token(session, token)
    if user is not None:
        # NOTE(review): unlike issue_user_token, revoke_user_token is called
        # without the session -- confirm the token store is session-free.
        revoke_user_token(token)
    return {"success": True}
@router.get("/api/sys/auth/me", response_model=SysAuthBootstrapResponse)
def get_current_sys_user(request: Request, session: Session = Depends(get_session)):
    """Return the bootstrap payload for the authenticated user (401 otherwise)."""
    user = _require_current_user(request, session)
    return SysAuthBootstrapResponse.model_validate(build_user_bootstrap(session, user))
@router.put("/api/sys/auth/me", response_model=SysAuthBootstrapResponse)
def update_current_sys_user(
    payload: SysProfileUpdateRequest,
    request: Request,
    session: Session = Depends(get_session),
):
    """Update the authenticated user's own display name and/or password."""
    current_user = _require_current_user(request, session)
    try:
        user = update_current_sys_user_profile(
            session,
            user_id=int(current_user.id or 0),
            display_name=payload.display_name,
            password=payload.password,
        )
    except ValueError as exc:
        # Service-level validation errors surface as HTTP 400.
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    return SysAuthBootstrapResponse.model_validate(build_user_bootstrap(session, user))
@router.get("/api/sys/users", response_model=SysUserListResponse)
def list_sys_users_api(request: Request, session: Session = Depends(get_session)):
    """List system users; only authentication is enforced here (no role check
    visible -- role-based authorization, if any, lives elsewhere)."""
    _require_current_user(request, session)
    return SysUserListResponse(items=[SysUserSummaryResponse.model_validate(item) for item in list_sys_users(session)])
@router.post("/api/sys/users", response_model=SysUserSummaryResponse)
def create_sys_user_api(payload: SysUserCreateRequest, request: Request, session: Session = Depends(get_session)):
    """Create a system user; service validation errors map to HTTP 400."""
    _require_current_user(request, session)
    try:
        item = create_sys_user(
            session,
            username=payload.username,
            display_name=payload.display_name,
            password=payload.password,
            role_id=int(payload.role_id),
            is_active=bool(payload.is_active),
            bot_ids=list(payload.bot_ids or []),
        )
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    return SysUserSummaryResponse.model_validate(item)
@router.put("/api/sys/users/{user_id}", response_model=SysUserSummaryResponse)
def update_sys_user_api(user_id: int, payload: SysUserUpdateRequest, request: Request, session: Session = Depends(get_session)):
    """Update a system user; acting_user_id is passed so the service can guard
    self-modification rules. Validation errors map to HTTP 400."""
    current_user = _require_current_user(request, session)
    try:
        item = update_sys_user(
            session,
            user_id=int(user_id),
            display_name=payload.display_name,
            password=payload.password,
            role_id=int(payload.role_id),
            is_active=bool(payload.is_active),
            bot_ids=list(payload.bot_ids or []),
            acting_user_id=int(current_user.id or 0),
        )
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    return SysUserSummaryResponse.model_validate(item)
@router.delete("/api/sys/users/{user_id}")
def delete_sys_user_api(user_id: int, request: Request, session: Session = Depends(get_session)):
    """Delete a system user; acting_user_id lets the service reject self-delete
    or other protected cases (errors map to HTTP 400)."""
    current_user = _require_current_user(request, session)
    try:
        delete_sys_user(session, user_id=int(user_id), acting_user_id=int(current_user.id or 0))
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    return {"success": True}
@router.get("/api/sys/roles", response_model=SysRoleListResponse)
def list_sys_roles_api(request: Request, session: Session = Depends(get_session)):
    """List roles for any authenticated user."""
    _require_current_user(request, session)
    return SysRoleListResponse(items=[SysRoleSummaryResponse.model_validate(item) for item in list_sys_roles(session)])
@router.get("/api/sys/roles/grants/bootstrap", response_model=SysRoleGrantBootstrapResponse)
def list_sys_role_grants_bootstrap_api(request: Request, session: Session = Depends(get_session)):
    """Return the grant bootstrap data (available menus/permissions) for the role editor."""
    _require_current_user(request, session)
    return SysRoleGrantBootstrapResponse.model_validate(list_role_grant_bootstrap(session))
@router.post("/api/sys/roles", response_model=SysRoleSummaryResponse)
def create_sys_role_api(payload: SysRoleUpsertRequest, request: Request, session: Session = Depends(get_session)):
    """Create a role with its menu/permission grants; validation errors -> 400."""
    _require_current_user(request, session)
    try:
        item = create_sys_role(
            session,
            role_key=payload.role_key,
            name=payload.name,
            description=payload.description,
            is_active=bool(payload.is_active),
            sort_order=int(payload.sort_order),
            menu_keys=list(payload.menu_keys or []),
            permission_keys=list(payload.permission_keys or []),
        )
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    return SysRoleSummaryResponse.model_validate(item)
@router.put("/api/sys/roles/{role_id}", response_model=SysRoleSummaryResponse)
def update_sys_role_api(role_id: int, payload: SysRoleUpsertRequest, request: Request, session: Session = Depends(get_session)):
    """Update a role and its grants; role_key is not updatable here (only the
    path id identifies the role). Validation errors map to HTTP 400."""
    _require_current_user(request, session)
    try:
        item = update_sys_role(
            session,
            role_id=int(role_id),
            name=payload.name,
            description=payload.description,
            is_active=bool(payload.is_active),
            sort_order=int(payload.sort_order),
            menu_keys=list(payload.menu_keys or []),
            permission_keys=list(payload.permission_keys or []),
        )
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    return SysRoleSummaryResponse.model_validate(item)
@router.delete("/api/sys/roles/{role_id}")
def delete_sys_role_api(role_id: int, request: Request, session: Session = Depends(get_session)):
    """Delete a role by id; service validation errors map to HTTP 400."""
    _require_current_user(request, session)
    try:
        delete_sys_role(session, role_id=int(role_id))
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    return {"success": True}

View File

@ -0,0 +1,29 @@
from fastapi import APIRouter
from schemas.dashboard import SystemTemplatesUpdateRequest
def build_system_runtime_router(*, system_service) -> APIRouter:
    """Build the router for system defaults/templates and health endpoints.

    Every handler is a thin closure over the injected system_service.
    """
    system_router = APIRouter()

    @system_router.get("/api/system/defaults")
    def get_system_defaults():
        return system_service.get_system_defaults()

    @system_router.get("/api/system/templates")
    def get_system_templates():
        return system_service.get_system_templates()

    @system_router.put("/api/system/templates")
    def update_system_templates(payload: SystemTemplatesUpdateRequest):
        return system_service.update_system_templates(payload=payload)

    @system_router.get("/api/health")
    def get_health():
        return system_service.get_health()

    @system_router.get("/api/health/cache")
    def get_cache_health():
        return system_service.get_cache_health()

    return system_router

View File

@ -0,0 +1,85 @@
import logging
import os
import re
from typing import Any
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from api.platform_router import router as platform_router
from api.sys_router import router as sys_router
from api.system_runtime_router import build_system_runtime_router
from api.topic_router import router as topic_router
from bootstrap.app_runtime import assemble_app_runtime
from core.config_manager import BotConfigManager
from core.docker_manager import BotDockerManager
from core.settings import BOTS_WORKSPACE_ROOT, DATA_ROOT
from core.speech_service import WhisperSpeechService
app = FastAPI(title="Dashboard Nanobot API")
logger = logging.getLogger("dashboard.backend")
LAST_ACTION_MAX_LENGTH = 16000
def _normalize_last_action_text(value: Any) -> str:
text = str(value or "").replace("\r\n", "\n").replace("\r", "\n").strip()
if not text:
return ""
text = re.sub(r"\n{4,}", "\n\n\n", text)
return text[:LAST_ACTION_MAX_LENGTH]
def _apply_log_noise_guard() -> None:
    """Raise chatty third-party loggers to WARNING to keep backend logs readable."""
    noisy_loggers = (
        "httpx",
        "httpcore",
        "uvicorn.access",
        "watchfiles.main",
        "watchfiles.watcher",
    )
    for logger_name in noisy_loggers:
        logging.getLogger(logger_name).setLevel(logging.WARNING)


# Applied once at import time, before any request handling starts.
_apply_log_noise_guard()
# NOTE(review): wildcard CORS with all methods/headers is fully permissive —
# confirm this API is only reachable from a trusted network.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
app.include_router(topic_router)
app.include_router(platform_router)
app.include_router(sys_router)
# Ensure workspace/data directories exist before managers touch them.
os.makedirs(BOTS_WORKSPACE_ROOT, exist_ok=True)
os.makedirs(DATA_ROOT, exist_ok=True)
docker_manager = BotDockerManager(host_data_root=BOTS_WORKSPACE_ROOT)
config_manager = BotConfigManager(host_data_root=BOTS_WORKSPACE_ROOT)
speech_service = WhisperSpeechService()
app.state.docker_manager = docker_manager
app.state.speech_service = speech_service
# Bot ids are restricted to word characters (letters, digits, underscore).
BOT_ID_PATTERN = re.compile(r"^[A-Za-z0-9_]+$")
# Build and attach every dashboard service; only the three returned handles
# are needed directly below (auth guard, system router, lifecycle hooks).
runtime_assembly = assemble_app_runtime(
    app=app,
    logger=logger,
    bots_workspace_root=BOTS_WORKSPACE_ROOT,
    data_root=DATA_ROOT,
    docker_manager=docker_manager,
    config_manager=config_manager,
    speech_service=speech_service,
    bot_id_pattern=BOT_ID_PATTERN,
)
app.include_router(build_system_runtime_router(system_service=runtime_assembly.system_service))


@app.middleware("http")
async def bot_access_password_guard(request: Request, call_next):
    # Delegates per-request access control to the assembled auth service.
    return await runtime_assembly.dashboard_auth_service.guard(request, call_next)


@app.on_event("startup")
async def on_startup():
    await runtime_assembly.app_lifecycle_service.on_startup()

View File

@ -0,0 +1,482 @@
from dataclasses import dataclass
from typing import Any, Dict
from clients.edge.errors import is_expected_edge_offline_error, log_edge_failure, summarize_edge_exception
from core.cache import cache
from core.database import engine, init_database
from core.settings import (
AGENT_MD_TEMPLATES_FILE,
DATABASE_ECHO,
DATABASE_ENGINE,
DATABASE_URL_DISPLAY,
DEFAULT_AGENTS_MD,
DEFAULT_BOT_SYSTEM_TIMEZONE,
DEFAULT_IDENTITY_MD,
DEFAULT_SOUL_MD,
DEFAULT_TOOLS_MD,
DEFAULT_USER_MD,
PROJECT_ROOT,
REDIS_ENABLED,
REDIS_PREFIX,
REDIS_URL,
TOPIC_PRESET_TEMPLATES,
TOPIC_PRESETS_TEMPLATES_FILE,
load_agent_md_templates,
load_topic_presets_template,
)
from providers.provision.edge import EdgeProvisionProvider
from providers.provision.local import LocalProvisionProvider
from providers.registry import ProviderRegistry
from providers.runtime.edge import EdgeRuntimeProvider
from providers.runtime.local import LocalRuntimeProvider
from providers.selector import get_provision_provider, get_runtime_provider
from providers.target import ProviderTarget, normalize_provider_target, provider_target_from_config, provider_target_to_dict
from providers.workspace.edge import EdgeWorkspaceProvider
from providers.workspace.local import LocalWorkspaceProvider
from services.app_lifecycle_service import AppLifecycleService
from services.bot_channel_service import BotChannelService
from services.bot_command_service import BotCommandService
from services.bot_config_state_service import BotConfigStateService
from services.bot_infra_service import BotInfraService
from services.bot_lifecycle_service import BotLifecycleService
from services.bot_message_service import BotMessageService
from services.bot_query_service import BotQueryService
from services.bot_runtime_snapshot_service import BotRuntimeSnapshotService
from services.dashboard_auth_service import DashboardAuthService
from services.image_service import ImageService
from services.node_registry_service import NodeRegistryService
from services.platform_activity_service import (
prune_expired_activity_events,
record_activity_event,
)
from services.platform_settings_service import (
get_chat_pull_page_size,
get_platform_settings_snapshot,
get_speech_runtime_settings,
)
from services.platform_usage_service import (
bind_usage_message,
create_usage_request,
fail_latest_usage,
finalize_usage_from_packet,
)
from services.provider_test_service import ProviderTestService
from services.runtime_event_service import RuntimeEventService
from services.runtime_service import RuntimeService
from services.skill_service import SkillService
from services.system_service import SystemService
from services.topic_runtime import publish_runtime_topic_packet
from services.workspace_service import WorkspaceService
from bootstrap.app_runtime_support import (
attach_runtime_services,
build_image_runtime_service,
build_speech_transcription_runtime_service,
build_system_runtime_service,
include_dashboard_api,
reconcile_image_registry,
register_provider_runtime,
)
@dataclass
class AppRuntimeAssembly:
    """Handles main.py keeps after wiring: auth guard, system API, lifecycle hooks."""

    # Per-request access-control service used by the HTTP middleware.
    dashboard_auth_service: DashboardAuthService
    # Backs the /api/system/* and /api/health endpoints.
    system_service: SystemService
    # Startup (and presumably shutdown) hooks for the FastAPI app.
    app_lifecycle_service: AppLifecycleService
def assemble_app_runtime(
    *,
    app: Any,
    logger: Any,
    bots_workspace_root: str,
    data_root: str,
    docker_manager: Any,
    config_manager: Any,
    speech_service: Any,
    bot_id_pattern: Any,
) -> AppRuntimeAssembly:
    """Wire every dashboard service together and mount the dashboard API.

    Builds the provider registry (local + edge bundles), the bot
    infra/snapshot/event/command/query/channel/message services, attaches them
    to ``app.state`` and includes the dashboard router.  Returns only the
    services ``main.py`` needs a direct handle on.

    NOTE: several constructor arguments are deliberately passed as ``lambda``
    wrappers because they reference names (``bot_config_state_service``,
    ``_read_bot_runtime_snapshot``, ``_normalize_media_list``,
    ``_invalidate_bot_detail_cache``, ``docker_callback`` ...) that are only
    assigned later in this function.  Python's late binding makes these
    forward references legal as long as the lambdas are not invoked before
    assembly completes — do not "simplify" them into direct references.
    """
    # --- registries and auth --------------------------------------------
    node_registry_service = NodeRegistryService()
    skill_service = SkillService()
    dashboard_auth_service = DashboardAuthService(engine=engine)
    provider_registry = ProviderRegistry()
    # --- bot infrastructure (config, workspace, provider-target plumbing) ---
    bot_infra_service = BotInfraService(
        app=app,
        engine=engine,
        config_manager=config_manager,
        node_registry_service=node_registry_service,
        logger=logger,
        bots_workspace_root=bots_workspace_root,
        default_soul_md=DEFAULT_SOUL_MD,
        default_agents_md=DEFAULT_AGENTS_MD,
        default_user_md=DEFAULT_USER_MD,
        default_tools_md=DEFAULT_TOOLS_MD,
        default_identity_md=DEFAULT_IDENTITY_MD,
        default_bot_system_timezone=DEFAULT_BOT_SYSTEM_TIMEZONE,
        normalize_provider_target=normalize_provider_target,
        provider_target_from_config=provider_target_from_config,
        provider_target_to_dict=provider_target_to_dict,
        resolve_provider_bundle_key=lambda target: provider_registry.resolve_bundle_key(target),
        get_provision_provider=get_provision_provider,
        # Late-bound forward references (defined further down).
        read_env_store=lambda bot_id: bot_config_state_service.read_env_store(bot_id),
        read_bot_runtime_snapshot=lambda bot: _read_bot_runtime_snapshot(bot),
        normalize_media_list=lambda raw, bot_id: _normalize_media_list(raw, bot_id),
    )
    node_registry_service.register_node(bot_infra_service.local_managed_node())
    app.state.node_registry_service = node_registry_service
    # --- local aliases for the infra service's helpers -------------------
    _read_bot_config = bot_infra_service.read_bot_config
    _write_bot_config = bot_infra_service.write_bot_config
    _default_provider_target = bot_infra_service.default_provider_target
    _read_bot_provider_target = bot_infra_service.read_bot_provider_target
    _resolve_bot_provider_target_for_instance = bot_infra_service.resolve_bot_provider_target_for_instance
    _clear_provider_target_override = bot_infra_service.clear_provider_target_override
    _apply_provider_target_to_bot = bot_infra_service.apply_provider_target_to_bot
    _local_managed_node = bot_infra_service.local_managed_node
    _provider_target_from_node = bot_infra_service.provider_target_from_node
    _node_display_name = bot_infra_service.node_display_name
    _node_metadata = bot_infra_service.node_metadata
    _serialize_provider_target_summary = bot_infra_service.serialize_provider_target_summary
    _resolve_edge_client = bot_infra_service.resolve_edge_client
    _resolve_edge_state_context = bot_infra_service.resolve_edge_state_context
    _read_edge_state_data = bot_infra_service.read_edge_state_data
    _write_edge_state_data = bot_infra_service.write_edge_state_data
    _read_bot_resources = bot_infra_service.read_bot_resources
    _migrate_bot_resources_store = bot_infra_service.migrate_bot_resources_store
    _normalize_channel_extra = bot_infra_service.normalize_channel_extra
    _read_global_delivery_flags = bot_infra_service.read_global_delivery_flags
    _channel_api_to_cfg = bot_infra_service.channel_api_to_cfg
    _get_bot_channels_from_config = bot_infra_service.get_bot_channels_from_config
    _normalize_initial_channels = bot_infra_service.normalize_initial_channels
    _parse_message_media = bot_infra_service.parse_message_media
    _normalize_env_params = bot_infra_service.normalize_env_params
    _get_default_system_timezone = bot_infra_service.get_default_system_timezone
    _normalize_system_timezone = bot_infra_service.normalize_system_timezone
    _resolve_bot_env_params = bot_infra_service.resolve_bot_env_params
    _safe_float = bot_infra_service.safe_float
    _safe_int = bot_infra_service.safe_int
    _normalize_resource_limits = bot_infra_service.normalize_resource_limits
    _sync_workspace_channels = bot_infra_service.sync_workspace_channels
    _set_bot_provider_target = bot_infra_service.set_bot_provider_target
    _sync_bot_workspace_via_provider = bot_infra_service.sync_bot_workspace_via_provider
    _workspace_root = bot_infra_service.workspace_root
    _cron_store_path = bot_infra_service.cron_store_path
    _env_store_path = bot_infra_service.env_store_path
    _clear_bot_sessions = bot_infra_service.clear_bot_sessions
    _clear_bot_dashboard_direct_session = bot_infra_service.clear_bot_dashboard_direct_session
    _ensure_provider_target_supported = bot_infra_service.ensure_provider_target_supported
    _resolve_workspace_path = bot_infra_service.resolve_workspace_path
    _calc_dir_size_bytes = bot_infra_service.calc_dir_size_bytes
    _is_video_attachment_path = bot_infra_service.is_video_attachment_path
    _is_visual_attachment_path = bot_infra_service.is_visual_attachment_path
    # --- per-bot config/env/cron state ----------------------------------
    bot_config_state_service = BotConfigStateService(
        read_edge_state_data=_read_edge_state_data,
        write_edge_state_data=_write_edge_state_data,
        read_bot_config=_read_bot_config,
        write_bot_config=_write_bot_config,
        # Late-bound: _invalidate_bot_detail_cache is assigned after the
        # runtime event service below.
        invalidate_bot_detail_cache=lambda *args, **kwargs: _invalidate_bot_detail_cache(*args, **kwargs),
        env_store_path=_env_store_path,
        cron_store_path=_cron_store_path,
        normalize_env_params=_normalize_env_params,
    )

    def _write_env_store(bot_id: str, env_params: Dict[str, str]) -> None:
        # Thin wrapper so providers/services can be handed a plain callable.
        bot_config_state_service.write_env_store(bot_id, env_params)

    # --- provider implementations (local docker + remote edge) -----------
    local_provision_provider = LocalProvisionProvider(sync_workspace_func=_sync_workspace_channels)
    local_runtime_provider = LocalRuntimeProvider(
        docker_manager=docker_manager,
        on_state_change=lambda *args, **kwargs: docker_callback(*args, **kwargs),
        provision_provider=local_provision_provider,
        read_runtime_snapshot=lambda *args, **kwargs: _read_bot_runtime_snapshot(*args, **kwargs),
        resolve_env_params=_resolve_bot_env_params,
        write_env_store=_write_env_store,
        invalidate_bot_cache=lambda *args, **kwargs: _invalidate_bot_detail_cache(*args, **kwargs),
        record_agent_loop_ready_warning=lambda *args, **kwargs: _record_agent_loop_ready_warning(*args, **kwargs),
        safe_float=_safe_float,
        safe_int=_safe_int,
    )
    local_workspace_provider = LocalWorkspaceProvider()
    edge_provision_provider = EdgeProvisionProvider(
        read_provider_target=_read_bot_provider_target,
        resolve_edge_client=_resolve_edge_client,
        read_runtime_snapshot=lambda *args, **kwargs: _read_bot_runtime_snapshot(*args, **kwargs),
        read_bot_channels=_get_bot_channels_from_config,
        read_node_metadata=_node_metadata,
    )
    edge_runtime_provider = EdgeRuntimeProvider(
        read_provider_target=_read_bot_provider_target,
        resolve_edge_client=_resolve_edge_client,
        read_runtime_snapshot=lambda *args, **kwargs: _read_bot_runtime_snapshot(*args, **kwargs),
        resolve_env_params=_resolve_bot_env_params,
        read_bot_channels=_get_bot_channels_from_config,
        read_node_metadata=_node_metadata,
    )
    edge_workspace_provider = EdgeWorkspaceProvider(
        read_provider_target=_read_bot_provider_target,
        resolve_edge_client=_resolve_edge_client,
        read_node_metadata=_node_metadata,
    )
    # NOTE(review): the default "local" target uses transport_kind="edge" and
    # runtime_kind="docker" — the same values register_provider_runtime uses
    # for its edge/docker bundle key; confirm this does not collide (see
    # register_provider_runtime).
    local_provider_target = ProviderTarget(
        node_id="local",
        transport_kind="edge",
        runtime_kind="docker",
        core_adapter="nanobot",
    )
    register_provider_runtime(
        app=app,
        provider_registry=provider_registry,
        local_provider_target=local_provider_target,
        local_provision_provider=local_provision_provider,
        local_runtime_provider=local_runtime_provider,
        local_workspace_provider=local_workspace_provider,
        edge_provision_provider=edge_provision_provider,
        edge_runtime_provider=edge_runtime_provider,
        edge_workspace_provider=edge_workspace_provider,
        resolve_bot_provider_target_for_instance=_resolve_bot_provider_target_for_instance,
        resolve_edge_client=_resolve_edge_client,
    )
    # --- runtime snapshots / serialization -------------------------------
    bot_runtime_snapshot_service = BotRuntimeSnapshotService(
        engine=engine,
        logger=logger,
        docker_manager=docker_manager,
        default_soul_md=DEFAULT_SOUL_MD,
        default_agents_md=DEFAULT_AGENTS_MD,
        default_user_md=DEFAULT_USER_MD,
        default_tools_md=DEFAULT_TOOLS_MD,
        default_identity_md=DEFAULT_IDENTITY_MD,
        workspace_root=_workspace_root,
        resolve_edge_state_context=_resolve_edge_state_context,
        read_bot_config=_read_bot_config,
        resolve_bot_env_params=_resolve_bot_env_params,
        resolve_bot_provider_target_for_instance=_resolve_bot_provider_target_for_instance,
        read_global_delivery_flags=_read_global_delivery_flags,
        safe_float=_safe_float,
        safe_int=_safe_int,
        get_default_system_timezone=_get_default_system_timezone,
        read_bot_resources=_read_bot_resources,
        node_display_name=_node_display_name,
        get_runtime_provider=get_runtime_provider,
        invalidate_bot_detail_cache=lambda *args, **kwargs: _invalidate_bot_detail_cache(*args, **kwargs),
        record_activity_event=record_activity_event,
    )
    # These assignments satisfy the forward references taken above.
    _read_bot_runtime_snapshot = bot_runtime_snapshot_service.read_bot_runtime_snapshot
    _serialize_bot = bot_runtime_snapshot_service.serialize_bot
    _serialize_bot_list_item = bot_runtime_snapshot_service.serialize_bot_list_item
    _refresh_bot_runtime_status = bot_runtime_snapshot_service.refresh_bot_runtime_status
    _record_agent_loop_ready_warning = bot_runtime_snapshot_service.record_agent_loop_ready_warning
    # --- runtime events, caching and packet broadcast ---------------------
    runtime_event_service = RuntimeEventService(
        app=app,
        engine=engine,
        cache=cache,
        logger=logger,
        publish_runtime_topic_packet=publish_runtime_topic_packet,
        bind_usage_message=bind_usage_message,
        finalize_usage_from_packet=finalize_usage_from_packet,
        workspace_root=_workspace_root,
        parse_message_media=_parse_message_media,
    )
    _normalize_media_list = runtime_event_service.normalize_media_list
    _persist_runtime_packet = runtime_event_service.persist_runtime_packet
    _broadcast_runtime_packet = runtime_event_service.broadcast_runtime_packet
    docker_callback = runtime_event_service.docker_callback
    _cache_key_bots_list = runtime_event_service.cache_key_bots_list
    _cache_key_bot_detail = runtime_event_service.cache_key_bot_detail
    _cache_key_bot_messages = runtime_event_service.cache_key_bot_messages
    _cache_key_bot_messages_page = runtime_event_service.cache_key_bot_messages_page
    _serialize_bot_message_row = runtime_event_service.serialize_bot_message_row
    _resolve_local_day_range = runtime_event_service.resolve_local_day_range
    _cache_key_images = runtime_event_service.cache_key_images
    _invalidate_bot_detail_cache = runtime_event_service.invalidate_bot_detail_cache
    _invalidate_bot_messages_cache = runtime_event_service.invalidate_bot_messages_cache
    _invalidate_images_cache = runtime_event_service.invalidate_images_cache
    # --- command dispatch -------------------------------------------------
    bot_command_service = BotCommandService(
        read_runtime_snapshot=_read_bot_runtime_snapshot,
        normalize_media_list=_normalize_media_list,
        resolve_workspace_path=_resolve_workspace_path,
        is_visual_attachment_path=_is_visual_attachment_path,
        is_video_attachment_path=_is_video_attachment_path,
        create_usage_request=create_usage_request,
        record_activity_event=record_activity_event,
        fail_latest_usage=fail_latest_usage,
        persist_runtime_packet=_persist_runtime_packet,
        get_main_loop=lambda app_state: getattr(app_state, "main_loop", None),
        broadcast_packet=_broadcast_runtime_packet,
    )
    workspace_service = WorkspaceService()
    runtime_service = RuntimeService(
        command_service=bot_command_service,
        resolve_runtime_provider=get_runtime_provider,
        clear_bot_sessions=_clear_bot_sessions,
        clear_dashboard_direct_session_file=_clear_bot_dashboard_direct_session,
        invalidate_bot_detail_cache=_invalidate_bot_detail_cache,
        invalidate_bot_messages_cache=_invalidate_bot_messages_cache,
        record_activity_event=record_activity_event,
    )
    # --- application lifecycle (startup / migrations / pruning) ----------
    app_lifecycle_service = AppLifecycleService(
        app=app,
        engine=engine,
        cache=cache,
        logger=logger,
        project_root=PROJECT_ROOT,
        database_engine=DATABASE_ENGINE,
        database_echo=DATABASE_ECHO,
        database_url_display=DATABASE_URL_DISPLAY,
        redis_enabled=REDIS_ENABLED,
        init_database=init_database,
        node_registry_service=node_registry_service,
        local_managed_node=_local_managed_node,
        prune_expired_activity_events=prune_expired_activity_events,
        migrate_bot_resources_store=_migrate_bot_resources_store,
        resolve_bot_provider_target_for_instance=_resolve_bot_provider_target_for_instance,
        default_provider_target=_default_provider_target,
        set_bot_provider_target=_set_bot_provider_target,
        apply_provider_target_to_bot=_apply_provider_target_to_bot,
        normalize_provider_target=normalize_provider_target,
        runtime_service=runtime_service,
        runtime_event_service=runtime_event_service,
        clear_provider_target_overrides=bot_infra_service.clear_provider_target_overrides,
    )
    # --- read-side services ----------------------------------------------
    bot_query_service = BotQueryService(
        cache=cache,
        cache_key_bots_list=_cache_key_bots_list,
        cache_key_bot_detail=_cache_key_bot_detail,
        refresh_bot_runtime_status=_refresh_bot_runtime_status,
        serialize_bot=_serialize_bot,
        serialize_bot_list_item=_serialize_bot_list_item,
        read_bot_resources=_read_bot_resources,
        resolve_bot_provider_target=_resolve_bot_provider_target_for_instance,
        get_runtime_provider=get_runtime_provider,
        workspace_root=_workspace_root,
        calc_dir_size_bytes=_calc_dir_size_bytes,
        logger=logger,
    )
    bot_channel_service = BotChannelService(
        read_bot_config=_read_bot_config,
        write_bot_config=_write_bot_config,
        sync_bot_workspace_via_provider=_sync_bot_workspace_via_provider,
        invalidate_bot_detail_cache=_invalidate_bot_detail_cache,
        get_bot_channels_from_config=_get_bot_channels_from_config,
        normalize_channel_extra=_normalize_channel_extra,
        channel_api_to_cfg=_channel_api_to_cfg,
        read_global_delivery_flags=_read_global_delivery_flags,
    )
    bot_message_service = BotMessageService(
        cache=cache,
        cache_key_bot_messages=_cache_key_bot_messages,
        cache_key_bot_messages_page=_cache_key_bot_messages_page,
        serialize_bot_message_row=_serialize_bot_message_row,
        resolve_local_day_range=_resolve_local_day_range,
        invalidate_bot_messages_cache=_invalidate_bot_messages_cache,
        get_chat_pull_page_size=get_chat_pull_page_size,
    )
    # --- auxiliary services (speech, images, provider tests, system) -----
    speech_transcription_service = build_speech_transcription_runtime_service(
        data_root=data_root,
        speech_service=speech_service,
        get_speech_runtime_settings=get_speech_runtime_settings,
        logger=logger,
    )
    image_service = build_image_runtime_service(
        cache=cache,
        cache_key_images=_cache_key_images,
        invalidate_images_cache=_invalidate_images_cache,
        docker_manager=docker_manager,
        reconcile_image_registry_fn=lambda session: reconcile_image_registry(session, docker_manager=docker_manager),
    )
    provider_test_service = ProviderTestService()
    system_service = build_system_runtime_service(
        engine=engine,
        cache=cache,
        database_engine=DATABASE_ENGINE,
        redis_enabled=REDIS_ENABLED,
        redis_url=REDIS_URL,
        redis_prefix=REDIS_PREFIX,
        agent_md_templates_file=str(AGENT_MD_TEMPLATES_FILE),
        topic_presets_templates_file=str(TOPIC_PRESETS_TEMPLATES_FILE),
        default_soul_md=DEFAULT_SOUL_MD,
        default_agents_md=DEFAULT_AGENTS_MD,
        default_user_md=DEFAULT_USER_MD,
        default_tools_md=DEFAULT_TOOLS_MD,
        default_identity_md=DEFAULT_IDENTITY_MD,
        topic_preset_templates=TOPIC_PRESET_TEMPLATES,
        get_default_system_timezone=_get_default_system_timezone,
        load_agent_md_templates=load_agent_md_templates,
        load_topic_presets_template=load_topic_presets_template,
        get_platform_settings_snapshot=get_platform_settings_snapshot,
        get_speech_runtime_settings=get_speech_runtime_settings,
    )
    # --- bot lifecycle (create / provision / retarget) --------------------
    bot_lifecycle_service = BotLifecycleService(
        bot_id_pattern=bot_id_pattern,
        runtime_service=runtime_service,
        refresh_bot_runtime_status=_refresh_bot_runtime_status,
        resolve_bot_provider_target=_resolve_bot_provider_target_for_instance,
        provider_target_from_node=_provider_target_from_node,
        default_provider_target=_default_provider_target,
        ensure_provider_target_supported=_ensure_provider_target_supported,
        require_ready_image=image_service.require_ready_image,
        sync_bot_workspace_via_provider=_sync_bot_workspace_via_provider,
        apply_provider_target_to_bot=_apply_provider_target_to_bot,
        serialize_provider_target_summary=_serialize_provider_target_summary,
        serialize_bot=_serialize_bot,
        node_display_name=_node_display_name,
        invalidate_bot_detail_cache=_invalidate_bot_detail_cache,
        record_activity_event=record_activity_event,
        normalize_env_params=_normalize_env_params,
        normalize_system_timezone=_normalize_system_timezone,
        normalize_resource_limits=_normalize_resource_limits,
        write_env_store=_write_env_store,
        resolve_bot_env_params=_resolve_bot_env_params,
        clear_provider_target_override=_clear_provider_target_override,
        normalize_initial_channels=_normalize_initial_channels,
        is_expected_edge_offline_error=is_expected_edge_offline_error,
        summarize_edge_exception=summarize_edge_exception,
        resolve_edge_client=_resolve_edge_client,
        node_metadata=_node_metadata,
        log_edge_failure=log_edge_failure,
        invalidate_bot_messages_cache=_invalidate_bot_messages_cache,
        logger=logger,
    )
    # --- expose services on app.state and mount the dashboard API --------
    attach_runtime_services(
        app=app,
        bot_command_service=bot_command_service,
        bot_lifecycle_service=bot_lifecycle_service,
        app_lifecycle_service=app_lifecycle_service,
        bot_query_service=bot_query_service,
        bot_channel_service=bot_channel_service,
        bot_message_service=bot_message_service,
        bot_runtime_snapshot_service=bot_runtime_snapshot_service,
        image_service=image_service,
        provider_test_service=provider_test_service,
        runtime_event_service=runtime_event_service,
        speech_transcription_service=speech_transcription_service,
        system_service=system_service,
        workspace_service=workspace_service,
        runtime_service=runtime_service,
    )
    include_dashboard_api(
        app=app,
        image_service=image_service,
        provider_test_service=provider_test_service,
        bot_lifecycle_service=bot_lifecycle_service,
        bot_query_service=bot_query_service,
        bot_channel_service=bot_channel_service,
        skill_service=skill_service,
        bot_config_state_service=bot_config_state_service,
        runtime_service=runtime_service,
        bot_message_service=bot_message_service,
        workspace_service=workspace_service,
        speech_transcription_service=speech_transcription_service,
        app_lifecycle_service=app_lifecycle_service,
        resolve_edge_state_context=_resolve_edge_state_context,
        logger=logger,
    )
    return AppRuntimeAssembly(
        dashboard_auth_service=dashboard_auth_service,
        system_service=system_service,
        app_lifecycle_service=app_lifecycle_service,
    )

View File

@ -0,0 +1,231 @@
from typing import Any
from sqlmodel import Session, select
from api.dashboard_router import build_dashboard_router
from models.bot import NanobotImage
from services.image_service import ImageService
from services.speech_transcription_service import SpeechTranscriptionService
from services.system_service import SystemService
def reconcile_image_registry(session: Session, *, docker_manager: Any) -> None:
    """Align stored NanobotImage rows with images actually present in Docker.

    Rows whose tag exists in the daemon are marked READY (refreshing
    ``image_id`` when the daemon lookup succeeds); all others become UNKNOWN.
    Commits once at the end.
    """
    for record in session.exec(select(NanobotImage)).all():
        if docker_manager.has_image(record.tag):
            try:
                client = docker_manager.client
                docker_image = client.images.get(record.tag) if client else None
                # Best effort: keep the stored image_id if the lookup yields nothing.
                record.image_id = docker_image.id if docker_image else record.image_id
            except Exception:
                # Daemon hiccups must not block reconciliation of other rows.
                pass
            record.status = "READY"
        else:
            record.status = "UNKNOWN"
        session.add(record)
    session.commit()
def register_provider_runtime(
    *,
    app: Any,
    provider_registry: Any,
    local_provider_target: Any,
    local_provision_provider: Any,
    local_runtime_provider: Any,
    local_workspace_provider: Any,
    edge_provision_provider: Any,
    edge_runtime_provider: Any,
    edge_workspace_provider: Any,
    resolve_bot_provider_target_for_instance: Any,
    resolve_edge_client: Any,
) -> None:
    """Register provider bundles and publish provider plumbing on ``app.state``."""
    # Local bundle under the caller-supplied default target key.
    provider_registry.register_bundle(
        key=local_provider_target.key,
        runtime_provider=local_runtime_provider,
        workspace_provider=local_workspace_provider,
        provision_provider=local_provision_provider,
    )
    # NOTE(review): this edge/docker key is constructed from the same four
    # field values the assembler uses to build ``local_provider_target``
    # itself; if the keys are equal, this registration overwrites the local
    # bundle above. Confirm the intended key (or register_bundle's
    # duplicate-key behavior) before relying on the local bundle.
    provider_registry.register_bundle(
        key=type(local_provider_target)(
            node_id="local",
            transport_kind="edge",
            runtime_kind="docker",
            core_adapter="nanobot",
        ).key,
        runtime_provider=edge_runtime_provider,
        workspace_provider=edge_workspace_provider,
        provision_provider=edge_provision_provider,
    )
    # Edge/native bundle for bots running without Docker on a remote node.
    provider_registry.register_bundle(
        key=type(local_provider_target)(
            node_id="local",
            transport_kind="edge",
            runtime_kind="native",
            core_adapter="nanobot",
        ).key,
        runtime_provider=edge_runtime_provider,
        workspace_provider=edge_workspace_provider,
        provision_provider=edge_provision_provider,
    )
    # Defaults used when a bot has no explicit provider target.
    app.state.provider_default_node_id = local_provider_target.node_id
    app.state.provider_default_transport_kind = local_provider_target.transport_kind
    app.state.provider_default_runtime_kind = local_provider_target.runtime_kind
    app.state.provider_default_core_adapter = local_provider_target.core_adapter
    app.state.provider_registry = provider_registry
    app.state.resolve_bot_provider_target = resolve_bot_provider_target_for_instance
    app.state.resolve_edge_client = resolve_edge_client
    app.state.edge_provision_provider = edge_provision_provider
    app.state.edge_runtime_provider = edge_runtime_provider
    app.state.edge_workspace_provider = edge_workspace_provider
    # Local providers double as the app-wide fallbacks.
    app.state.provision_provider = local_provision_provider
    app.state.runtime_provider = local_runtime_provider
    app.state.workspace_provider = local_workspace_provider
def build_speech_transcription_runtime_service(
    *,
    data_root: str,
    speech_service: Any,
    get_speech_runtime_settings: Any,
    logger: Any,
) -> SpeechTranscriptionService:
    """Construct the speech-transcription service from its runtime collaborators."""
    return SpeechTranscriptionService(
        logger=logger,
        data_root=data_root,
        speech_service=speech_service,
        get_speech_runtime_settings=get_speech_runtime_settings,
    )
def build_image_runtime_service(
    *,
    cache: Any,
    cache_key_images: Any,
    invalidate_images_cache: Any,
    docker_manager: Any,
    reconcile_image_registry_fn: Any,
) -> ImageService:
    """Construct the ImageService wired to its cache hooks and Docker backend."""
    return ImageService(
        docker_manager=docker_manager,
        cache=cache,
        cache_key_images=cache_key_images,
        invalidate_images_cache=invalidate_images_cache,
        reconcile_image_registry=reconcile_image_registry_fn,
    )
def build_system_runtime_service(
    *,
    engine: Any,
    cache: Any,
    database_engine: str,
    redis_enabled: bool,
    redis_url: str,
    redis_prefix: str,
    agent_md_templates_file: str,
    topic_presets_templates_file: str,
    default_soul_md: str,
    default_agents_md: str,
    default_user_md: str,
    default_tools_md: str,
    default_identity_md: str,
    topic_preset_templates: Any,
    get_default_system_timezone: Any,
    load_agent_md_templates: Any,
    load_topic_presets_template: Any,
    get_platform_settings_snapshot: Any,
    get_speech_runtime_settings: Any,
) -> SystemService:
    """Construct the SystemService from its configuration values and hooks.

    Pure pass-through factory: every keyword argument is forwarded unchanged.
    """
    service_kwargs = dict(
        engine=engine,
        cache=cache,
        database_engine=database_engine,
        redis_enabled=redis_enabled,
        redis_url=redis_url,
        redis_prefix=redis_prefix,
        agent_md_templates_file=agent_md_templates_file,
        topic_presets_templates_file=topic_presets_templates_file,
        default_soul_md=default_soul_md,
        default_agents_md=default_agents_md,
        default_user_md=default_user_md,
        default_tools_md=default_tools_md,
        default_identity_md=default_identity_md,
        topic_preset_templates=topic_preset_templates,
        get_default_system_timezone=get_default_system_timezone,
        load_agent_md_templates=load_agent_md_templates,
        load_topic_presets_template=load_topic_presets_template,
        get_platform_settings_snapshot=get_platform_settings_snapshot,
        get_speech_runtime_settings=get_speech_runtime_settings,
    )
    return SystemService(**service_kwargs)
def attach_runtime_services(
    *,
    app: Any,
    bot_command_service: Any,
    bot_lifecycle_service: Any,
    app_lifecycle_service: Any,
    bot_query_service: Any,
    bot_channel_service: Any,
    bot_message_service: Any,
    bot_runtime_snapshot_service: Any,
    image_service: Any,
    provider_test_service: Any,
    runtime_event_service: Any,
    speech_transcription_service: Any,
    system_service: Any,
    workspace_service: Any,
    runtime_service: Any,
) -> None:
    """Expose every runtime service as an attribute of ``app.state``."""
    # Dict insertion order preserves the original attribute-assignment order.
    services = {
        "bot_command_service": bot_command_service,
        "bot_lifecycle_service": bot_lifecycle_service,
        "app_lifecycle_service": app_lifecycle_service,
        "bot_query_service": bot_query_service,
        "bot_channel_service": bot_channel_service,
        "bot_message_service": bot_message_service,
        "bot_runtime_snapshot_service": bot_runtime_snapshot_service,
        "image_service": image_service,
        "provider_test_service": provider_test_service,
        "runtime_event_service": runtime_event_service,
        "speech_transcription_service": speech_transcription_service,
        "system_service": system_service,
        "workspace_service": workspace_service,
        "runtime_service": runtime_service,
    }
    for attr_name, service in services.items():
        setattr(app.state, attr_name, service)
def include_dashboard_api(
    *,
    app: Any,
    image_service: Any,
    provider_test_service: Any,
    bot_lifecycle_service: Any,
    bot_query_service: Any,
    bot_channel_service: Any,
    skill_service: Any,
    bot_config_state_service: Any,
    runtime_service: Any,
    bot_message_service: Any,
    workspace_service: Any,
    speech_transcription_service: Any,
    app_lifecycle_service: Any,
    resolve_edge_state_context: Any,
    logger: Any,
) -> None:
    """Build the dashboard router from its services and mount it on *app*."""
    dashboard_router = build_dashboard_router(
        image_service=image_service,
        provider_test_service=provider_test_service,
        bot_lifecycle_service=bot_lifecycle_service,
        bot_query_service=bot_query_service,
        bot_channel_service=bot_channel_service,
        skill_service=skill_service,
        bot_config_state_service=bot_config_state_service,
        runtime_service=runtime_service,
        bot_message_service=bot_message_service,
        workspace_service=workspace_service,
        speech_transcription_service=speech_transcription_service,
        app_lifecycle_service=app_lifecycle_service,
        resolve_edge_state_context=resolve_edge_state_context,
        logger=logger,
    )
    app.include_router(dashboard_router)

View File

@ -0,0 +1 @@
# Client package for dashboard-edge integrations.

View File

@ -0,0 +1,168 @@
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from fastapi import Request, UploadFile
from fastapi.responses import Response
from models.bot import BotInstance
class EdgeClient(ABC):
    """Abstract client the dashboard uses to operate bots on an edge node.

    Covers bot lifecycle, command delivery, monitoring, state/workspace file
    access and node introspection. ``start_bot`` and ``upload_files`` are
    async; everything else is synchronous. Methods returning ``Dict[str, Any]``
    surface the edge's response payload (schema defined by the implementation).
    """

    @abstractmethod
    async def start_bot(self, *, bot: BotInstance, start_payload: Dict[str, Any]) -> Dict[str, Any]:
        """Start *bot* on the edge with *start_payload*; return the edge response."""
        raise NotImplementedError

    @abstractmethod
    def stop_bot(self, *, bot: BotInstance) -> Dict[str, Any]:
        """Stop *bot* on the edge; return the edge response."""
        raise NotImplementedError

    @abstractmethod
    def deliver_command(self, *, bot_id: str, command: str, media: Optional[List[str]] = None) -> Optional[str]:
        """Deliver a command (optionally with media paths) to the bot; may return a reply string."""
        raise NotImplementedError

    @abstractmethod
    def get_recent_logs(self, *, bot_id: str, tail: int = 300) -> List[str]:
        """Return up to *tail* recent log lines for the bot."""
        raise NotImplementedError

    @abstractmethod
    def ensure_monitor(self, *, bot_id: str) -> bool:
        """Ensure a monitor exists for the bot; True on success."""
        raise NotImplementedError

    @abstractmethod
    def get_monitor_packets(self, *, bot_id: str, after_seq: int = 0, limit: int = 200) -> List[Dict[str, Any]]:
        """Fetch monitor packets with sequence number greater than *after_seq*."""
        raise NotImplementedError

    @abstractmethod
    def get_runtime_status(self, *, bot_id: str) -> str:
        """Return the bot's runtime status as a string."""
        raise NotImplementedError

    @abstractmethod
    def get_resource_snapshot(self, *, bot_id: str) -> Dict[str, Any]:
        """Return a resource-usage snapshot for the bot."""
        raise NotImplementedError

    @abstractmethod
    def get_node_resources(self) -> Dict[str, Any]:
        """Return resource information for the edge node itself."""
        raise NotImplementedError

    @abstractmethod
    def get_node_self(self) -> Dict[str, Any]:
        """Return the edge node's self-description."""
        raise NotImplementedError

    @abstractmethod
    def preflight_native(self, *, native_command: Optional[str] = None, native_workdir: Optional[str] = None) -> Dict[str, Any]:
        """Check whether a native (non-docker) run with the given command/workdir is possible."""
        raise NotImplementedError

    @abstractmethod
    def read_state(
        self,
        *,
        bot_id: str,
        state_key: str,
        workspace_root: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Read the state blob stored under *state_key* for the bot."""
        raise NotImplementedError

    @abstractmethod
    def write_state(
        self,
        *,
        bot_id: str,
        state_key: str,
        data: Dict[str, Any],
        workspace_root: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Write *data* under *state_key* for the bot; return the edge response."""
        raise NotImplementedError

    @abstractmethod
    def sync_bot_workspace(
        self,
        *,
        bot_id: str,
        channels_override: Optional[List[Dict[str, Any]]] = None,
        global_delivery_override: Optional[Dict[str, Any]] = None,
        runtime_overrides: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Synchronize the bot's workspace on the edge, optionally overriding
        channels, global delivery flags and runtime settings."""
        raise NotImplementedError

    @abstractmethod
    def purge_workspace(self, *, bot_id: str, workspace_root: Optional[str] = None) -> Dict[str, Any]:
        """Remove the bot's workspace contents on the edge."""
        raise NotImplementedError

    @abstractmethod
    def list_tree(
        self,
        *,
        bot_id: str,
        path: Optional[str] = None,
        recursive: bool = False,
        workspace_root: Optional[str] = None,
    ) -> Dict[str, Any]:
        """List the workspace directory tree at *path* (optionally recursive)."""
        raise NotImplementedError

    @abstractmethod
    def read_file(
        self,
        *,
        bot_id: str,
        path: str,
        max_bytes: int = 200000,
        workspace_root: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Read a workspace file, capped at *max_bytes*."""
        raise NotImplementedError

    @abstractmethod
    def write_markdown(
        self,
        *,
        bot_id: str,
        path: str,
        content: str,
        workspace_root: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Write markdown *content* to a workspace file."""
        raise NotImplementedError

    @abstractmethod
    def write_text_file(
        self,
        *,
        bot_id: str,
        path: str,
        content: str,
        workspace_root: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Write plain-text *content* to a workspace file."""
        raise NotImplementedError

    @abstractmethod
    async def upload_files(
        self,
        *,
        bot_id: str,
        files: List[UploadFile],
        path: Optional[str] = None,
        workspace_root: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Upload one or more files into the workspace at *path*."""
        raise NotImplementedError

    @abstractmethod
    def delete_workspace_path(
        self,
        *,
        bot_id: str,
        path: str,
        workspace_root: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Delete a file or directory inside the bot's workspace."""
        raise NotImplementedError

    @abstractmethod
    def serve_file(
        self,
        *,
        bot_id: str,
        path: str,
        download: bool,
        request: Request,
        public: bool = False,
        redirect_html_to_raw: bool = False,
        workspace_root: Optional[str] = None,
    ) -> Response:
        """Serve a workspace file as an HTTP response (inline or as download)."""
        raise NotImplementedError

View File

@ -0,0 +1,84 @@
import logging
import threading
import time
from typing import Any
import httpx
from fastapi import HTTPException
_OFFLINE_LOG_LOCK = threading.Lock()
_OFFLINE_LOGGED_AT: dict[str, float] = {}
_DEFAULT_LOG_COOLDOWN_SECONDS = 60.0
def describe_edge_node(node: Any) -> str:
display_name = str(getattr(node, "display_name", "") or "").strip()
node_id = str(getattr(node, "node_id", "") or "").strip()
if display_name and node_id and display_name != node_id:
return f"{display_name} ({node_id})"
return display_name or node_id or "unknown edge node"
def summarize_edge_exception(exc: Exception) -> str:
detail = getattr(exc, "detail", None)
text = str(detail if detail is not None else exc).strip()
if not text:
return exc.__class__.__name__
return text[:400]
def edge_transport_http_exception(exc: httpx.RequestError, *, node: Any) -> HTTPException:
node_label = describe_edge_node(node)
if isinstance(exc, httpx.TimeoutException):
detail = f"dashboard-edge timed out for node {node_label}"
else:
reason = str(exc).strip() or exc.__class__.__name__
detail = f"dashboard-edge is unreachable for node {node_label}: {reason}"
return HTTPException(status_code=502, detail=detail[:400])
def is_expected_edge_offline_error(exc: Exception) -> bool:
if isinstance(exc, httpx.RequestError):
return True
if not isinstance(exc, HTTPException):
return False
if int(getattr(exc, "status_code", 0) or 0) not in {502, 503, 504}:
return False
detail = summarize_edge_exception(exc).lower()
markers = (
"dashboard-edge is unreachable",
"dashboard-edge timed out",
"connection refused",
"request failed before receiving a response",
"name or service not known",
"nodename nor servname provided",
"temporary failure in name resolution",
)
return any(marker in detail for marker in markers)
def log_edge_failure(
logger: logging.Logger,
*,
key: str,
exc: Exception,
message: str,
cooldown_seconds: float = _DEFAULT_LOG_COOLDOWN_SECONDS,
) -> None:
detail = summarize_edge_exception(exc)
if is_expected_edge_offline_error(exc):
if _should_emit_offline_log(key=key, cooldown_seconds=cooldown_seconds):
logger.info("%s detail=%s", message, detail)
return
logger.exception("%s detail=%s", message, detail)
def _should_emit_offline_log(*, key: str, cooldown_seconds: float) -> bool:
now = time.monotonic()
normalized_key = str(key or "edge-offline").strip() or "edge-offline"
with _OFFLINE_LOG_LOCK:
last_logged_at = _OFFLINE_LOGGED_AT.get(normalized_key, 0.0)
if now - last_logged_at < max(1.0, float(cooldown_seconds or _DEFAULT_LOG_COOLDOWN_SECONDS)):
return False
_OFFLINE_LOGGED_AT[normalized_key] = now
return True

View File

@ -0,0 +1,543 @@
import mimetypes
import os
from typing import Any, Callable, Dict, List, Optional
from urllib.parse import quote
import httpx
from fastapi import HTTPException, Request, UploadFile
from fastapi.responses import RedirectResponse, Response
from clients.edge.base import EdgeClient
from clients.edge.errors import edge_transport_http_exception
from models.bot import BotInstance
from schemas.edge import (
EdgeCommandRequest,
EdgeLogsResponse,
EdgeNativePreflightRequest,
EdgeNativePreflightResponse,
EdgeNodeHeartbeatResponse,
EdgeMonitorPacketsResponse,
EdgeMarkdownWriteRequest,
EdgeMonitorEnsureResponse,
EdgeNodeResourcesResponse,
EdgeNodeSelfResponse,
EdgeStateResponse,
EdgeStateWriteRequest,
EdgeStartBotRequest,
EdgeStatusResponse,
EdgeWorkspaceSyncRequest,
)
from services.node_registry_service import ManagedNode
EDGE_AUTH_HEADER = "x-dashboard-edge-token"
class HttpEdgeClient(EdgeClient):
def __init__(
self,
*,
node: ManagedNode,
http_client_factory: Optional[Callable[[], httpx.Client]] = None,
async_http_client_factory: Optional[Callable[[], httpx.AsyncClient]] = None,
) -> None:
self._node = node
self._http_client_factory = http_client_factory or (lambda: httpx.Client(timeout=15.0, trust_env=False))
self._async_http_client_factory = async_http_client_factory or (
lambda: httpx.AsyncClient(timeout=15.0, trust_env=False)
)
async def start_bot(self, *, bot: BotInstance, start_payload: Dict[str, Any]) -> Dict[str, Any]:
payload = await self._async_request_json(
"POST",
f"/api/edge/bots/{bot.id}/start",
json=EdgeStartBotRequest.model_validate(start_payload).model_dump(),
)
return EdgeStatusResponse.model_validate(payload).model_dump()
def stop_bot(self, *, bot: BotInstance) -> Dict[str, Any]:
payload = self._request_json("POST", f"/api/edge/bots/{bot.id}/stop")
return EdgeStatusResponse.model_validate(payload).model_dump()
def deliver_command(self, *, bot_id: str, command: str, media: Optional[List[str]] = None) -> Optional[str]:
self._request_json(
"POST",
f"/api/edge/bots/{bot_id}/command",
json=EdgeCommandRequest(command=command, media=list(media or [])).model_dump(),
)
return None
def get_recent_logs(self, *, bot_id: str, tail: int = 300) -> List[str]:
payload = self._request_json(
"GET",
f"/api/edge/bots/{bot_id}/logs",
params={"tail": max(1, int(tail or 300))},
)
return EdgeLogsResponse.model_validate(payload).logs
def ensure_monitor(self, *, bot_id: str) -> bool:
payload = self._request_json("POST", f"/api/edge/bots/{bot_id}/monitor/ensure")
return bool(EdgeMonitorEnsureResponse.model_validate(payload).ensured)
def get_monitor_packets(self, *, bot_id: str, after_seq: int = 0, limit: int = 200) -> List[Dict[str, Any]]:
payload = self._request_json(
"GET",
f"/api/edge/bots/{bot_id}/monitor/packets",
params={"after_seq": max(0, int(after_seq or 0)), "limit": max(1, int(limit or 200))},
)
parsed = EdgeMonitorPacketsResponse.model_validate(payload)
rows: List[Dict[str, Any]] = []
for item in parsed.packets or []:
rows.append(item.model_dump())
return rows
def get_runtime_status(self, *, bot_id: str) -> str:
payload = self._request_json("GET", f"/api/edge/bots/{bot_id}/runtime/status")
return str(payload.get("status") or "STOPPED").upper()
def get_resource_snapshot(self, *, bot_id: str) -> Dict[str, Any]:
return self._request_json("GET", f"/api/edge/bots/{bot_id}/resources")
def get_node_resources(self) -> Dict[str, Any]:
payload = self._request_json("GET", "/api/edge/node/resources")
return EdgeNodeResourcesResponse.model_validate(payload).model_dump()
def get_node_self(self) -> Dict[str, Any]:
payload = self._request_json("GET", "/api/edge/node/self")
return EdgeNodeSelfResponse.model_validate(payload).model_dump()
def heartbeat_node(self) -> Dict[str, Any]:
payload = self._request_json("POST", "/api/edge/node/heartbeat")
return EdgeNodeHeartbeatResponse.model_validate(payload).model_dump()
def preflight_native(self, *, native_command: Optional[str] = None, native_workdir: Optional[str] = None) -> Dict[str, Any]:
payload = self._request_json(
"POST",
"/api/edge/runtime/native/preflight",
json=EdgeNativePreflightRequest(
native_command=str(native_command or "").strip() or None,
native_workdir=str(native_workdir or "").strip() or None,
).model_dump(),
)
return EdgeNativePreflightResponse.model_validate(payload).model_dump()
def read_state(
self,
*,
bot_id: str,
state_key: str,
workspace_root: Optional[str] = None,
) -> Dict[str, Any]:
params: Dict[str, Any] = {}
if workspace_root:
params["workspace_root"] = str(workspace_root).strip()
payload = self._request_json(
"GET",
f"/api/edge/bots/{bot_id}/state/{state_key}",
params=params or None,
)
return EdgeStateResponse.model_validate(payload).model_dump()
def write_state(
self,
*,
bot_id: str,
state_key: str,
data: Dict[str, Any],
workspace_root: Optional[str] = None,
) -> Dict[str, Any]:
payload = self._request_json(
"PUT",
f"/api/edge/bots/{bot_id}/state/{state_key}",
json=EdgeStateWriteRequest(
data=dict(data or {}),
workspace_root=str(workspace_root or "").strip() or None,
).model_dump(),
)
return EdgeStateResponse.model_validate(payload).model_dump()
def sync_bot_workspace(
self,
*,
bot_id: str,
channels_override: Optional[List[Dict[str, Any]]] = None,
global_delivery_override: Optional[Dict[str, Any]] = None,
runtime_overrides: Optional[Dict[str, Any]] = None,
) -> None:
self._request_json(
"POST",
f"/api/edge/bots/{bot_id}/workspace/sync",
json=EdgeWorkspaceSyncRequest(
channels_override=channels_override,
global_delivery_override=global_delivery_override,
runtime_overrides=runtime_overrides,
).model_dump(),
)
def purge_workspace(self, *, bot_id: str, workspace_root: Optional[str] = None) -> Dict[str, Any]:
params: Dict[str, Any] = {}
if workspace_root:
params["workspace_root"] = str(workspace_root).strip()
payload = self._request_json(
"POST",
f"/api/edge/bots/{bot_id}/workspace/purge",
params=params or None,
)
return EdgeStatusResponse.model_validate(payload).model_dump()
def list_tree(
self,
*,
bot_id: str,
path: Optional[str] = None,
recursive: bool = False,
workspace_root: Optional[str] = None,
) -> Dict[str, Any]:
params: Dict[str, Any] = {"recursive": bool(recursive)}
if path:
params["path"] = path
if workspace_root:
params["workspace_root"] = str(workspace_root).strip()
return self._request_json("GET", f"/api/edge/bots/{bot_id}/workspace/tree", params=params)
def read_file(
self,
*,
bot_id: str,
path: str,
max_bytes: int = 200000,
workspace_root: Optional[str] = None,
) -> Dict[str, Any]:
params: Dict[str, Any] = {"path": path, "max_bytes": max(4096, int(max_bytes or 200000))}
if workspace_root:
params["workspace_root"] = str(workspace_root).strip()
return self._request_json(
"GET",
f"/api/edge/bots/{bot_id}/workspace/file",
params=params,
)
def write_markdown(
self,
*,
bot_id: str,
path: str,
content: str,
workspace_root: Optional[str] = None,
) -> Dict[str, Any]:
params: Dict[str, Any] = {"path": path}
if workspace_root:
params["workspace_root"] = str(workspace_root).strip()
return self._request_json(
"PUT",
f"/api/edge/bots/{bot_id}/workspace/file/markdown",
params=params,
json=EdgeMarkdownWriteRequest(content=str(content or "")).model_dump(),
)
def write_text_file(
self,
*,
bot_id: str,
path: str,
content: str,
workspace_root: Optional[str] = None,
) -> Dict[str, Any]:
params: Dict[str, Any] = {"path": path}
if workspace_root:
params["workspace_root"] = str(workspace_root).strip()
return self._request_json(
"PUT",
f"/api/edge/bots/{bot_id}/workspace/file/text",
params=params,
json=EdgeMarkdownWriteRequest(content=str(content or "")).model_dump(),
)
async def upload_files(
self,
*,
bot_id: str,
files: List[UploadFile],
path: Optional[str] = None,
workspace_root: Optional[str] = None,
) -> Dict[str, Any]:
base_url = self._require_base_url()
multipart_files = []
response: httpx.Response | None = None
try:
async with self._async_http_client_factory() as client:
for upload in files:
await upload.seek(0)
multipart_files.append(
(
"files",
(
upload.filename or "upload.bin",
upload.file,
upload.content_type or "application/octet-stream",
),
)
)
response = await client.request(
method="POST",
url=f"{base_url}/api/edge/bots/{quote(bot_id, safe='')}/workspace/upload",
headers=self._headers(),
params=self._workspace_upload_params(path=path, workspace_root=workspace_root),
files=multipart_files,
)
except httpx.RequestError as exc:
raise edge_transport_http_exception(exc, node=self._node) from exc
finally:
for upload in files:
await upload.close()
if response is None:
raise HTTPException(status_code=502, detail="dashboard-edge upload request failed before receiving a response")
return self._parse_json_response(response)
def delete_workspace_path(
self,
*,
bot_id: str,
path: str,
workspace_root: Optional[str] = None,
) -> Dict[str, Any]:
params: Dict[str, Any] = {"path": path}
if workspace_root:
params["workspace_root"] = str(workspace_root).strip()
return self._request_json(
"DELETE",
f"/api/edge/bots/{bot_id}/workspace/file",
params=params,
)
def upload_local_files(
self,
*,
bot_id: str,
local_paths: List[str],
path: Optional[str] = None,
workspace_root: Optional[str] = None,
) -> Dict[str, Any]:
if not local_paths:
return {"bot_id": bot_id, "files": []}
base_url = self._require_base_url()
multipart_files = []
handles = []
response: httpx.Response | None = None
try:
for local_path in local_paths:
normalized = os.path.abspath(os.path.expanduser(str(local_path or "").strip()))
if not os.path.isfile(normalized):
raise HTTPException(status_code=400, detail=f"Local upload file not found: {local_path}")
handle = open(normalized, "rb")
handles.append(handle)
multipart_files.append(
(
"files",
(
os.path.basename(normalized),
handle,
mimetypes.guess_type(normalized)[0] or "application/octet-stream",
),
)
)
with self._http_client_factory() as client:
response = client.request(
method="POST",
url=f"{base_url}/api/edge/bots/{quote(bot_id, safe='')}/workspace/upload",
headers=self._headers(),
params=self._workspace_upload_params(path=path, workspace_root=workspace_root),
files=multipart_files,
)
except OSError as exc:
raise HTTPException(status_code=500, detail=f"Failed to open local upload file: {exc.strerror or str(exc)}") from exc
except httpx.RequestError as exc:
raise edge_transport_http_exception(exc, node=self._node) from exc
finally:
for handle in handles:
try:
handle.close()
except Exception:
continue
if response is None:
raise HTTPException(status_code=502, detail="dashboard-edge upload request failed before receiving a response")
return self._parse_json_response(response)
def serve_file(
self,
*,
bot_id: str,
path: str,
download: bool,
request: Request,
public: bool = False,
redirect_html_to_raw: bool = False,
workspace_root: Optional[str] = None,
) -> Response:
media_type, _ = mimetypes.guess_type(path)
if redirect_html_to_raw and not download and str(media_type or "").startswith("text/html"):
raw_url = self._build_dashboard_raw_url(bot_id=bot_id, path=path, public=public)
if raw_url:
return RedirectResponse(url=raw_url, status_code=307)
base_url = self._require_base_url()
url = self._build_edge_file_url(
bot_id=bot_id,
path=path,
download=download,
raw=not redirect_html_to_raw,
workspace_root=workspace_root,
)
headers = self._headers()
range_header = request.headers.get("range", "").strip()
if range_header and not download:
headers["range"] = range_header
try:
with self._http_client_factory() as client:
response = client.request(
method="GET",
url=f"{base_url}{url}",
headers=headers,
)
except httpx.RequestError as exc:
raise edge_transport_http_exception(exc, node=self._node) from exc
self._raise_for_status(response)
return Response(
content=response.content,
status_code=response.status_code,
media_type=response.headers.get("content-type") or "application/octet-stream",
headers=self._response_proxy_headers(response),
)
def _request_json(
self,
method: str,
path: str,
*,
params: Optional[Dict[str, Any]] = None,
json: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
base_url = self._require_base_url()
try:
with self._http_client_factory() as client:
response = client.request(
method=method.upper(),
url=f"{base_url}{path}",
headers=self._headers(),
params=params,
json=json,
)
except httpx.RequestError as exc:
raise edge_transport_http_exception(exc, node=self._node) from exc
return self._parse_json_response(response)
async def _async_request_json(
self,
method: str,
path: str,
*,
params: Optional[Dict[str, Any]] = None,
json: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
base_url = self._require_base_url()
try:
async with self._async_http_client_factory() as client:
response = await client.request(
method=method.upper(),
url=f"{base_url}{path}",
headers=self._headers(),
params=params,
json=json,
)
except httpx.RequestError as exc:
raise edge_transport_http_exception(exc, node=self._node) from exc
return self._parse_json_response(response)
def _headers(self) -> Dict[str, str]:
headers = {"accept": "application/json"}
token = str(self._node.auth_token or "").strip()
if token:
headers[EDGE_AUTH_HEADER] = token
return headers
def _require_base_url(self) -> str:
base_url = str(self._node.base_url or "").strip().rstrip("/")
if not base_url:
raise self._not_implemented("connect to node")
return base_url
@staticmethod
def _raise_for_status(response: httpx.Response) -> None:
try:
response.raise_for_status()
except httpx.HTTPStatusError as exc:
detail = exc.response.text.strip() or str(exc)
raise HTTPException(status_code=502, detail=f"dashboard-edge request failed: {detail[:400]}") from exc
@classmethod
def _parse_json_response(cls, response: httpx.Response) -> Dict[str, Any]:
cls._raise_for_status(response)
try:
payload = response.json()
except Exception as exc:
raise HTTPException(status_code=502, detail="dashboard-edge returned invalid JSON") from exc
if not isinstance(payload, dict):
raise HTTPException(status_code=502, detail="dashboard-edge returned unexpected payload")
return payload
@staticmethod
def _build_dashboard_raw_url(bot_id: str, path: str, public: bool) -> str:
normalized = "/".join(part for part in str(path or "").strip().split("/") if part)
if not normalized:
return ""
prefix = "/public" if public else "/api"
return f"{prefix}/bots/{quote(bot_id, safe='')}/workspace/raw/{quote(normalized, safe='/')}"
@staticmethod
def _build_edge_file_url(
*,
bot_id: str,
path: str,
download: bool,
raw: bool,
workspace_root: Optional[str] = None,
) -> str:
workspace_root_qs = ""
normalized_workspace_root = str(workspace_root or "").strip()
if normalized_workspace_root:
workspace_root_qs = f"&workspace_root={quote(normalized_workspace_root, safe='/')}"
if raw:
normalized = "/".join(part for part in str(path or "").strip().split("/") if part)
if not normalized:
raise HTTPException(status_code=400, detail="invalid workspace path")
return (
f"/api/edge/bots/{quote(bot_id, safe='')}/workspace/raw/"
f"{quote(normalized, safe='/')}?download={'true' if download else 'false'}{workspace_root_qs}"
)
return (
f"/api/edge/bots/{quote(bot_id, safe='')}/workspace/download"
f"?path={quote(str(path or ''), safe='/')}&download={'true' if download else 'false'}{workspace_root_qs}"
)
@staticmethod
def _workspace_upload_params(*, path: Optional[str], workspace_root: Optional[str]) -> Optional[Dict[str, Any]]:
params: Dict[str, Any] = {}
if path:
params["path"] = path
normalized_workspace_root = str(workspace_root or "").strip()
if normalized_workspace_root:
params["workspace_root"] = normalized_workspace_root
return params or None
@staticmethod
def _response_proxy_headers(response: httpx.Response) -> Dict[str, str]:
kept: Dict[str, str] = {}
for name in ("accept-ranges", "content-disposition", "content-length", "content-range", "cache-control"):
value = response.headers.get(name)
if value:
kept[name] = value
return kept
def _not_implemented(self, capability: str) -> HTTPException:
node_label = self._node.display_name or self._node.node_id
return HTTPException(status_code=501, detail=f"dashboard-edge {capability} is not implemented yet for node {node_label}")

View File

@ -71,6 +71,11 @@ class BotConfigManager:
existing_tools = existing_config.get("tools") existing_tools = existing_config.get("tools")
tools_cfg: Dict[str, Any] = dict(existing_tools) if isinstance(existing_tools, dict) else {} tools_cfg: Dict[str, Any] = dict(existing_tools) if isinstance(existing_tools, dict) else {}
native_sandbox_mode = self._normalize_native_sandbox_mode(bot_data.get("native_sandbox_mode"))
if native_sandbox_mode == "workspace":
tools_cfg["restrictToWorkspace"] = True
elif native_sandbox_mode == "full_access":
tools_cfg["restrictToWorkspace"] = False
if "mcp_servers" in bot_data: if "mcp_servers" in bot_data:
mcp_servers = bot_data.get("mcp_servers") mcp_servers = bot_data.get("mcp_servers")
if isinstance(mcp_servers, dict): if isinstance(mcp_servers, dict):
@ -249,3 +254,12 @@ class BotConfigManager:
if not rows: if not rows:
return ["*"] return ["*"]
return rows return rows
@staticmethod
def _normalize_native_sandbox_mode(raw_value: Any) -> str:
text = str(raw_value or "").strip().lower()
if text in {"workspace", "sandbox", "strict"}:
return "workspace"
if text in {"full_access", "full-access", "danger-full-access", "escape"}:
return "full_access"
return "inherit"

View File

@ -15,23 +15,22 @@ from core.settings import (
from models import bot as _bot_models # noqa: F401 from models import bot as _bot_models # noqa: F401
from models import platform as _platform_models # noqa: F401 from models import platform as _platform_models # noqa: F401
from models import skill as _skill_models # noqa: F401 from models import skill as _skill_models # noqa: F401
from models import sys_auth as _sys_auth_models # noqa: F401
from models import topic as _topic_models # noqa: F401 from models import topic as _topic_models # noqa: F401
from services.sys_auth_service import seed_sys_auth
_engine_kwargs = { _engine_kwargs = {
"echo": DATABASE_ECHO, "echo": DATABASE_ECHO,
} }
if DATABASE_ENGINE == "sqlite": _engine_kwargs.update(
_engine_kwargs["connect_args"] = {"check_same_thread": False} {
else: "pool_pre_ping": True,
_engine_kwargs.update( "pool_size": DATABASE_POOL_SIZE,
{ "max_overflow": DATABASE_MAX_OVERFLOW,
"pool_pre_ping": True, "pool_timeout": DATABASE_POOL_TIMEOUT,
"pool_size": DATABASE_POOL_SIZE, "pool_recycle": DATABASE_POOL_RECYCLE,
"max_overflow": DATABASE_MAX_OVERFLOW, }
"pool_timeout": DATABASE_POOL_TIMEOUT, )
"pool_recycle": DATABASE_POOL_RECYCLE,
}
)
engine = create_engine(DATABASE_URL, **_engine_kwargs) engine = create_engine(DATABASE_URL, **_engine_kwargs)
@ -41,6 +40,7 @@ BOT_IMAGE_TABLE = "bot_image"
BOT_REQUEST_USAGE_TABLE = "bot_request_usage" BOT_REQUEST_USAGE_TABLE = "bot_request_usage"
BOT_ACTIVITY_EVENT_TABLE = "bot_activity_event" BOT_ACTIVITY_EVENT_TABLE = "bot_activity_event"
SYS_SETTING_TABLE = "sys_setting" SYS_SETTING_TABLE = "sys_setting"
MANAGED_NODE_TABLE = "managed_node"
POSTGRES_MIGRATION_LOCK_KEY = 2026031801 POSTGRES_MIGRATION_LOCK_KEY = 2026031801
MYSQL_MIGRATION_LOCK_NAME = "dashboard_nanobot_schema_migration" MYSQL_MIGRATION_LOCK_NAME = "dashboard_nanobot_schema_migration"
LEGACY_TABLE_PAIRS = [ LEGACY_TABLE_PAIRS = [
@ -266,30 +266,41 @@ def _ensure_botinstance_columns() -> None:
dialect = engine.dialect.name dialect = engine.dialect.name
required_columns = { required_columns = {
"current_state": { "current_state": {
"sqlite": "TEXT DEFAULT 'IDLE'",
"postgresql": "TEXT DEFAULT 'IDLE'", "postgresql": "TEXT DEFAULT 'IDLE'",
"mysql": "VARCHAR(64) DEFAULT 'IDLE'", "mysql": "VARCHAR(64) DEFAULT 'IDLE'",
}, },
"last_action": { "last_action": {
"sqlite": "TEXT",
"postgresql": "TEXT", "postgresql": "TEXT",
"mysql": "LONGTEXT", "mysql": "LONGTEXT",
}, },
"image_tag": { "image_tag": {
"sqlite": "TEXT DEFAULT 'nanobot-base:v0.1.4'",
"postgresql": "TEXT DEFAULT 'nanobot-base:v0.1.4'", "postgresql": "TEXT DEFAULT 'nanobot-base:v0.1.4'",
"mysql": "VARCHAR(255) DEFAULT 'nanobot-base:v0.1.4'", "mysql": "VARCHAR(255) DEFAULT 'nanobot-base:v0.1.4'",
}, },
"access_password": { "access_password": {
"sqlite": "TEXT DEFAULT ''",
"postgresql": "TEXT DEFAULT ''", "postgresql": "TEXT DEFAULT ''",
"mysql": "VARCHAR(255) DEFAULT ''", "mysql": "VARCHAR(255) DEFAULT ''",
}, },
"enabled": { "enabled": {
"sqlite": "INTEGER NOT NULL DEFAULT 1",
"postgresql": "BOOLEAN NOT NULL DEFAULT TRUE", "postgresql": "BOOLEAN NOT NULL DEFAULT TRUE",
"mysql": "BOOLEAN NOT NULL DEFAULT TRUE", "mysql": "BOOLEAN NOT NULL DEFAULT TRUE",
}, },
"node_id": {
"postgresql": "TEXT NOT NULL DEFAULT 'local'",
"mysql": "VARCHAR(120) NOT NULL DEFAULT 'local'",
},
"transport_kind": {
"postgresql": "TEXT NOT NULL DEFAULT 'direct'",
"mysql": "VARCHAR(32) NOT NULL DEFAULT 'direct'",
},
"runtime_kind": {
"postgresql": "TEXT NOT NULL DEFAULT 'docker'",
"mysql": "VARCHAR(32) NOT NULL DEFAULT 'docker'",
},
"core_adapter": {
"postgresql": "TEXT NOT NULL DEFAULT 'nanobot'",
"mysql": "VARCHAR(64) NOT NULL DEFAULT 'nanobot'",
},
} }
inspector = inspect(engine) inspector = inspect(engine)
@ -304,13 +315,36 @@ def _ensure_botinstance_columns() -> None:
for col, ddl_map in required_columns.items(): for col, ddl_map in required_columns.items():
if col in existing: if col in existing:
continue continue
ddl = ddl_map.get(dialect) or ddl_map.get("sqlite") ddl = ddl_map.get(dialect) or ddl_map.get("postgresql")
conn.execute(text(f"ALTER TABLE {BOT_INSTANCE_TABLE} ADD COLUMN {col} {ddl}")) conn.execute(text(f"ALTER TABLE {BOT_INSTANCE_TABLE} ADD COLUMN {col} {ddl}"))
if "enabled" in existing: if "enabled" in existing:
if dialect == "sqlite": conn.execute(text(f"UPDATE {BOT_INSTANCE_TABLE} SET enabled = TRUE WHERE enabled IS NULL"))
conn.execute(text(f"UPDATE {BOT_INSTANCE_TABLE} SET enabled = 1 WHERE enabled IS NULL")) conn.commit()
else:
conn.execute(text(f"UPDATE {BOT_INSTANCE_TABLE} SET enabled = TRUE WHERE enabled IS NULL"))
def _ensure_botinstance_indexes() -> None:
required_indexes = [
("idx_bot_instance_enabled", BOT_INSTANCE_TABLE, ["enabled"]),
("idx_bot_instance_docker_status", BOT_INSTANCE_TABLE, ["docker_status"]),
("idx_bot_instance_node_id", BOT_INSTANCE_TABLE, ["node_id"]),
("idx_bot_instance_transport_kind", BOT_INSTANCE_TABLE, ["transport_kind"]),
("idx_bot_instance_runtime_kind", BOT_INSTANCE_TABLE, ["runtime_kind"]),
("idx_bot_instance_core_adapter", BOT_INSTANCE_TABLE, ["core_adapter"]),
("idx_bot_instance_node_transport_runtime", BOT_INSTANCE_TABLE, ["node_id", "transport_kind", "runtime_kind"]),
]
inspector = inspect(engine)
with engine.connect() as conn:
if not inspector.has_table(BOT_INSTANCE_TABLE):
return
existing = {
str(item.get("name"))
for item in inspector.get_indexes(BOT_INSTANCE_TABLE)
if item.get("name")
}
for name, table_name, columns in required_indexes:
if name in existing:
continue
conn.execute(text(f"CREATE INDEX {name} ON {table_name} ({', '.join(columns)})"))
conn.commit() conn.commit()
@ -350,8 +384,6 @@ def _drop_legacy_botinstance_columns() -> None:
try: try:
if engine.dialect.name == "mysql": if engine.dialect.name == "mysql":
conn.execute(text(f"ALTER TABLE {BOT_INSTANCE_TABLE} DROP COLUMN `{col}`")) conn.execute(text(f"ALTER TABLE {BOT_INSTANCE_TABLE} DROP COLUMN `{col}`"))
elif engine.dialect.name == "sqlite":
conn.execute(text(f'ALTER TABLE {BOT_INSTANCE_TABLE} DROP COLUMN "{col}"'))
else: else:
conn.execute(text(f'ALTER TABLE {BOT_INSTANCE_TABLE} DROP COLUMN IF EXISTS "{col}"')) conn.execute(text(f'ALTER TABLE {BOT_INSTANCE_TABLE} DROP COLUMN IF EXISTS "{col}"'))
except Exception: except Exception:
@ -360,24 +392,6 @@ def _drop_legacy_botinstance_columns() -> None:
conn.commit() conn.commit()
def _ensure_botmessage_columns() -> None:
if engine.dialect.name != "sqlite":
return
required_columns = {
"media_json": "TEXT",
"feedback": "TEXT",
"feedback_at": "DATETIME",
}
with engine.connect() as conn:
existing_rows = conn.execute(text(f"PRAGMA table_info({BOT_MESSAGE_TABLE})")).fetchall()
existing = {str(row[1]) for row in existing_rows}
for col, ddl in required_columns.items():
if col in existing:
continue
conn.execute(text(f"ALTER TABLE {BOT_MESSAGE_TABLE} ADD COLUMN {col} {ddl}"))
conn.commit()
def _drop_legacy_skill_tables() -> None: def _drop_legacy_skill_tables() -> None:
"""Drop deprecated skill registry tables (moved to workspace filesystem mode).""" """Drop deprecated skill registry tables (moved to workspace filesystem mode)."""
with engine.connect() as conn: with engine.connect() as conn:
@ -390,32 +404,26 @@ def _ensure_sys_setting_columns() -> None:
dialect = engine.dialect.name dialect = engine.dialect.name
required_columns = { required_columns = {
"name": { "name": {
"sqlite": "TEXT NOT NULL DEFAULT ''",
"postgresql": "TEXT NOT NULL DEFAULT ''", "postgresql": "TEXT NOT NULL DEFAULT ''",
"mysql": "VARCHAR(200) NOT NULL DEFAULT ''", "mysql": "VARCHAR(200) NOT NULL DEFAULT ''",
}, },
"category": { "category": {
"sqlite": "TEXT NOT NULL DEFAULT 'general'",
"postgresql": "TEXT NOT NULL DEFAULT 'general'", "postgresql": "TEXT NOT NULL DEFAULT 'general'",
"mysql": "VARCHAR(64) NOT NULL DEFAULT 'general'", "mysql": "VARCHAR(64) NOT NULL DEFAULT 'general'",
}, },
"description": { "description": {
"sqlite": "TEXT NOT NULL DEFAULT ''",
"postgresql": "TEXT NOT NULL DEFAULT ''", "postgresql": "TEXT NOT NULL DEFAULT ''",
"mysql": "LONGTEXT", "mysql": "LONGTEXT",
}, },
"value_type": { "value_type": {
"sqlite": "TEXT NOT NULL DEFAULT 'json'",
"postgresql": "TEXT NOT NULL DEFAULT 'json'", "postgresql": "TEXT NOT NULL DEFAULT 'json'",
"mysql": "VARCHAR(32) NOT NULL DEFAULT 'json'", "mysql": "VARCHAR(32) NOT NULL DEFAULT 'json'",
}, },
"is_public": { "is_public": {
"sqlite": "INTEGER NOT NULL DEFAULT 0",
"postgresql": "BOOLEAN NOT NULL DEFAULT FALSE", "postgresql": "BOOLEAN NOT NULL DEFAULT FALSE",
"mysql": "BOOLEAN NOT NULL DEFAULT FALSE", "mysql": "BOOLEAN NOT NULL DEFAULT FALSE",
}, },
"sort_order": { "sort_order": {
"sqlite": "INTEGER NOT NULL DEFAULT 100",
"postgresql": "INTEGER NOT NULL DEFAULT 100", "postgresql": "INTEGER NOT NULL DEFAULT 100",
"mysql": "INTEGER NOT NULL DEFAULT 100", "mysql": "INTEGER NOT NULL DEFAULT 100",
}, },
@ -432,7 +440,7 @@ def _ensure_sys_setting_columns() -> None:
for col, ddl_map in required_columns.items(): for col, ddl_map in required_columns.items():
if col in existing: if col in existing:
continue continue
ddl = ddl_map.get(dialect) or ddl_map.get("sqlite") ddl = ddl_map.get(dialect) or ddl_map.get("postgresql")
conn.execute(text(f"ALTER TABLE {SYS_SETTING_TABLE} ADD COLUMN {col} {ddl}")) conn.execute(text(f"ALTER TABLE {SYS_SETTING_TABLE} ADD COLUMN {col} {ddl}"))
conn.commit() conn.commit()
@ -441,17 +449,14 @@ def _ensure_bot_request_usage_columns() -> None:
dialect = engine.dialect.name dialect = engine.dialect.name
required_columns = { required_columns = {
"message_id": { "message_id": {
"sqlite": "INTEGER",
"postgresql": "INTEGER", "postgresql": "INTEGER",
"mysql": "INTEGER", "mysql": "INTEGER",
}, },
"provider": { "provider": {
"sqlite": "TEXT",
"postgresql": "TEXT", "postgresql": "TEXT",
"mysql": "VARCHAR(120)", "mysql": "VARCHAR(120)",
}, },
"model": { "model": {
"sqlite": "TEXT",
"postgresql": "TEXT", "postgresql": "TEXT",
"mysql": "VARCHAR(255)", "mysql": "VARCHAR(255)",
}, },
@ -468,69 +473,105 @@ def _ensure_bot_request_usage_columns() -> None:
for col, ddl_map in required_columns.items(): for col, ddl_map in required_columns.items():
if col in existing: if col in existing:
continue continue
ddl = ddl_map.get(dialect) or ddl_map.get("sqlite") ddl = ddl_map.get(dialect) or ddl_map.get("postgresql")
conn.execute(text(f"ALTER TABLE {BOT_REQUEST_USAGE_TABLE} ADD COLUMN {col} {ddl}")) conn.execute(text(f"ALTER TABLE {BOT_REQUEST_USAGE_TABLE} ADD COLUMN {col} {ddl}"))
conn.commit() conn.commit()
def _ensure_topic_tables_sqlite() -> None: def _ensure_managed_node_columns() -> None:
if engine.dialect.name != "sqlite": dialect = engine.dialect.name
required_columns = {
"display_name": {
"postgresql": "TEXT NOT NULL DEFAULT ''",
"mysql": "VARCHAR(200) NOT NULL DEFAULT ''",
},
"base_url": {
"postgresql": "TEXT NOT NULL DEFAULT ''",
"mysql": "TEXT NOT NULL",
},
"enabled": {
"postgresql": "BOOLEAN NOT NULL DEFAULT TRUE",
"mysql": "BOOLEAN NOT NULL DEFAULT TRUE",
},
"auth_token": {
"postgresql": "TEXT NOT NULL DEFAULT ''",
"mysql": "TEXT NOT NULL",
},
"transport_kind": {
"postgresql": "TEXT NOT NULL DEFAULT 'direct'",
"mysql": "VARCHAR(32) NOT NULL DEFAULT 'direct'",
},
"runtime_kind": {
"postgresql": "TEXT NOT NULL DEFAULT 'docker'",
"mysql": "VARCHAR(32) NOT NULL DEFAULT 'docker'",
},
"core_adapter": {
"postgresql": "TEXT NOT NULL DEFAULT 'nanobot'",
"mysql": "VARCHAR(64) NOT NULL DEFAULT 'nanobot'",
},
"metadata_json": {
"postgresql": "TEXT NOT NULL DEFAULT '{}'",
"mysql": "LONGTEXT",
},
"capabilities_json": {
"postgresql": "TEXT NOT NULL DEFAULT '{}'",
"mysql": "LONGTEXT",
},
"resources_json": {
"postgresql": "TEXT NOT NULL DEFAULT '{}'",
"mysql": "LONGTEXT",
},
"last_seen_at": {
"postgresql": "TIMESTAMP",
"mysql": "DATETIME",
},
"created_at": {
"postgresql": "TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP",
"mysql": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
},
"updated_at": {
"postgresql": "TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP",
"mysql": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
},
}
inspector = inspect(engine)
if not inspector.has_table(MANAGED_NODE_TABLE):
return return
with engine.connect() as conn: with engine.connect() as conn:
conn.execute( existing = {
text( str(row.get("name"))
""" for row in inspect(conn).get_columns(MANAGED_NODE_TABLE)
CREATE TABLE IF NOT EXISTS topic_topic ( if row.get("name")
id INTEGER PRIMARY KEY AUTOINCREMENT, }
bot_id TEXT NOT NULL, for col, ddl_map in required_columns.items():
topic_key TEXT NOT NULL, if col in existing:
name TEXT NOT NULL DEFAULT '', continue
description TEXT NOT NULL DEFAULT '', ddl = ddl_map.get(dialect) or ddl_map.get("postgresql")
is_active INTEGER NOT NULL DEFAULT 1, conn.execute(text(f"ALTER TABLE {MANAGED_NODE_TABLE} ADD COLUMN {col} {ddl}"))
is_default_fallback INTEGER NOT NULL DEFAULT 0, conn.commit()
routing_json TEXT NOT NULL DEFAULT '{}',
view_schema_json TEXT NOT NULL DEFAULT '{}',
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY(bot_id) REFERENCES bot_instance(id)
)
"""
)
)
conn.execute(
text(
"""
CREATE TABLE IF NOT EXISTS topic_item (
id INTEGER PRIMARY KEY AUTOINCREMENT,
bot_id TEXT NOT NULL,
topic_key TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
content TEXT NOT NULL DEFAULT '',
level TEXT NOT NULL DEFAULT 'info',
tags_json TEXT,
view_json TEXT,
source TEXT NOT NULL DEFAULT 'mcp',
dedupe_key TEXT,
is_read INTEGER NOT NULL DEFAULT 0,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY(bot_id) REFERENCES bot_instance(id)
)
"""
)
)
conn.execute(text("CREATE UNIQUE INDEX IF NOT EXISTS uq_topic_topic_bot_topic_key ON topic_topic(bot_id, topic_key)"))
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_topic_bot_id ON topic_topic(bot_id)")) def _ensure_managed_node_indexes() -> None:
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_topic_topic_key ON topic_topic(topic_key)")) required_indexes = [
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_topic_bot_fallback ON topic_topic(bot_id, is_default_fallback)")) ("idx_managed_node_enabled", MANAGED_NODE_TABLE, ["enabled"]),
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_bot_id ON topic_item(bot_id)")) ("idx_managed_node_transport_kind", MANAGED_NODE_TABLE, ["transport_kind"]),
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_topic_key ON topic_item(topic_key)")) ("idx_managed_node_runtime_kind", MANAGED_NODE_TABLE, ["runtime_kind"]),
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_level ON topic_item(level)")) ("idx_managed_node_core_adapter", MANAGED_NODE_TABLE, ["core_adapter"]),
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_source ON topic_item(source)")) ("idx_managed_node_last_seen_at", MANAGED_NODE_TABLE, ["last_seen_at"]),
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_is_read ON topic_item(is_read)")) ]
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_created_at ON topic_item(created_at)")) inspector = inspect(engine)
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_bot_topic_created_at ON topic_item(bot_id, topic_key, created_at)")) with engine.connect() as conn:
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_bot_dedupe ON topic_item(bot_id, dedupe_key)")) if not inspector.has_table(MANAGED_NODE_TABLE):
return
existing = {
str(item.get("name"))
for item in inspector.get_indexes(MANAGED_NODE_TABLE)
if item.get("name")
}
for name, table_name, columns in required_indexes:
if name in existing:
continue
conn.execute(text(f"CREATE INDEX {name} ON {table_name} ({', '.join(columns)})"))
conn.commit() conn.commit()
@ -539,84 +580,68 @@ def _ensure_topic_columns() -> None:
required_columns = { required_columns = {
"topic_topic": { "topic_topic": {
"name": { "name": {
"sqlite": "TEXT NOT NULL DEFAULT ''",
"postgresql": "TEXT NOT NULL DEFAULT ''", "postgresql": "TEXT NOT NULL DEFAULT ''",
"mysql": "VARCHAR(255) NOT NULL DEFAULT ''", "mysql": "VARCHAR(255) NOT NULL DEFAULT ''",
}, },
"description": { "description": {
"sqlite": "TEXT NOT NULL DEFAULT ''",
"postgresql": "TEXT NOT NULL DEFAULT ''", "postgresql": "TEXT NOT NULL DEFAULT ''",
"mysql": "LONGTEXT", "mysql": "LONGTEXT",
}, },
"is_active": { "is_active": {
"sqlite": "INTEGER NOT NULL DEFAULT 1",
"postgresql": "BOOLEAN NOT NULL DEFAULT TRUE", "postgresql": "BOOLEAN NOT NULL DEFAULT TRUE",
"mysql": "BOOLEAN NOT NULL DEFAULT TRUE", "mysql": "BOOLEAN NOT NULL DEFAULT TRUE",
}, },
"is_default_fallback": { "is_default_fallback": {
"sqlite": "INTEGER NOT NULL DEFAULT 0",
"postgresql": "BOOLEAN NOT NULL DEFAULT FALSE", "postgresql": "BOOLEAN NOT NULL DEFAULT FALSE",
"mysql": "BOOLEAN NOT NULL DEFAULT FALSE", "mysql": "BOOLEAN NOT NULL DEFAULT FALSE",
}, },
"routing_json": { "routing_json": {
"sqlite": "TEXT NOT NULL DEFAULT '{}'",
"postgresql": "TEXT NOT NULL DEFAULT '{}'", "postgresql": "TEXT NOT NULL DEFAULT '{}'",
"mysql": "LONGTEXT", "mysql": "LONGTEXT",
}, },
"view_schema_json": { "view_schema_json": {
"sqlite": "TEXT NOT NULL DEFAULT '{}'",
"postgresql": "TEXT NOT NULL DEFAULT '{}'", "postgresql": "TEXT NOT NULL DEFAULT '{}'",
"mysql": "LONGTEXT", "mysql": "LONGTEXT",
}, },
"created_at": { "created_at": {
"sqlite": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
"postgresql": "TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP", "postgresql": "TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP",
"mysql": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP", "mysql": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
}, },
"updated_at": { "updated_at": {
"sqlite": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
"postgresql": "TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP", "postgresql": "TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP",
"mysql": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP", "mysql": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
}, },
}, },
"topic_item": { "topic_item": {
"title": { "title": {
"sqlite": "TEXT NOT NULL DEFAULT ''",
"postgresql": "TEXT NOT NULL DEFAULT ''", "postgresql": "TEXT NOT NULL DEFAULT ''",
"mysql": "VARCHAR(2000) NOT NULL DEFAULT ''", "mysql": "VARCHAR(2000) NOT NULL DEFAULT ''",
}, },
"level": { "level": {
"sqlite": "TEXT NOT NULL DEFAULT 'info'",
"postgresql": "TEXT NOT NULL DEFAULT 'info'", "postgresql": "TEXT NOT NULL DEFAULT 'info'",
"mysql": "VARCHAR(32) NOT NULL DEFAULT 'info'", "mysql": "VARCHAR(32) NOT NULL DEFAULT 'info'",
}, },
"tags_json": { "tags_json": {
"sqlite": "TEXT",
"postgresql": "TEXT", "postgresql": "TEXT",
"mysql": "LONGTEXT", "mysql": "LONGTEXT",
}, },
"view_json": { "view_json": {
"sqlite": "TEXT",
"postgresql": "TEXT", "postgresql": "TEXT",
"mysql": "LONGTEXT", "mysql": "LONGTEXT",
}, },
"source": { "source": {
"sqlite": "TEXT NOT NULL DEFAULT 'mcp'",
"postgresql": "TEXT NOT NULL DEFAULT 'mcp'", "postgresql": "TEXT NOT NULL DEFAULT 'mcp'",
"mysql": "VARCHAR(64) NOT NULL DEFAULT 'mcp'", "mysql": "VARCHAR(64) NOT NULL DEFAULT 'mcp'",
}, },
"dedupe_key": { "dedupe_key": {
"sqlite": "TEXT",
"postgresql": "TEXT", "postgresql": "TEXT",
"mysql": "VARCHAR(200)", "mysql": "VARCHAR(200)",
}, },
"is_read": { "is_read": {
"sqlite": "INTEGER NOT NULL DEFAULT 0",
"postgresql": "BOOLEAN NOT NULL DEFAULT FALSE", "postgresql": "BOOLEAN NOT NULL DEFAULT FALSE",
"mysql": "BOOLEAN NOT NULL DEFAULT FALSE", "mysql": "BOOLEAN NOT NULL DEFAULT FALSE",
}, },
"created_at": { "created_at": {
"sqlite": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
"postgresql": "TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP", "postgresql": "TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP",
"mysql": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP", "mysql": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
}, },
@ -636,7 +661,7 @@ def _ensure_topic_columns() -> None:
for col, ddl_map in cols.items(): for col, ddl_map in cols.items():
if col in existing: if col in existing:
continue continue
ddl = ddl_map.get(dialect) or ddl_map.get("sqlite") ddl = ddl_map.get(dialect) or ddl_map.get("postgresql")
conn.execute(text(f"ALTER TABLE {table_name} ADD COLUMN {col} {ddl}")) conn.execute(text(f"ALTER TABLE {table_name} ADD COLUMN {col} {ddl}"))
conn.commit() conn.commit()
@ -783,10 +808,11 @@ def init_database() -> None:
_drop_legacy_skill_tables() _drop_legacy_skill_tables()
_ensure_sys_setting_columns() _ensure_sys_setting_columns()
_ensure_bot_request_usage_columns() _ensure_bot_request_usage_columns()
_ensure_managed_node_columns()
_ensure_botinstance_columns() _ensure_botinstance_columns()
_ensure_botinstance_indexes()
_ensure_managed_node_indexes()
_drop_legacy_botinstance_columns() _drop_legacy_botinstance_columns()
_ensure_botmessage_columns()
_ensure_topic_tables_sqlite()
_repair_postgres_topic_foreign_keys() _repair_postgres_topic_foreign_keys()
_ensure_topic_columns() _ensure_topic_columns()
_ensure_topic_indexes() _ensure_topic_indexes()
@ -794,6 +820,8 @@ def init_database() -> None:
_cleanup_legacy_default_topics() _cleanup_legacy_default_topics()
_drop_legacy_tables() _drop_legacy_tables()
align_postgres_sequences() align_postgres_sequences()
with Session(engine) as session:
seed_sys_auth(session)
finally: finally:
_release_migration_lock(lock_conn) _release_migration_lock(lock_conn)

View File

@ -703,6 +703,12 @@ class BotDockerManager:
if response_match: if response_match:
channel = response_match.group(1).strip().lower() channel = response_match.group(1).strip().lower()
action_msg = response_match.group(2).strip() action_msg = response_match.group(2).strip()
if channel == "dashboard":
return {
"type": "ASSISTANT_MESSAGE",
"channel": "dashboard",
"text": action_msg[:4000],
}
return { return {
"type": "AGENT_STATE", "type": "AGENT_STATE",
"channel": channel, "channel": channel,

View File

@ -1,6 +1,7 @@
import json import json
import os import os
import re import re
import shutil
from pathlib import Path from pathlib import Path
from typing import Final from typing import Final
from urllib.parse import urlsplit, urlunsplit from urllib.parse import urlsplit, urlunsplit
@ -119,21 +120,11 @@ BOTS_WORKSPACE_ROOT: Final[str] = _normalize_dir_path(
def _normalize_database_url(url: str) -> str: def _normalize_database_url(url: str) -> str:
raw = str(url or "").strip() return str(url or "").strip()
prefix = "sqlite:///"
if not raw.startswith(prefix):
return raw
path_part = raw[len(prefix) :]
if not path_part or path_part.startswith("/"):
return raw
abs_path = (BACKEND_ROOT / path_part).resolve()
return f"{prefix}{abs_path.as_posix()}"
def _database_engine(url: str) -> str: def _database_engine(url: str) -> str:
raw = str(url or "").strip().lower() raw = str(url or "").strip().lower()
if raw.startswith("sqlite"):
return "sqlite"
if raw.startswith("postgresql"): if raw.startswith("postgresql"):
return "postgresql" return "postgresql"
if raw.startswith("mysql"): if raw.startswith("mysql"):
@ -147,7 +138,7 @@ def _database_engine(url: str) -> str:
def _mask_database_url(url: str) -> str: def _mask_database_url(url: str) -> str:
raw = str(url or "").strip() raw = str(url or "").strip()
if not raw or raw.startswith("sqlite"): if not raw:
return raw return raw
try: try:
parsed = urlsplit(raw) parsed = urlsplit(raw)
@ -168,10 +159,12 @@ def _mask_database_url(url: str) -> str:
_db_env = str(os.getenv("DATABASE_URL") or "").strip() _db_env = str(os.getenv("DATABASE_URL") or "").strip()
DATABASE_URL: Final[str] = _normalize_database_url( if not _db_env:
_db_env if _db_env else f"sqlite:///{Path(DATA_ROOT) / 'nanobot_dashboard.db'}" raise RuntimeError("DATABASE_URL is required")
) DATABASE_URL: Final[str] = _normalize_database_url(_db_env)
DATABASE_ENGINE: Final[str] = _database_engine(DATABASE_URL) DATABASE_ENGINE: Final[str] = _database_engine(DATABASE_URL)
if DATABASE_ENGINE not in {"postgresql", "mysql"}:
raise RuntimeError(f"Unsupported DATABASE_URL engine: {DATABASE_ENGINE}")
DATABASE_URL_DISPLAY: Final[str] = _mask_database_url(DATABASE_URL) DATABASE_URL_DISPLAY: Final[str] = _mask_database_url(DATABASE_URL)
DATABASE_ECHO: Final[bool] = _env_bool("DATABASE_ECHO", True) DATABASE_ECHO: Final[bool] = _env_bool("DATABASE_ECHO", True)
DATABASE_POOL_SIZE: Final[int] = _env_int("DATABASE_POOL_SIZE", 20, 1, 200) DATABASE_POOL_SIZE: Final[int] = _env_int("DATABASE_POOL_SIZE", 20, 1, 200)
@ -221,11 +214,33 @@ REDIS_ENABLED: Final[bool] = _env_bool("REDIS_ENABLED", False)
REDIS_URL: Final[str] = str(os.getenv("REDIS_URL") or "").strip() REDIS_URL: Final[str] = str(os.getenv("REDIS_URL") or "").strip()
REDIS_PREFIX: Final[str] = str(os.getenv("REDIS_PREFIX") or "dashboard_nanobot").strip() or "dashboard_nanobot" REDIS_PREFIX: Final[str] = str(os.getenv("REDIS_PREFIX") or "dashboard_nanobot").strip() or "dashboard_nanobot"
REDIS_DEFAULT_TTL: Final[int] = _env_int("REDIS_DEFAULT_TTL", 60, 1, 86400) REDIS_DEFAULT_TTL: Final[int] = _env_int("REDIS_DEFAULT_TTL", 60, 1, 86400)
PANEL_ACCESS_PASSWORD: Final[str] = str(os.getenv("PANEL_ACCESS_PASSWORD") or "").strip() JWT_ALGORITHM: Final[str] = "HS256"
JWT_SECRET: Final[str] = str(
os.getenv("JWT_SECRET")
or f"{PROJECT_ROOT.name}:{REDIS_PREFIX}:jwt"
).strip()
TEMPLATE_ROOT: Final[Path] = (BACKEND_ROOT / "templates").resolve() LEGACY_TEMPLATE_ROOT: Final[Path] = (BACKEND_ROOT / "templates").resolve()
AGENT_MD_TEMPLATES_FILE: Final[Path] = TEMPLATE_ROOT / "agent_md_templates.json" TEMPLATE_ROOT: Final[Path] = (Path(DATA_ROOT) / "templates").resolve()
TOPIC_PRESETS_TEMPLATES_FILE: Final[Path] = TEMPLATE_ROOT / "topic_presets.json" TEMPLATE_ROOT.mkdir(parents=True, exist_ok=True)
def _resolve_template_file(filename: str) -> Path:
target = (TEMPLATE_ROOT / filename).resolve()
legacy = (LEGACY_TEMPLATE_ROOT / filename).resolve()
if target.exists():
return target
if legacy.exists():
try:
shutil.copy2(legacy, target)
return target
except Exception:
return legacy
return target
AGENT_MD_TEMPLATES_FILE: Final[Path] = _resolve_template_file("agent_md_templates.json")
TOPIC_PRESETS_TEMPLATES_FILE: Final[Path] = _resolve_template_file("topic_presets.json")
_agent_md_templates_raw = _load_json_object(AGENT_MD_TEMPLATES_FILE) _agent_md_templates_raw = _load_json_object(AGENT_MD_TEMPLATES_FILE)
DEFAULT_AGENTS_MD: Final[str] = _env_text( DEFAULT_AGENTS_MD: Final[str] = _env_text(

View File

@ -9,7 +9,7 @@ from pathlib import Path
from typing import Any, Dict, Optional from typing import Any, Dict, Optional
from core.settings import STT_DEVICE, STT_MODEL, STT_MODEL_DIR from core.settings import STT_DEVICE, STT_MODEL, STT_MODEL_DIR
from services.platform_service import get_speech_runtime_settings from services.platform_settings_service import get_speech_runtime_settings
class SpeechServiceError(RuntimeError): class SpeechServiceError(RuntimeError):

0
backend/data.db 100644
View File

File diff suppressed because it is too large Load Diff

View File

@ -14,6 +14,10 @@ class BotInstance(SQLModel, table=True):
current_state: Optional[str] = Field(default="IDLE") current_state: Optional[str] = Field(default="IDLE")
last_action: Optional[str] = Field(default=None) last_action: Optional[str] = Field(default=None)
image_tag: str = Field(default="nanobot-base:v0.1.4") # 记录该机器人使用的镜像版本 image_tag: str = Field(default="nanobot-base:v0.1.4") # 记录该机器人使用的镜像版本
node_id: str = Field(default="local", index=True)
transport_kind: str = Field(default="direct", index=True)
runtime_kind: str = Field(default="docker", index=True)
core_adapter: str = Field(default="nanobot", index=True)
created_at: datetime = Field(default_factory=datetime.utcnow) created_at: datetime = Field(default_factory=datetime.utcnow)
updated_at: datetime = Field(default_factory=datetime.utcnow) updated_at: datetime = Field(default_factory=datetime.utcnow)

View File

@ -19,6 +19,25 @@ class PlatformSetting(SQLModel, table=True):
updated_at: datetime = Field(default_factory=datetime.utcnow, index=True) updated_at: datetime = Field(default_factory=datetime.utcnow, index=True)
class ManagedNodeRecord(SQLModel, table=True):
    """Persistent registration of a managed edge/runtime node.

    Each row describes one node the dashboard can dispatch bots to: how to
    reach it (base_url/auth_token), how traffic is carried (transport_kind),
    what executes bots there (runtime_kind/core_adapter), and free-form JSON
    blobs for node metadata, capabilities and resource reports.
    """

    __tablename__ = "managed_node"

    # Stable node identifier, used as the primary key (not auto-generated).
    node_id: str = Field(primary_key=True, max_length=120)
    # Human-readable label shown in the UI.
    display_name: str = Field(default="", max_length=200)
    # HTTP base URL used to reach the node's edge API.
    base_url: str = Field(default="")
    enabled: bool = Field(default=True, index=True)
    # Shared secret presented when calling the node.
    auth_token: str = Field(default="")
    # How commands reach the node (e.g. "direct" vs an edge transport).
    transport_kind: str = Field(default="direct", max_length=32, index=True)
    # Execution backend on the node (e.g. "docker" or "native").
    runtime_kind: str = Field(default="docker", max_length=32, index=True)
    # Which bot-core adapter the node speaks.
    core_adapter: str = Field(default="nanobot", max_length=64, index=True)
    # JSON-encoded dicts; kept as TEXT for cross-database portability.
    metadata_json: str = Field(default="{}")
    capabilities_json: str = Field(default="{}")
    resources_json: str = Field(default="{}")
    # Last successful heartbeat/contact; None until the node first reports in.
    last_seen_at: Optional[datetime] = Field(default=None, index=True)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow, index=True)
class BotRequestUsage(SQLModel, table=True): class BotRequestUsage(SQLModel, table=True):
__tablename__ = "bot_request_usage" __tablename__ = "bot_request_usage"

View File

@ -0,0 +1,115 @@
from datetime import datetime
from typing import Optional
from sqlalchemy import UniqueConstraint
from sqlmodel import Field, SQLModel
class SysRole(SQLModel, table=True):
    """RBAC role; menus and permissions attach to roles via link tables."""

    __tablename__ = "sys_role"
    __table_args__ = (
        UniqueConstraint("role_key", name="uq_sys_role_role_key"),
    )

    id: Optional[int] = Field(default=None, primary_key=True)
    # Machine-readable unique key (enforced unique by table constraint above).
    role_key: str = Field(index=True, max_length=64)
    # Display name and free-form description for admins.
    name: str = Field(default="", max_length=120)
    description: str = Field(default="")
    is_active: bool = Field(default=True, index=True)
    # Lower values sort first in listings.
    sort_order: int = Field(default=100, index=True)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow, index=True)
class SysUser(SQLModel, table=True):
    """Dashboard login account.

    Stores a salted password hash plus the hash/expiry of the currently
    issued session token, allowing a single active token per user to be
    validated and revoked server-side.
    """

    __tablename__ = "sys_user"
    __table_args__ = (
        UniqueConstraint("username", name="uq_sys_user_username"),
    )

    id: Optional[int] = Field(default=None, primary_key=True)
    # Unique login name (constraint above).
    username: str = Field(index=True, max_length=64)
    display_name: str = Field(default="", max_length=120)
    # Credential material; never store the raw password.
    password_hash: str = Field(default="", max_length=255)
    password_salt: str = Field(default="", max_length=64)
    # Single-role model: one role per user (nullable for unassigned users).
    role_id: Optional[int] = Field(default=None, foreign_key="sys_role.id", index=True)
    is_active: bool = Field(default=True, index=True)
    last_login_at: Optional[datetime] = Field(default=None, index=True)
    # Hash + expiry of the most recently issued token; both None when logged out.
    current_token_hash: Optional[str] = Field(default=None, index=True, max_length=255)
    current_token_expires_at: Optional[datetime] = Field(default=None, index=True)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow, index=True)
class SysMenu(SQLModel, table=True):
    """Navigation menu entry; forms a tree via string parent_key references."""

    __tablename__ = "sys_menu"
    __table_args__ = (
        UniqueConstraint("menu_key", name="uq_sys_menu_menu_key"),
    )

    id: Optional[int] = Field(default=None, primary_key=True)
    # Unique machine key; parent_key points at another menu's menu_key
    # ("" for top-level entries) rather than a numeric FK.
    menu_key: str = Field(index=True, max_length=64)
    parent_key: str = Field(default="", index=True, max_length=64)
    # Localized titles (default language + English).
    title: str = Field(default="", max_length=120)
    title_en: str = Field(default="", max_length=120)
    # Entry kind, e.g. "item" vs a grouping node — TODO confirm allowed values.
    menu_type: str = Field(default="item", max_length=32, index=True)
    # Frontend route and icon identifiers.
    route_path: str = Field(default="", max_length=255)
    icon: str = Field(default="", max_length=64)
    # Permission required to see this entry ("" = unrestricted).
    permission_key: str = Field(default="", max_length=120)
    visible: bool = Field(default=True, index=True)
    sort_order: int = Field(default=100, index=True)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow, index=True)
class SysPermission(SQLModel, table=True):
    """Fine-grained permission, optionally tied to a menu entry by menu_key."""

    __tablename__ = "sys_permission"
    __table_args__ = (
        UniqueConstraint("permission_key", name="uq_sys_permission_permission_key"),
    )

    id: Optional[int] = Field(default=None, primary_key=True)
    # Unique machine key checked by the authorization layer.
    permission_key: str = Field(index=True, max_length=120)
    name: str = Field(default="", max_length=120)
    # Loose association to SysMenu.menu_key ("" when not menu-scoped).
    menu_key: str = Field(default="", index=True, max_length=64)
    # Verb this permission grants, e.g. "view".
    action: str = Field(default="view", max_length=32, index=True)
    description: str = Field(default="")
    sort_order: int = Field(default=100, index=True)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow, index=True)
class SysRoleMenu(SQLModel, table=True):
    """Many-to-many link: which menus a role may see (one row per pair)."""

    __tablename__ = "sys_role_menu"
    __table_args__ = (
        UniqueConstraint("role_id", "menu_id", name="uq_sys_role_menu_role_menu"),
    )

    id: Optional[int] = Field(default=None, primary_key=True)
    role_id: int = Field(foreign_key="sys_role.id", index=True)
    menu_id: int = Field(foreign_key="sys_menu.id", index=True)
    created_at: datetime = Field(default_factory=datetime.utcnow)
class SysRolePermission(SQLModel, table=True):
    """Many-to-many link: which permissions a role grants (one row per pair)."""

    __tablename__ = "sys_role_permission"
    __table_args__ = (
        UniqueConstraint("role_id", "permission_id", name="uq_sys_role_permission_role_permission"),
    )

    id: Optional[int] = Field(default=None, primary_key=True)
    role_id: int = Field(foreign_key="sys_role.id", index=True)
    permission_id: int = Field(foreign_key="sys_permission.id", index=True)
    created_at: datetime = Field(default_factory=datetime.utcnow)
class SysUserBot(SQLModel, table=True):
    """Many-to-many link: which bot instances a user may access."""

    __tablename__ = "sys_user_bot"
    __table_args__ = (
        UniqueConstraint("user_id", "bot_id", name="uq_sys_user_bot_user_bot"),
    )

    id: Optional[int] = Field(default=None, primary_key=True)
    user_id: int = Field(foreign_key="sys_user.id", index=True)
    # Bot ids are strings (see bot_instance.id), hence the max_length.
    bot_id: str = Field(foreign_key="bot_instance.id", index=True, max_length=120)
    created_at: datetime = Field(default_factory=datetime.utcnow)

View File

@ -0,0 +1 @@
# Provider package for runtime/workspace/provision abstractions.

View File

@ -0,0 +1,4 @@
from providers.provision.base import ProvisionProvider
from providers.provision.local import LocalProvisionProvider
__all__ = ["ProvisionProvider", "LocalProvisionProvider"]

View File

@ -0,0 +1,18 @@
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from sqlmodel import Session
class ProvisionProvider(ABC):
    """Interface for pushing a bot's workspace/configuration to where it runs.

    Implementations (local filesystem, remote edge node, ...) materialize the
    bot's workspace from DB state, optionally overridden per call.
    """

    @abstractmethod
    def sync_bot_workspace(
        self,
        *,
        session: Session,
        bot_id: str,
        channels_override: Optional[List[Dict[str, Any]]] = None,
        global_delivery_override: Optional[Dict[str, Any]] = None,
        runtime_overrides: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Synchronize the workspace for *bot_id*.

        Args:
            session: Open database session used to read bot state.
            bot_id: Identifier of the bot whose workspace is synced.
            channels_override: If given, replaces the bot's stored channel list.
            global_delivery_override: If given, replaces delivery settings.
            runtime_overrides: If given, merged over the stored runtime config.
        """
        raise NotImplementedError

View File

@ -0,0 +1,105 @@
from typing import Any, Callable, Dict, List, Optional
from fastapi import HTTPException
from sqlmodel import Session
from clients.edge.base import EdgeClient
from models.bot import BotInstance
from providers.provision.base import ProvisionProvider
from providers.target import ProviderTarget
class EdgeProvisionProvider(ProvisionProvider):
    """ProvisionProvider that forwards workspace syncs to a remote edge node.

    All project knowledge (how to find the target node, build an EdgeClient,
    read bot/channel/node state) is injected as callables, keeping this class
    free of direct service dependencies.
    """

    def __init__(
        self,
        *,
        read_provider_target: Callable[[str], ProviderTarget],
        resolve_edge_client: Callable[[ProviderTarget], EdgeClient],
        read_runtime_snapshot: Callable[[BotInstance], Dict[str, Any]],
        read_bot_channels: Callable[[BotInstance], List[Dict[str, Any]]],
        read_node_metadata: Callable[[str], Dict[str, Any]],
    ) -> None:
        # Injected accessors; see class docstring.
        self._read_provider_target = read_provider_target
        self._resolve_edge_client = resolve_edge_client
        self._read_runtime_snapshot = read_runtime_snapshot
        self._read_bot_channels = read_bot_channels
        self._read_node_metadata = read_node_metadata

    def sync_bot_workspace(
        self,
        *,
        session: Session,
        bot_id: str,
        channels_override: Optional[List[Dict[str, Any]]] = None,
        global_delivery_override: Optional[Dict[str, Any]] = None,
        runtime_overrides: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Push the bot's workspace config to its edge node.

        Merge precedence for the runtime dict (later wins): stored snapshot
        < caller runtime_overrides < node-level overrides. Raises 404 for an
        unknown bot and 400 when the bot's transport is not "edge".
        """
        bot = session.get(BotInstance, bot_id)
        if bot is None:
            raise HTTPException(status_code=404, detail="Bot not found")
        snapshot = dict(self._read_runtime_snapshot(bot))
        merged_runtime = dict(snapshot)
        if isinstance(runtime_overrides, dict):
            for key, value in runtime_overrides.items():
                # Credentials/model fields: ignore blank overrides so an empty
                # form value cannot wipe a stored secret.
                if key in {"api_key", "llm_provider", "llm_model"}:
                    text = str(value or "").strip()
                    if not text:
                        continue
                    merged_runtime[key] = text
                    continue
                # api_base may be intentionally cleared, so "" is accepted.
                if key == "api_base":
                    merged_runtime[key] = str(value or "").strip()
                    continue
                merged_runtime[key] = value
        target = self._read_provider_target(bot_id)
        # Node-level overrides win over everything the caller supplied.
        merged_runtime.update(self._node_runtime_overrides(target.node_id, target.runtime_kind))
        resolved_delivery = dict(global_delivery_override or {})
        # Backfill delivery flags from the merged runtime when not set explicitly.
        if "sendProgress" not in resolved_delivery:
            resolved_delivery["sendProgress"] = bool(merged_runtime.get("send_progress", False))
        if "sendToolHints" not in resolved_delivery:
            resolved_delivery["sendToolHints"] = bool(merged_runtime.get("send_tool_hints", False))
        self._client_for_target(target).sync_bot_workspace(
            bot_id=bot_id,
            channels_override=channels_override if channels_override is not None else self._read_bot_channels(bot),
            global_delivery_override=resolved_delivery,
            runtime_overrides=merged_runtime,
        )

    def _client_for_bot(self, bot_id: str) -> EdgeClient:
        """Convenience lookup: resolve the edge client straight from a bot id."""
        target = self._read_provider_target(bot_id)
        return self._client_for_target(target)

    def _client_for_target(self, target: ProviderTarget) -> EdgeClient:
        """Build an EdgeClient for *target*; 400 unless transport is "edge"."""
        if target.transport_kind != "edge":
            raise HTTPException(status_code=400, detail=f"edge provision provider requires edge transport, got {target.transport_kind}")
        return self._resolve_edge_client(target)

    def _node_runtime_overrides(self, node_id: str, runtime_kind: str) -> Dict[str, str]:
        """Derive runtime overrides from node metadata.

        Always propagates workspace_root when set; native-only settings
        (sandbox mode, command, workdir) apply only when runtime_kind is
        "native".
        """
        metadata = dict(self._read_node_metadata(str(node_id or "").strip().lower()) or {})
        payload: Dict[str, str] = {}
        workspace_root = str(metadata.get("workspace_root") or "").strip()
        if workspace_root:
            payload["workspace_root"] = workspace_root
        if str(runtime_kind or "").strip().lower() != "native":
            return payload
        native_sandbox_mode = self._normalize_native_sandbox_mode(metadata.get("native_sandbox_mode"))
        # "inherit" means: do not override, let the node's own default apply.
        if native_sandbox_mode != "inherit":
            payload["native_sandbox_mode"] = native_sandbox_mode
        native_command = str(metadata.get("native_command") or "").strip()
        native_workdir = str(metadata.get("native_workdir") or "").strip()
        if native_command:
            payload["native_command"] = native_command
        if native_workdir:
            payload["native_workdir"] = native_workdir
        return payload

    @staticmethod
    def _normalize_native_sandbox_mode(raw_value: Any) -> str:
        """Map assorted alias spellings to "workspace" / "full_access" / "inherit"."""
        text = str(raw_value or "").strip().lower()
        if text in {"workspace", "sandbox", "strict"}:
            return "workspace"
        if text in {"full_access", "full-access", "danger-full-access", "escape"}:
            return "full_access"
        return "inherit"

View File

@ -0,0 +1,34 @@
from typing import Any, Callable, Dict, List, Optional
from sqlmodel import Session
from providers.provision.base import ProvisionProvider
class LocalProvisionProvider(ProvisionProvider):
    """Adapter that satisfies the ProvisionProvider interface by delegating
    to an injected local sync function.

    The callable does the real workspace materialization; this class merely
    translates the keyword-only provider API into its positional signature.
    """

    def __init__(
        self,
        *,
        sync_workspace_func: Callable[
            [Session, str, Optional[List[Dict[str, Any]]], Optional[Dict[str, Any]], Optional[Dict[str, Any]]],
            None,
        ],
    ) -> None:
        # Stored once at construction; never rebound afterwards.
        self._sync_workspace_func = sync_workspace_func

    def sync_bot_workspace(
        self,
        *,
        session: Session,
        bot_id: str,
        channels_override: Optional[List[Dict[str, Any]]] = None,
        global_delivery_override: Optional[Dict[str, Any]] = None,
        runtime_overrides: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Forward the request to the wrapped local implementation."""
        delegate = self._sync_workspace_func
        # Positional order mirrors the Callable signature declared in __init__.
        delegate(
            session,
            bot_id,
            channels_override,
            global_delivery_override,
            runtime_overrides,
        )

View File

@ -0,0 +1,47 @@
from dataclasses import dataclass, field
from typing import Dict, Optional
from providers.target import ProviderTarget
from providers.provision.base import ProvisionProvider
from providers.runtime.base import RuntimeProvider
from providers.workspace.base import WorkspaceProvider
@dataclass
class ProviderRegistry:
    """Registry of provider bundles keyed by a "node:transport:runtime:adapter" key.

    A key is only usable when all three maps (runtime, workspace, provision)
    contain it; resolution prefers an exact key match and otherwise falls back
    to matching the transport/runtime/adapter portions case-insensitively.
    """

    runtime: Dict[str, RuntimeProvider] = field(default_factory=dict)
    workspace: Dict[str, WorkspaceProvider] = field(default_factory=dict)
    provision: Dict[str, ProvisionProvider] = field(default_factory=dict)

    def register_bundle(
        self,
        *,
        key: str,
        runtime_provider: RuntimeProvider,
        workspace_provider: WorkspaceProvider,
        provision_provider: ProvisionProvider,
    ) -> None:
        """Register all three providers under the same bundle key."""
        self.runtime[key] = runtime_provider
        self.workspace[key] = workspace_provider
        self.provision[key] = provision_provider

    def resolve_bundle_key(self, target: ProviderTarget) -> Optional[str]:
        """Return a registered bundle key for *target*, or None.

        Exact key match wins; otherwise the first complete bundle whose
        transport/runtime/adapter segments match the target (ignoring case
        and surrounding whitespace) is returned.
        """
        if self._bundle_complete(target.key):
            return target.key
        wanted = (
            str(target.transport_kind or "").strip().lower(),
            str(target.runtime_kind or "").strip().lower(),
            str(target.core_adapter or "").strip().lower(),
        )
        for candidate in self.runtime.keys():
            if not self._bundle_complete(candidate):
                continue
            segments = str(candidate or "").split(":")
            # Key layout: node ":" transport ":" runtime ":" adapter
            # (adapter may itself contain ":").
            if len(segments) < 4:
                continue
            found = (
                str(segments[1] or "").strip().lower(),
                str(segments[2] or "").strip().lower(),
                str(":".join(segments[3:]) or "").strip().lower(),
            )
            if found == wanted:
                return candidate
        return None

    def _bundle_complete(self, key: str) -> bool:
        # Usable only when every provider map has an entry for the key.
        return key in self.runtime and key in self.workspace and key in self.provision

View File

@ -0,0 +1,4 @@
from providers.runtime.base import RuntimeProvider
from providers.runtime.local import LocalRuntimeProvider
__all__ = ["RuntimeProvider", "LocalRuntimeProvider"]

View File

@ -0,0 +1,40 @@
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from sqlmodel import Session
from models.bot import BotInstance
class RuntimeProvider(ABC):
    """Interface for running and observing bot processes on some backend.

    Implementations cover concrete runtimes (local Docker, remote edge node,
    native process, ...). Note start_bot is async while the remaining
    operations are synchronous.
    """

    @abstractmethod
    async def start_bot(self, *, session: Session, bot: BotInstance) -> Dict[str, Any]:
        """Start *bot* and return a backend-specific result payload."""
        raise NotImplementedError

    @abstractmethod
    def stop_bot(self, *, session: Session, bot: BotInstance) -> Dict[str, Any]:
        """Stop *bot* and return a backend-specific result payload."""
        raise NotImplementedError

    @abstractmethod
    def deliver_command(self, *, bot_id: str, command: str, media: Optional[List[str]] = None) -> Optional[str]:
        """Send a command (with optional media references) to the running bot.

        Returns a backend-specific response string, or None — TODO confirm
        when implementations return None vs text.
        """
        raise NotImplementedError

    @abstractmethod
    def get_recent_logs(self, *, bot_id: str, tail: int = 300) -> List[str]:
        """Return up to *tail* recent log lines for the bot."""
        raise NotImplementedError

    @abstractmethod
    def ensure_monitor(self, *, bot_id: str) -> bool:
        """Ensure a monitor is attached to the bot; True on success."""
        raise NotImplementedError

    @abstractmethod
    def get_monitor_packets(self, *, bot_id: str, after_seq: int = 0, limit: int = 200) -> List[Dict[str, Any]]:
        """Return monitor packets with sequence numbers after *after_seq*."""
        raise NotImplementedError

    @abstractmethod
    def get_runtime_status(self, *, bot_id: str) -> str:
        """Return the backend's status string for the bot."""
        raise NotImplementedError

    @abstractmethod
    def get_resource_snapshot(self, *, bot_id: str) -> Dict[str, Any]:
        """Return a point-in-time resource usage report for the bot."""
        raise NotImplementedError

View File

@ -0,0 +1,136 @@
from typing import Any, Callable, Dict, List, Optional
from fastapi import HTTPException
from sqlmodel import Session
from clients.edge.base import EdgeClient
from models.bot import BotInstance
from providers.runtime.base import RuntimeProvider
from providers.target import ProviderTarget, provider_target_to_dict
class EdgeRuntimeProvider(RuntimeProvider):
    """RuntimeProvider that proxies all operations to a remote edge node.

    Every call resolves the bot's ProviderTarget, obtains an EdgeClient for
    that target, and delegates.  Collaborators are injected as callables so
    this class carries no service-layer state of its own.
    """

    def __init__(
        self,
        *,
        read_provider_target: Callable[[str], ProviderTarget],
        resolve_edge_client: Callable[[ProviderTarget], EdgeClient],
        read_runtime_snapshot: Callable[[BotInstance], Dict[str, Any]],
        resolve_env_params: Callable[[str], Dict[str, str]],
        read_bot_channels: Callable[[BotInstance], List[Dict[str, Any]]],
        read_node_metadata: Callable[[str], Dict[str, Any]],
    ) -> None:
        self._read_provider_target = read_provider_target
        self._resolve_edge_client = resolve_edge_client
        self._read_runtime_snapshot = read_runtime_snapshot
        self._resolve_env_params = resolve_env_params
        self._read_bot_channels = read_bot_channels
        self._read_node_metadata = read_node_metadata

    async def start_bot(self, *, session: Session, bot: BotInstance) -> Dict[str, Any]:
        """Sync the bot workspace to the edge node, start it, mark RUNNING.

        Raises 400 on a missing id and 403 when the bot is disabled.
        """
        bot_id = str(bot.id or "").strip()
        if not bot_id:
            raise HTTPException(status_code=400, detail="Bot id is required")
        if not bool(getattr(bot, "enabled", True)):
            raise HTTPException(status_code=403, detail="Bot is disabled. Enable it first.")
        runtime_snapshot = self._read_runtime_snapshot(bot)
        target = self._read_provider_target(bot_id)
        client = self._client_for_target(target)
        node_runtime_overrides = self._node_runtime_overrides(target.node_id, target.runtime_kind)
        # Merge order matters: node-level overrides win over the target
        # fields, which win over the bot's runtime snapshot.
        workspace_runtime = {
            **dict(runtime_snapshot),
            **provider_target_to_dict(target),
            **node_runtime_overrides,
        }
        # Push workspace/config state to the edge before starting the bot.
        client.sync_bot_workspace(
            bot_id=bot_id,
            channels_override=self._read_bot_channels(bot),
            global_delivery_override={
                "sendProgress": bool(runtime_snapshot.get("send_progress")),
                "sendToolHints": bool(runtime_snapshot.get("send_tool_hints")),
            },
            runtime_overrides=workspace_runtime,
        )
        result = await client.start_bot(
            bot=bot,
            start_payload={
                "image_tag": bot.image_tag,
                "runtime_kind": target.runtime_kind,
                "env_vars": self._resolve_env_params(bot_id),
                "cpu_cores": runtime_snapshot.get("cpu_cores"),
                "memory_mb": runtime_snapshot.get("memory_mb"),
                "storage_gb": runtime_snapshot.get("storage_gb"),
                **node_runtime_overrides,
            },
        )
        # Status is updated optimistically once the edge call returns.
        bot.docker_status = "RUNNING"
        session.add(bot)
        session.commit()
        return result

    def stop_bot(self, *, session: Session, bot: BotInstance) -> Dict[str, Any]:
        """Stop the bot on its edge node and persist STOPPED status."""
        bot_id = str(bot.id or "").strip()
        if not bot_id:
            raise HTTPException(status_code=400, detail="Bot id is required")
        if not bool(getattr(bot, "enabled", True)):
            raise HTTPException(status_code=403, detail="Bot is disabled. Enable it first.")
        result = self._client_for_bot(bot_id).stop_bot(bot=bot)
        bot.docker_status = "STOPPED"
        session.add(bot)
        session.commit()
        return result

    def deliver_command(self, *, bot_id: str, command: str, media: Optional[List[str]] = None) -> Optional[str]:
        """Forward a command to the bot via its edge client."""
        return self._client_for_bot(bot_id).deliver_command(bot_id=bot_id, command=command, media=media)

    def get_recent_logs(self, *, bot_id: str, tail: int = 300) -> List[str]:
        """Fetch recent log lines from the edge node."""
        return self._client_for_bot(bot_id).get_recent_logs(bot_id=bot_id, tail=tail)

    def ensure_monitor(self, *, bot_id: str) -> bool:
        """Ask the edge node to attach a monitor; coerce result to bool."""
        return bool(self._client_for_bot(bot_id).ensure_monitor(bot_id=bot_id))

    def get_monitor_packets(self, *, bot_id: str, after_seq: int = 0, limit: int = 200) -> List[Dict[str, Any]]:
        """Pull monitor packets from the edge node (empty list on None)."""
        return list(self._client_for_bot(bot_id).get_monitor_packets(bot_id=bot_id, after_seq=after_seq, limit=limit) or [])

    def get_runtime_status(self, *, bot_id: str) -> str:
        """Return the upper-cased runtime status; defaults to "STOPPED"."""
        return str(self._client_for_bot(bot_id).get_runtime_status(bot_id=bot_id) or "STOPPED").upper()

    def get_resource_snapshot(self, *, bot_id: str) -> Dict[str, Any]:
        """Return the edge-reported resource snapshot (empty dict on None)."""
        return dict(self._client_for_bot(bot_id).get_resource_snapshot(bot_id=bot_id) or {})

    def _client_for_bot(self, bot_id: str) -> EdgeClient:
        # Convenience: resolve the bot's target first, then its client.
        target = self._read_provider_target(bot_id)
        return self._client_for_target(target)

    def _client_for_target(self, target: ProviderTarget) -> EdgeClient:
        # Guard against being wired to a non-edge target by mistake.
        if target.transport_kind != "edge":
            raise HTTPException(status_code=400, detail=f"edge runtime provider requires edge transport, got {target.transport_kind}")
        return self._resolve_edge_client(target)

    def _node_runtime_overrides(self, node_id: str, runtime_kind: str) -> Dict[str, str]:
        """Build per-node runtime overrides from node metadata.

        workspace_root applies to every runtime kind; the native_* keys are
        only emitted for the "native" runtime.
        """
        metadata = dict(self._read_node_metadata(str(node_id or "").strip().lower()) or {})
        payload: Dict[str, str] = {}
        workspace_root = str(metadata.get("workspace_root") or "").strip()
        if workspace_root:
            payload["workspace_root"] = workspace_root
        if str(runtime_kind or "").strip().lower() != "native":
            return payload
        native_sandbox_mode = self._normalize_native_sandbox_mode(metadata.get("native_sandbox_mode"))
        # "inherit" means: do not override whatever the edge node defaults to.
        if native_sandbox_mode != "inherit":
            payload["native_sandbox_mode"] = native_sandbox_mode
        native_command = str(metadata.get("native_command") or "").strip()
        native_workdir = str(metadata.get("native_workdir") or "").strip()
        if native_command:
            payload["native_command"] = native_command
        if native_workdir:
            payload["native_workdir"] = native_workdir
        return payload

    @staticmethod
    def _normalize_native_sandbox_mode(raw_value: Any) -> str:
        """Map sandbox-mode synonyms onto "workspace"/"full_access"/"inherit"."""
        text = str(raw_value or "").strip().lower()
        if text in {"workspace", "sandbox", "strict"}:
            return "workspace"
        if text in {"full_access", "full-access", "danger-full-access", "escape"}:
            return "full_access"
        return "inherit"

View File

@ -0,0 +1,117 @@
import asyncio
from typing import Any, Awaitable, Callable, Dict, List, Optional
from fastapi import HTTPException
from sqlmodel import Session
from models.bot import BotInstance
from providers.provision.base import ProvisionProvider
from providers.runtime.base import RuntimeProvider
from services.platform_activity_service import record_activity_event
class LocalRuntimeProvider(RuntimeProvider):
    """RuntimeProvider backed by a local docker manager on this host."""

    def __init__(
        self,
        *,
        docker_manager: Any,
        on_state_change: Callable[[str, dict], None],
        provision_provider: ProvisionProvider,
        read_runtime_snapshot: Callable[[BotInstance], Dict[str, Any]],
        resolve_env_params: Callable[[str], Dict[str, str]],
        write_env_store: Callable[[str, Dict[str, str]], None],
        invalidate_bot_cache: Callable[[str], None],
        record_agent_loop_ready_warning: Callable[[str], Awaitable[None]],
        safe_float: Callable[[Any, float], float],
        safe_int: Callable[[Any, int], int],
    ) -> None:
        # Collaborators are injected as callables to keep this provider
        # decoupled from the service layer that owns them.
        self._docker_manager = docker_manager
        self._on_state_change = on_state_change
        self._provision_provider = provision_provider
        self._read_runtime_snapshot = read_runtime_snapshot
        self._resolve_env_params = resolve_env_params
        self._write_env_store = write_env_store
        self._invalidate_bot_cache = invalidate_bot_cache
        self._record_agent_loop_ready_warning = record_agent_loop_ready_warning
        self._safe_float = safe_float
        self._safe_int = safe_int

    async def start_bot(self, *, session: Session, bot: BotInstance) -> Dict[str, Any]:
        """Provision the workspace, start the container, verify it stays up.

        Raises 400 on missing id, 403 when disabled, 500 when the container
        fails to start or exits shortly after startup.
        """
        bot_id = str(bot.id or "").strip()
        if not bot_id:
            raise HTTPException(status_code=400, detail="Bot id is required")
        if not bool(getattr(bot, "enabled", True)):
            raise HTTPException(status_code=403, detail="Bot is disabled. Enable it first.")
        self._provision_provider.sync_bot_workspace(session=session, bot_id=bot_id)
        runtime_snapshot = self._read_runtime_snapshot(bot)
        # Resolve env vars once and persist them so the store matches what
        # the container actually received.
        env_params = self._resolve_env_params(bot_id)
        self._write_env_store(bot_id, env_params)
        success = self._docker_manager.start_bot(
            bot_id,
            image_tag=bot.image_tag,
            on_state_change=self._on_state_change,
            env_vars=env_params,
            # Resource limits fall back to 1 core / 1 GiB / 10 GB.
            cpu_cores=self._safe_float(runtime_snapshot.get("cpu_cores"), 1.0),
            memory_mb=self._safe_int(runtime_snapshot.get("memory_mb"), 1024),
            storage_gb=self._safe_int(runtime_snapshot.get("storage_gb"), 10),
        )
        if not success:
            bot.docker_status = "STOPPED"
            session.add(bot)
            session.commit()
            raise HTTPException(status_code=500, detail=f"Failed to start container with image {bot.image_tag}")
        # Re-query status: the container may have crashed immediately.
        actual_status = self._docker_manager.get_bot_status(bot_id)
        bot.docker_status = actual_status
        if actual_status != "RUNNING":
            session.add(bot)
            session.commit()
            self._invalidate_bot_cache(bot_id)
            raise HTTPException(
                status_code=500,
                detail="Bot container failed shortly after startup. Check bot logs/config.",
            )
        # Fire-and-forget readiness warning check; does not block startup.
        asyncio.create_task(self._record_agent_loop_ready_warning(bot_id))
        session.add(bot)
        record_activity_event(session, bot_id, "bot_started", channel="system", detail=f"Container started for {bot_id}")
        session.commit()
        self._invalidate_bot_cache(bot_id)
        return {"status": "started"}

    def stop_bot(self, *, session: Session, bot: BotInstance) -> Dict[str, Any]:
        """Stop the container, record the activity event, mark STOPPED."""
        bot_id = str(bot.id or "").strip()
        if not bot_id:
            raise HTTPException(status_code=400, detail="Bot id is required")
        if not bool(getattr(bot, "enabled", True)):
            raise HTTPException(status_code=403, detail="Bot is disabled. Enable it first.")
        self._docker_manager.stop_bot(bot_id)
        bot.docker_status = "STOPPED"
        session.add(bot)
        record_activity_event(session, bot_id, "bot_stopped", channel="system", detail=f"Container stopped for {bot_id}")
        session.commit()
        self._invalidate_bot_cache(bot_id)
        return {"status": "stopped"}

    def deliver_command(self, *, bot_id: str, command: str, media: Optional[List[str]] = None) -> Optional[str]:
        """Send a command into the container; None on success, error text otherwise."""
        success = self._docker_manager.send_command(bot_id, command, media=media)
        if success:
            return None
        return self._docker_manager.get_last_delivery_error(bot_id) or "command delivery failed"

    def get_recent_logs(self, *, bot_id: str, tail: int = 300) -> List[str]:
        """Return recent container log lines (empty list on None)."""
        return list(self._docker_manager.get_recent_logs(bot_id, tail=tail) or [])

    def ensure_monitor(self, *, bot_id: str) -> bool:
        """Attach the docker manager's monitor with our state-change hook."""
        return bool(self._docker_manager.ensure_monitor(bot_id, self._on_state_change))

    def get_monitor_packets(self, *, bot_id: str, after_seq: int = 0, limit: int = 200) -> List[Dict[str, Any]]:
        # The local runtime exposes no pull-based packet feed; presumably
        # monitor data flows through on_state_change instead — TODO confirm.
        return []

    def get_runtime_status(self, *, bot_id: str) -> str:
        """Return upper-cased container status; defaults to "STOPPED"."""
        return str(self._docker_manager.get_bot_status(bot_id) or "STOPPED").upper()

    def get_resource_snapshot(self, *, bot_id: str) -> Dict[str, Any]:
        """Return the docker manager's resource snapshot (empty dict on None)."""
        return dict(self._docker_manager.get_bot_resource_snapshot(bot_id) or {})

View File

@ -0,0 +1,59 @@
from typing import Any
from models.bot import BotInstance
from providers.registry import ProviderRegistry
from providers.provision.base import ProvisionProvider
from providers.runtime.base import RuntimeProvider
from providers.target import resolve_provider_target
from providers.workspace.base import WorkspaceProvider
def _require_provider(value: Any, label: str):
    """Return *value*, or raise RuntimeError if the provider is unset."""
    if value is None:
        raise RuntimeError(f"{label} is not configured")
    return value


def _get_registry(app_state: Any) -> "ProviderRegistry | None":
    """Fetch the provider registry from app state, validating its type.

    Returns None when no registry is attached (single-provider fallback mode).
    """
    registry = getattr(app_state, "provider_registry", None)
    if registry is None:
        return None
    if not isinstance(registry, ProviderRegistry):
        raise RuntimeError("provider registry is misconfigured")
    return registry


def _resolve_provider(app_state: Any, bot: "BotInstance", *, attr: str, bundle: str, label: str):
    """Shared resolution logic for the three public accessors below.

    When a registry exists, look up the bundle matching the bot's resolved
    target and fail loudly if nothing is registered for it; otherwise fall
    back to the single provider stored at ``app_state.<attr>``.
    """
    registry = _get_registry(app_state)
    if registry is not None:
        target = resolve_provider_target(app_state, bot)
        bundle_key = registry.resolve_bundle_key(target)
        provider = getattr(registry, bundle).get(bundle_key) if bundle_key else None
        if provider is not None:
            return provider
        raise RuntimeError(f"{label} is not configured for target {target.key}")
    return _require_provider(getattr(app_state, attr, None), label)


def get_runtime_provider(app_state: Any, bot: "BotInstance") -> "RuntimeProvider":
    """Resolve the RuntimeProvider responsible for *bot*."""
    return _resolve_provider(app_state, bot, attr="runtime_provider", bundle="runtime", label="runtime provider")


def get_workspace_provider(app_state: Any, bot: "BotInstance") -> "WorkspaceProvider":
    """Resolve the WorkspaceProvider responsible for *bot*."""
    return _resolve_provider(app_state, bot, attr="workspace_provider", bundle="workspace", label="workspace provider")


def get_provision_provider(app_state: Any, bot: "BotInstance") -> "ProvisionProvider":
    """Resolve the ProvisionProvider responsible for *bot*."""
    return _resolve_provider(app_state, bot, attr="provision_provider", bundle="provision", label="provision provider")

View File

@ -0,0 +1,118 @@
from dataclasses import dataclass
from typing import Any
from models.bot import BotInstance
# Baseline deployment target: a bot on the local node, reached directly,
# running in Docker, speaking the nanobot core adapter.
DEFAULT_NODE_ID = "local"
DEFAULT_TRANSPORT_KIND = "direct"
DEFAULT_RUNTIME_KIND = "docker"
DEFAULT_CORE_ADAPTER = "nanobot"
# Key under which a bot's target is persisted inside its config blob.
TARGET_CONFIG_KEY = "dashboardRuntime"
# Closed vocabularies: values outside these sets are rejected during
# normalization and replaced with the fallback target's value.
SUPPORTED_TRANSPORT_KINDS = {"direct", "edge"}
SUPPORTED_RUNTIME_KINDS = {"docker", "native"}


def _normalize_target_part(value: Any, fallback: str) -> str:
    """Trim and lower-case one target component, defaulting when empty."""
    cleaned = str(value or "").strip().lower()
    if cleaned:
        return cleaned
    return fallback


@dataclass(frozen=True)
class ProviderTarget:
    """Immutable addressing tuple describing where and how a bot runs."""

    node_id: str = DEFAULT_NODE_ID
    transport_kind: str = DEFAULT_TRANSPORT_KIND
    runtime_kind: str = DEFAULT_RUNTIME_KIND
    core_adapter: str = DEFAULT_CORE_ADAPTER

    @property
    def key(self) -> str:
        """Canonical colon-joined identity, e.g. ``local:direct:docker:nanobot``."""
        return f"{self.node_id}:{self.transport_kind}:{self.runtime_kind}:{self.core_adapter}"
def normalize_provider_target(value: Any, fallback: ProviderTarget | None = None) -> ProviderTarget:
    """Coerce *value* (ProviderTarget, dict, or anything else) to a ProviderTarget.

    Transport and runtime kinds outside the supported sets are silently
    replaced with the fallback target's values rather than raising.
    """
    base = fallback or ProviderTarget()
    if isinstance(value, ProviderTarget):
        raw = {
            "node": value.node_id,
            "transport": value.transport_kind,
            "runtime": value.runtime_kind,
            "adapter": value.core_adapter,
        }
    elif isinstance(value, dict):
        # Accept both snake_case and camelCase spellings.
        raw = {
            "node": value.get("node_id", value.get("nodeId")),
            "transport": value.get("transport_kind", value.get("transportKind")),
            "runtime": value.get("runtime_kind", value.get("runtimeKind")),
            "adapter": value.get("core_adapter", value.get("coreAdapter")),
        }
    else:
        raw = {"node": None, "transport": None, "runtime": None, "adapter": None}
    transport = _normalize_target_part(raw["transport"], base.transport_kind)
    if transport not in SUPPORTED_TRANSPORT_KINDS:
        transport = base.transport_kind
    runtime = _normalize_target_part(raw["runtime"], base.runtime_kind)
    if runtime not in SUPPORTED_RUNTIME_KINDS:
        runtime = base.runtime_kind
    return ProviderTarget(
        node_id=_normalize_target_part(raw["node"], base.node_id),
        transport_kind=transport,
        runtime_kind=runtime,
        core_adapter=_normalize_target_part(raw["adapter"], base.core_adapter),
    )
def provider_target_to_dict(target: ProviderTarget) -> dict[str, str]:
    """Serialize a target to a plain snake_case mapping."""
    field_names = ("node_id", "transport_kind", "runtime_kind", "core_adapter")
    return {name: getattr(target, name) for name in field_names}
def provider_target_from_config(config_data: Any, fallback: ProviderTarget | None = None) -> ProviderTarget:
    """Read the persisted target out of a bot config dict, tolerating junk input."""
    raw = config_data.get(TARGET_CONFIG_KEY) if isinstance(config_data, dict) else None
    return normalize_provider_target(raw, fallback=fallback)
def write_provider_target_config(config_data: dict[str, Any], target: ProviderTarget) -> dict[str, Any]:
    """Persist *target* into *config_data* under the camelCase wire keys; return the dict."""
    payload = {
        "nodeId": target.node_id,
        "transportKind": target.transport_kind,
        "runtimeKind": target.runtime_kind,
        "coreAdapter": target.core_adapter,
    }
    config_data[TARGET_CONFIG_KEY] = payload
    return config_data
def resolve_provider_target(app_state: Any, bot: BotInstance) -> ProviderTarget:
    """Determine the effective deployment target for *bot*.

    Dashboard-wide defaults are read from ``app_state.provider_default_*``.
    A pluggable ``resolve_bot_provider_target`` callable on app state wins
    when present; otherwise the bot row's own columns are used.
    """
    spec = (
        ("node_id", "provider_default_node_id", DEFAULT_NODE_ID),
        ("transport_kind", "provider_default_transport_kind", DEFAULT_TRANSPORT_KIND),
        ("runtime_kind", "provider_default_runtime_kind", DEFAULT_RUNTIME_KIND),
        ("core_adapter", "provider_default_core_adapter", DEFAULT_CORE_ADAPTER),
    )
    fallback = ProviderTarget(
        **{
            field: _normalize_target_part(getattr(app_state, attr, None), default)
            for field, attr, default in spec
        }
    )
    resolver = getattr(app_state, "resolve_bot_provider_target", None)
    if callable(resolver):
        return normalize_provider_target(resolver(bot), fallback=fallback)
    raw = {field: getattr(bot, field, None) for field, _attr, _default in spec}
    return normalize_provider_target(raw, fallback=fallback)

View File

@ -15,5 +15,7 @@ watchfiles==0.21.0
urllib3==1.26.18 urllib3==1.26.18
requests==2.31.0 requests==2.31.0
redis==5.0.8 redis==5.0.8
bcrypt==4.2.1
PyJWT==2.10.1
opencc-purepy==1.1.0 opencc-purepy==1.1.0
pywhispercpp==1.3.1 pywhispercpp==1.3.1

View File

@ -0,0 +1,122 @@
from typing import Any, Dict, List, Optional
from pydantic import BaseModel
class ChannelConfigRequest(BaseModel):
    """Payload for attaching a messaging channel to a bot."""

    channel_type: str
    external_app_id: Optional[str] = None
    app_secret: Optional[str] = None
    internal_port: Optional[int] = None
    is_active: bool = True
    extra_config: Optional[Dict[str, Any]] = None


class ChannelConfigUpdateRequest(BaseModel):
    """Partial channel update; every field optional — presumably None means
    "leave unchanged" (confirm against the channel update handler)."""

    channel_type: Optional[str] = None
    external_app_id: Optional[str] = None
    app_secret: Optional[str] = None
    internal_port: Optional[int] = None
    is_active: Optional[bool] = None
    extra_config: Optional[Dict[str, Any]] = None
class BotCreateRequest(BaseModel):
    """Full payload for creating a new bot instance."""

    id: str
    name: str
    enabled: Optional[bool] = True
    # LLM backend selection and credentials.
    llm_provider: str
    llm_model: str
    api_key: str
    image_tag: Optional[str] = None
    system_prompt: Optional[str] = None
    api_base: Optional[str] = None
    # Sampling parameters.
    temperature: float = 0.2
    top_p: float = 1.0
    max_tokens: int = 8192
    # Container resource limits (match LocalRuntimeProvider defaults).
    cpu_cores: float = 1.0
    memory_mb: int = 1024
    storage_gb: int = 10
    system_timezone: Optional[str] = None
    # Workspace markdown documents (soul/agents/user/tools/identity).
    soul_md: Optional[str] = None
    agents_md: Optional[str] = None
    user_md: Optional[str] = None
    tools_md: Optional[str] = None
    tools_config: Optional[Dict[str, Any]] = None
    env_params: Optional[Dict[str, str]] = None
    identity_md: Optional[str] = None
    channels: Optional[List[ChannelConfigRequest]] = None
    send_progress: Optional[bool] = None
    send_tool_hints: Optional[bool] = None
    # Deployment target (node/transport/runtime/adapter); None -> defaults.
    node_id: Optional[str] = None
    transport_kind: Optional[str] = None
    runtime_kind: Optional[str] = None
    core_adapter: Optional[str] = None
class BotUpdateRequest(BaseModel):
    """Partial bot update; all fields optional — presumably None means
    "leave unchanged" (confirm against the bot update handler)."""

    name: Optional[str] = None
    enabled: Optional[bool] = None
    # LLM backend selection and credentials.
    llm_provider: Optional[str] = None
    llm_model: Optional[str] = None
    api_key: Optional[str] = None
    api_base: Optional[str] = None
    image_tag: Optional[str] = None
    system_prompt: Optional[str] = None
    # Sampling parameters.
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    max_tokens: Optional[int] = None
    # Container resource limits.
    cpu_cores: Optional[float] = None
    memory_mb: Optional[int] = None
    storage_gb: Optional[int] = None
    system_timezone: Optional[str] = None
    # Workspace markdown documents.
    soul_md: Optional[str] = None
    agents_md: Optional[str] = None
    user_md: Optional[str] = None
    tools_md: Optional[str] = None
    tools_config: Optional[Dict[str, Any]] = None
    env_params: Optional[Dict[str, str]] = None
    identity_md: Optional[str] = None
    send_progress: Optional[bool] = None
    send_tool_hints: Optional[bool] = None
    # Deployment target overrides.
    node_id: Optional[str] = None
    transport_kind: Optional[str] = None
    runtime_kind: Optional[str] = None
    core_adapter: Optional[str] = None
class BotDeployRequest(BaseModel):
    """Deploy a bot onto a managed node, optionally starting it afterwards."""

    node_id: str
    runtime_kind: Optional[str] = None
    image_tag: Optional[str] = None
    auto_start: bool = False


class BotToolsConfigUpdateRequest(BaseModel):
    """Replace the bot's tools configuration mapping."""

    tools_config: Optional[Dict[str, Any]] = None


class BotMcpConfigUpdateRequest(BaseModel):
    """Replace the bot's MCP servers configuration mapping."""

    mcp_servers: Optional[Dict[str, Any]] = None


class BotEnvParamsUpdateRequest(BaseModel):
    """Replace the bot's environment parameters."""

    env_params: Optional[Dict[str, str]] = None


class CommandRequest(BaseModel):
    """Deliver a command (and optional attachment refs) to a running bot."""

    command: Optional[str] = None
    attachments: Optional[List[str]] = None


class MessageFeedbackRequest(BaseModel):
    """Attach free-form feedback to a message."""

    feedback: Optional[str] = None


class WorkspaceFileUpdateRequest(BaseModel):
    """Overwrite a workspace file with new content."""

    content: str


class SystemTemplatesUpdateRequest(BaseModel):
    """Update system-wide agent markdown templates and topic presets."""

    agent_md_templates: Optional[Dict[str, str]] = None
    topic_presets: Optional[Dict[str, Any]] = None

View File

@ -0,0 +1,128 @@
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
# Protocol version tag stamped on edge payloads (presumably used for
# dashboard/edge compatibility checks — confirm against the edge client).
NODE_PROTOCOL_VERSION = "1"


class EdgeNodeIdentityBase(BaseModel):
    """Common identity fields reported by an edge node."""

    protocol_version: str = NODE_PROTOCOL_VERSION
    node_id: str
    display_name: str
    service: str = "dashboard-edge"
    transport_kind: str = "edge"
    runtime_kind: str = "docker"
    core_adapter: str = "nanobot"


class EdgeStatusResponse(BaseModel):
    """Minimal status acknowledgement from an edge endpoint."""

    status: str


class EdgeStateWriteRequest(BaseModel):
    """Write a state blob for a bot on the edge node."""

    data: Dict[str, Any] = Field(default_factory=dict)
    workspace_root: Optional[str] = None


class EdgeStateResponse(BaseModel):
    """A stored state blob keyed by bot and state key."""

    bot_id: str
    state_key: str
    data: Dict[str, Any] = Field(default_factory=dict)


class EdgeNativePreflightRequest(BaseModel):
    """Check whether a native command/workdir pair is usable on the node."""

    native_command: Optional[str] = None
    native_workdir: Optional[str] = None


class EdgeNativePreflightResponse(BaseModel):
    """Result of a native runtime preflight check."""

    ok: bool = False
    command: List[str] = Field(default_factory=list)
    workdir: str = ""
    command_available: bool = False
    workdir_exists: bool = False
    detail: str = ""
class EdgeStartBotRequest(BaseModel):
    """Start payload sent to an edge node (mirrors EdgeRuntimeProvider.start_bot)."""

    image_tag: str
    runtime_kind: str = "docker"
    env_vars: Dict[str, str] = Field(default_factory=dict)
    workspace_root: Optional[str] = None
    # Only meaningful for the "native" runtime kind.
    native_command: Optional[str] = None
    native_workdir: Optional[str] = None
    # Resource limits.
    cpu_cores: float = 1.0
    memory_mb: int = 1024
    storage_gb: int = 10


class EdgeCommandRequest(BaseModel):
    """Deliver a command plus optional media references to a bot."""

    command: str
    media: List[str] = Field(default_factory=list)


class EdgeLogsResponse(BaseModel):
    """Recent log lines for one bot."""

    bot_id: str
    logs: List[str] = Field(default_factory=list)


class EdgeMonitorEnsureResponse(BaseModel):
    """Whether a runtime monitor could be attached."""

    ensured: bool = False


class EdgeMonitorPacket(BaseModel):
    """One sequenced monitor packet captured on the edge node."""

    protocol_version: str = NODE_PROTOCOL_VERSION
    node_id: str = ""
    bot_id: str = ""
    seq: int = 0
    captured_at: str = ""
    packet: Dict[str, Any] = Field(default_factory=dict)


class EdgeMonitorPacketsResponse(BaseModel):
    """A page of monitor packets plus the latest known sequence number."""

    protocol_version: str = NODE_PROTOCOL_VERSION
    node_id: str = ""
    bot_id: str
    latest_seq: int = 0
    packets: List[EdgeMonitorPacket] = Field(default_factory=list)
class EdgeWorkspaceSyncRequest(BaseModel):
    """Push workspace overrides to an edge node before starting a bot."""

    channels_override: Optional[List[Dict[str, Any]]] = None
    global_delivery_override: Optional[Dict[str, Any]] = None
    runtime_overrides: Optional[Dict[str, Any]] = None


class EdgeMarkdownWriteRequest(BaseModel):
    """Overwrite one markdown file in the bot workspace."""

    content: str = ""


class EdgeNodeResourcesResponse(BaseModel):
    """Node identity plus its reported resource usage snapshot."""

    protocol_version: str = NODE_PROTOCOL_VERSION
    node_id: str
    display_name: str = ""
    service: str = "dashboard-edge"
    transport_kind: str = "edge"
    runtime_kind: str = "docker"
    core_adapter: str = "nanobot"
    resources: Dict[str, Any] = Field(default_factory=dict)
    reported_at: str = ""


class EdgeNodeSelfResponse(BaseModel):
    """Full self-description of an edge node (identity, capabilities, resources)."""

    protocol_version: str = NODE_PROTOCOL_VERSION
    node_id: str
    display_name: str
    service: str = "dashboard-edge"
    transport_kind: str = "edge"
    runtime_kind: str = "docker"
    core_adapter: str = "nanobot"
    capabilities: Dict[str, Any] = Field(default_factory=dict)
    resources: Dict[str, Any] = Field(default_factory=dict)
    reported_at: str = ""


class EdgeNodeHeartbeatResponse(EdgeNodeIdentityBase):
    """Periodic heartbeat: identity base plus capabilities and resources."""

    capabilities: Dict[str, Any] = Field(default_factory=dict)
    resources: Dict[str, Any] = Field(default_factory=dict)
    reported_at: str = ""

View File

@ -75,6 +75,36 @@ class PlatformActivityItem(BaseModel):
created_at: str created_at: str
class PlatformActivityListResponse(BaseModel):
    """Paged list of platform activity events."""

    items: List[PlatformActivityItem] = Field(default_factory=list)
    total: int = 0
    limit: int = 20
    offset: int = 0
    has_more: bool = False


class PlatformDashboardUsagePoint(BaseModel):
    """One time bucket in a usage series."""

    bucket_at: str
    label: str
    call_count: int = 0


class PlatformDashboardUsageSeries(BaseModel):
    """Per-model call-count series over time."""

    model: str
    total_calls: int = 0
    points: List[PlatformDashboardUsagePoint] = Field(default_factory=list)


class PlatformDashboardAnalyticsResponse(BaseModel):
    """Aggregated dashboard analytics: usage series plus recent events."""

    total_request_count: int = 0
    total_model_count: int = 0
    granularity: str = "day"
    since_days: int = 7
    events_page_size: int = 20
    series: List[PlatformDashboardUsageSeries] = Field(default_factory=list)
    recent_events: List[PlatformActivityItem] = Field(default_factory=list)
class SystemSettingPayload(BaseModel): class SystemSettingPayload(BaseModel):
key: str key: str
name: str = "" name: str = ""
@ -97,3 +127,39 @@ class SystemSettingItem(BaseModel):
sort_order: int = 100 sort_order: int = 100
created_at: str created_at: str
updated_at: str updated_at: str
class ManagedNodePayload(BaseModel):
    """Configuration for one managed (edge) node."""

    node_id: str
    display_name: str = ""
    base_url: str = ""
    enabled: bool = True
    auth_token: str = ""
    transport_kind: str = "edge"
    runtime_kind: str = "docker"
    core_adapter: str = "nanobot"
    workspace_root: str = ""
    # Native-runtime settings; "inherit" defers to the node's own default
    # (see EdgeRuntimeProvider._normalize_native_sandbox_mode).
    native_command: str = ""
    native_workdir: str = ""
    native_sandbox_mode: str = "inherit"


class ManagedNodeConnectivityResult(BaseModel):
    """Outcome of probing a managed node's reachability."""

    ok: bool
    status: str
    latency_ms: int = 0
    detail: str = ""
    node_self: Optional[Dict[str, Any]] = None


class ManagedNodeNativePreflightResult(BaseModel):
    """Outcome of a native-runtime preflight on a managed node."""

    ok: bool
    status: str
    latency_ms: int = 0
    detail: str = ""
    command: List[str] = Field(default_factory=list)
    workdir: str = ""
    command_available: bool = False
    workdir_exists: bool = False
    runtime_native_supported: bool = False
    node_self: Optional[Dict[str, Any]] = None

View File

@ -0,0 +1,153 @@
from typing import List, Optional
from pydantic import BaseModel, Field
class SysAuthLoginRequest(BaseModel):
    """Username/password login payload."""

    username: str
    password: str


class SysAuthMenuItem(BaseModel):
    """One navigation menu entry; recursive via ``children``."""

    menu_key: str
    parent_key: str = ""
    title: str
    title_en: str = ""
    menu_type: str = "item"
    route_path: str = ""
    icon: str = ""
    permission_key: str = ""
    sort_order: int = 100
    children: List["SysAuthMenuItem"] = Field(default_factory=list)


class SysAuthRolePayload(BaseModel):
    """Compact role reference attached to a user."""

    id: int = 0
    role_key: str
    name: str


class SysAuthUserPayload(BaseModel):
    """Authenticated user identity with its role."""

    id: int
    username: str
    display_name: str
    role: Optional[SysAuthRolePayload] = None
class SysAssignedBotPayload(BaseModel):
    """Summary of a bot assigned to a dashboard user."""

    id: str
    name: str
    enabled: bool = True
    node_id: str = ""
    node_display_name: str = ""
    docker_status: str = "STOPPED"
    image_tag: str = ""


class SysRoleSummaryResponse(BaseModel):
    """Role with its granted menus/permissions and user count."""

    id: int
    role_key: str
    name: str
    description: str = ""
    is_active: bool = True
    sort_order: int = 100
    user_count: int = 0
    menu_keys: List[str] = Field(default_factory=list)
    permission_keys: List[str] = Field(default_factory=list)


class SysRoleListResponse(BaseModel):
    """List wrapper for role summaries."""

    items: List[SysRoleSummaryResponse] = Field(default_factory=list)


class SysRoleUpsertRequest(BaseModel):
    """Create or update a role together with its grants."""

    role_key: str
    name: str
    description: str = ""
    is_active: bool = True
    sort_order: int = 100
    menu_keys: List[str] = Field(default_factory=list)
    permission_keys: List[str] = Field(default_factory=list)


class SysRoleGrantMenuItem(BaseModel):
    """Menu entry shown in the role-grant editor; recursive via ``children``."""

    menu_key: str
    parent_key: str = ""
    title: str
    title_en: str = ""
    menu_type: str = "item"
    route_path: str = ""
    icon: str = ""
    sort_order: int = 100
    children: List["SysRoleGrantMenuItem"] = Field(default_factory=list)


class SysPermissionSummaryResponse(BaseModel):
    """One grantable permission, linked to a menu and an action."""

    id: int
    permission_key: str
    name: str
    menu_key: str = ""
    action: str = "view"
    description: str = ""
    sort_order: int = 100


class SysRoleGrantBootstrapResponse(BaseModel):
    """Everything the role-grant editor needs: menu tree plus permissions."""

    menus: List[SysRoleGrantMenuItem] = Field(default_factory=list)
    permissions: List[SysPermissionSummaryResponse] = Field(default_factory=list)
class SysUserSummaryResponse(BaseModel):
    """Dashboard user with role and assigned bot ids."""

    id: int
    username: str
    display_name: str
    is_active: bool = True
    last_login_at: Optional[str] = None
    role: Optional[SysAuthRolePayload] = None
    bot_ids: List[str] = Field(default_factory=list)


class SysUserListResponse(BaseModel):
    """List wrapper for user summaries."""

    items: List[SysUserSummaryResponse] = Field(default_factory=list)


class SysUserCreateRequest(BaseModel):
    """Create a dashboard user with role and bot assignments."""

    username: str
    display_name: str
    password: str
    role_id: int
    is_active: bool = True
    bot_ids: List[str] = Field(default_factory=list)


class SysUserUpdateRequest(BaseModel):
    """Update a dashboard user; empty password presumably means
    "keep current password" (confirm against the user service)."""

    display_name: str
    password: str = ""
    role_id: int
    is_active: bool = True
    bot_ids: List[str] = Field(default_factory=list)


class SysProfileUpdateRequest(BaseModel):
    """Self-service profile update (display name, optional new password)."""

    display_name: str
    password: str = ""


class SysAuthBootstrapResponse(BaseModel):
    """Everything the frontend needs after login: token, user, menus, grants."""

    token: str = ""
    expires_at: Optional[str] = None
    user: SysAuthUserPayload
    menus: List[SysAuthMenuItem] = Field(default_factory=list)
    permissions: List[str] = Field(default_factory=list)
    home_path: str = "/dashboard"
    assigned_bots: List[SysAssignedBotPayload] = Field(default_factory=list)


class SysAuthStatusResponse(BaseModel):
    """Auth subsystem status exposed before login."""

    enabled: bool = True
    user_count: int = 0
    default_username: str = "admin"


# Resolve the recursive forward references in the menu models.
SysAuthMenuItem.model_rebuild()
SysRoleGrantMenuItem.model_rebuild()

View File

@ -0,0 +1,165 @@
import asyncio
from typing import Any, Callable
from fastapi import HTTPException, WebSocket, WebSocketDisconnect
from sqlmodel import Session, select
from models.bot import BotInstance
from models.platform import BotRequestUsage
class AppLifecycleService:
    """Owns application startup and the per-bot websocket lifecycle.

    All collaborators are injected as objects/callables, keeping this
    service free of direct imports from the wider service layer.
    """

    def __init__(
        self,
        *,
        app: Any,
        engine: Any,
        cache: Any,
        logger: Any,
        project_root: str,
        database_engine: str,
        database_echo: Any,
        database_url_display: str,
        redis_enabled: bool,
        init_database: Callable[[], None],
        node_registry_service: Any,
        local_managed_node: Callable[[], Any],
        prune_expired_activity_events: Callable[..., int],
        migrate_bot_resources_store: Callable[[str], None],
        resolve_bot_provider_target_for_instance: Callable[[Any], Any],
        default_provider_target: Callable[[], Any],
        set_bot_provider_target: Callable[[str, Any], None],
        apply_provider_target_to_bot: Callable[[Any, Any], None],
        normalize_provider_target: Callable[[Any], Any],
        runtime_service: Any,
        runtime_event_service: Any,
        clear_provider_target_overrides: Callable[[], None],
    ) -> None:
        self._app = app
        self._engine = engine
        self._cache = cache
        self._logger = logger
        self._project_root = project_root
        self._database_engine = database_engine
        self._database_echo = database_echo
        self._database_url_display = database_url_display
        self._redis_enabled = redis_enabled
        self._init_database = init_database
        self._node_registry_service = node_registry_service
        self._local_managed_node = local_managed_node
        self._prune_expired_activity_events = prune_expired_activity_events
        self._migrate_bot_resources_store = migrate_bot_resources_store
        self._resolve_bot_provider_target_for_instance = resolve_bot_provider_target_for_instance
        self._default_provider_target = default_provider_target
        self._set_bot_provider_target = set_bot_provider_target
        self._apply_provider_target_to_bot = apply_provider_target_to_bot
        self._normalize_provider_target = normalize_provider_target
        self._runtime_service = runtime_service
        self._runtime_event_service = runtime_event_service
        self._clear_provider_target_overrides = clear_provider_target_overrides

    async def on_startup(self) -> None:
        """Run startup: init DB/cache, migrate bot targets, restore monitors."""
        # Remember the main loop so other threads can schedule onto it.
        self._app.state.main_loop = asyncio.get_running_loop()
        self._clear_provider_target_overrides()
        self._logger.info(
            "startup project_root=%s db_engine=%s db_echo=%s db_url=%s redis=%s",
            self._project_root,
            self._database_engine,
            self._database_echo,
            self._database_url_display,
            "enabled" if self._cache.ping() else ("disabled" if self._redis_enabled else "not_configured"),
        )
        self._init_database()
        # Empty prefix == wipe the whole cache on boot.
        self._cache.delete_prefix("")
        with Session(self._engine) as session:
            self._node_registry_service.load_from_session(session)
            self._node_registry_service.upsert_node(session, self._local_managed_node())
            pruned_events = self._prune_expired_activity_events(session, force=True)
            if pruned_events > 0:
                session.commit()
            target_dirty = False
            for bot in session.exec(select(BotInstance)).all():
                self._migrate_bot_resources_store(bot.id)
                target = self._resolve_bot_provider_target_for_instance(bot)
                # NOTE(review): every bot is coerced onto "edge" transport
                # here — confirm direct/local transport is intentionally
                # retired at startup.
                if str(target.transport_kind or "").strip().lower() != "edge":
                    target = self._normalize_provider_target(
                        {
                            "node_id": target.node_id,
                            "transport_kind": "edge",
                            "runtime_kind": target.runtime_kind,
                            "core_adapter": target.core_adapter,
                        },
                        fallback=self._default_provider_target(),
                    )
                self._set_bot_provider_target(bot.id, target)
                # Persist the resolved target onto the bot row only when one
                # of the four columns actually differs.
                if (
                    str(getattr(bot, "node_id", "") or "").strip().lower() != target.node_id
                    or str(getattr(bot, "transport_kind", "") or "").strip().lower() != target.transport_kind
                    or str(getattr(bot, "runtime_kind", "") or "").strip().lower() != target.runtime_kind
                    or str(getattr(bot, "core_adapter", "") or "").strip().lower() != target.core_adapter
                ):
                    self._apply_provider_target_to_bot(bot, target)
                    session.add(bot)
                    target_dirty = True
            if target_dirty:
                session.commit()
            # Re-attach monitors for bots that were RUNNING before restart,
            # and resync packets for their latest pending usage request.
            running_bots = session.exec(select(BotInstance).where(BotInstance.docker_status == "RUNNING")).all()
            for bot in running_bots:
                try:
                    self._runtime_service.ensure_monitor(app_state=self._app.state, bot=bot)
                    pending_usage = session.exec(
                        select(BotRequestUsage)
                        .where(BotRequestUsage.bot_id == str(bot.id or "").strip())
                        .where(BotRequestUsage.status == "PENDING")
                        .order_by(BotRequestUsage.started_at.desc(), BotRequestUsage.id.desc())
                        .limit(1)
                    ).first()
                    if pending_usage and str(getattr(pending_usage, "request_id", "") or "").strip():
                        self._runtime_service.sync_edge_monitor_packets(
                            app_state=self._app.state,
                            bot=bot,
                            request_id=str(pending_usage.request_id or "").strip(),
                        )
                except HTTPException as exc:
                    # Runtime backend unreachable: skip this bot, keep booting.
                    self._logger.warning(
                        "Skip runtime monitor restore on startup for bot_id=%s due to unavailable runtime backend: %s",
                        str(bot.id or ""),
                        str(getattr(exc, "detail", "") or exc),
                    )
                except Exception:
                    self._logger.exception("Failed to restore runtime monitor on startup for bot_id=%s", str(bot.id or ""))

    async def handle_websocket(self, websocket: WebSocket, bot_id: str) -> None:
        """Serve one bot-event websocket until the client disconnects."""
        with Session(self._engine) as session:
            bot = session.get(BotInstance, bot_id)
            if not bot:
                await websocket.close(code=4404, reason="Bot not found")
                return
        connected = False
        try:
            await self._runtime_event_service.manager.connect(bot_id, websocket)
            connected = True
        except Exception as exc:
            self._logger.warning("websocket connect failed bot_id=%s detail=%s", bot_id, exc)
            try:
                await websocket.close(code=1011, reason="WebSocket accept failed")
            except Exception:
                pass
            return
        self._runtime_service.ensure_monitor(app_state=websocket.app.state, bot=bot)
        try:
            # Drain client frames; events are pushed by the event service.
            while True:
                await websocket.receive_text()
        except WebSocketDisconnect:
            pass
        except RuntimeError as exc:
            # Starlette raises RuntimeError for use-after-close; only log
            # errors that are not ordinary teardown races.
            msg = str(exc or "").lower()
            if "need to call \"accept\" first" not in msg and "not connected" not in msg:
                self._logger.exception("websocket runtime error bot_id=%s", bot_id)
        except Exception:
            self._logger.exception("websocket unexpected error bot_id=%s", bot_id)
        finally:
            if connected:
                self._runtime_event_service.manager.disconnect(bot_id, websocket)

View File

@ -0,0 +1,175 @@
from typing import Any, Callable, Dict, List
from fastapi import HTTPException
from sqlmodel import Session
from models.bot import BotInstance
# Injected-collaborator signatures: BotChannelService receives these callables
# from the dashboard wiring layer instead of importing implementations directly.
ReadBotConfig = Callable[[str], Dict[str, Any]]  # bot_id -> parsed bot config dict
WriteBotConfig = Callable[[str, Dict[str, Any]], None]  # persist the config dict for bot_id
SyncBotWorkspace = Callable[[Session, BotInstance], None]  # push config changes to the bot workspace
InvalidateBotCache = Callable[[str], None]  # drop cached bot detail for bot_id
GetBotChannels = Callable[[BotInstance], List[Dict[str, Any]]]  # channel rows derived from the bot config
NormalizeChannelExtra = Callable[[Any], Dict[str, Any]]  # sanitize a channel's extra_config payload
ChannelApiToCfg = Callable[[Dict[str, Any]], Dict[str, Any]]  # API channel row -> config-file entry
ReadGlobalDeliveryFlags = Callable[[Any], tuple[bool, bool]]  # channels cfg -> (sendProgress, sendToolHints)
class BotChannelService:
    """CRUD for a bot's messaging channels, persisted in the bot config file.

    Channels live under the ``channels`` mapping of the bot config, keyed by
    channel type (the channel id equals its type). The built-in ``dashboard``
    channel is virtual: it is never written as a ``channels`` entry and cannot
    be created or deleted here; updating it only adjusts the global
    ``sendProgress`` / ``sendToolHints`` delivery flags.

    All collaborators are injected as callables (see the module-level alias
    definitions) so this service carries no direct storage dependencies.
    """
    def __init__(
        self,
        *,
        read_bot_config: ReadBotConfig,
        write_bot_config: WriteBotConfig,
        sync_bot_workspace_via_provider: SyncBotWorkspace,
        invalidate_bot_detail_cache: InvalidateBotCache,
        get_bot_channels_from_config: GetBotChannels,
        normalize_channel_extra: NormalizeChannelExtra,
        channel_api_to_cfg: ChannelApiToCfg,
        read_global_delivery_flags: ReadGlobalDeliveryFlags,
    ) -> None:
        """Store the injected collaborators; no I/O happens here."""
        self._read_bot_config = read_bot_config
        self._write_bot_config = write_bot_config
        self._sync_bot_workspace_via_provider = sync_bot_workspace_via_provider
        self._invalidate_bot_detail_cache = invalidate_bot_detail_cache
        self._get_bot_channels_from_config = get_bot_channels_from_config
        self._normalize_channel_extra = normalize_channel_extra
        self._channel_api_to_cfg = channel_api_to_cfg
        self._read_global_delivery_flags = read_global_delivery_flags
    def _require_bot(self, *, session: Session, bot_id: str) -> BotInstance:
        """Load the bot or raise 404; used by every public entry point."""
        bot = session.get(BotInstance, bot_id)
        if not bot:
            raise HTTPException(status_code=404, detail="Bot not found")
        return bot
    def list_channels(self, *, session: Session, bot_id: str) -> List[Dict[str, Any]]:
        """Return the channel rows derived from the bot's config file."""
        bot = self._require_bot(session=session, bot_id=bot_id)
        return self._get_bot_channels_from_config(bot)
    def create_channel(self, *, session: Session, bot_id: str, payload: Any) -> Dict[str, Any]:
        """Add a new channel of a not-yet-used type and persist it.

        Raises 400 for a missing type, for the reserved ``dashboard`` type,
        or when a channel of the same type already exists.
        """
        bot = self._require_bot(session=session, bot_id=bot_id)
        ctype = str(getattr(payload, "channel_type", "") or "").strip().lower()
        if not ctype:
            raise HTTPException(status_code=400, detail="channel_type is required")
        if ctype == "dashboard":
            raise HTTPException(status_code=400, detail="dashboard channel is built-in and cannot be created manually")
        current_rows = self._get_bot_channels_from_config(bot)
        if any(str(row.get("channel_type") or "").lower() == ctype for row in current_rows):
            raise HTTPException(status_code=400, detail=f"Channel already exists: {ctype}")
        new_row = {
            # Channel id is its type; one channel per type per bot.
            "id": ctype,
            "bot_id": bot_id,
            "channel_type": ctype,
            # Fallback app id keeps the entry unique when the caller omits one.
            "external_app_id": str(getattr(payload, "external_app_id", "") or "").strip() or f"{ctype}-{bot_id}",
            "app_secret": str(getattr(payload, "app_secret", "") or "").strip(),
            # Clamp to the valid TCP port range; default 8080.
            "internal_port": max(1, min(int(getattr(payload, "internal_port", 8080) or 8080), 65535)),
            "is_active": bool(getattr(payload, "is_active", True)),
            "extra_config": self._normalize_channel_extra(getattr(payload, "extra_config", None)),
            "locked": False,
        }
        config_data = self._read_bot_config(bot_id)
        channels_cfg = config_data.get("channels")
        if not isinstance(channels_cfg, dict):
            channels_cfg = {}
        config_data["channels"] = channels_cfg
        channels_cfg[ctype] = self._channel_api_to_cfg(new_row)
        self._write_bot_config(bot_id, config_data)
        # Propagate the new config to the running workspace and drop caches.
        self._sync_bot_workspace_via_provider(session, bot)
        self._invalidate_bot_detail_cache(bot_id)
        return new_row
    def update_channel(
        self,
        *,
        session: Session,
        bot_id: str,
        channel_id: str,
        payload: Any,
    ) -> Dict[str, Any]:
        """Apply a partial update (``exclude_unset`` semantics) to a channel.

        Supports renaming the channel type; the config entry moves with it.
        Updating to ``dashboard`` only writes the global delivery flags and
        never creates a ``channels['dashboard']`` entry.
        """
        bot = self._require_bot(session=session, bot_id=bot_id)
        channel_key = str(channel_id or "").strip().lower()
        rows = self._get_bot_channels_from_config(bot)
        row = next((r for r in rows if str(r.get("id") or "").lower() == channel_key), None)
        if not row:
            raise HTTPException(status_code=404, detail="Channel not found")
        if str(row.get("channel_type") or "").strip().lower() == "dashboard" or bool(row.get("locked")):
            raise HTTPException(status_code=400, detail="dashboard channel is built-in and cannot be modified")
        update_data = payload.model_dump(exclude_unset=True)
        existing_type = str(row.get("channel_type") or "").strip().lower()
        new_type = existing_type
        if "channel_type" in update_data and update_data["channel_type"] is not None:
            new_type = str(update_data["channel_type"]).strip().lower()
            if not new_type:
                raise HTTPException(status_code=400, detail="channel_type cannot be empty")
        # NOTE(review): existing_type == "dashboard" appears unreachable here —
        # the guard above already raises for dashboard/locked rows. Confirm
        # before relying on this branch.
        if existing_type == "dashboard" and new_type != "dashboard":
            raise HTTPException(status_code=400, detail="dashboard channel type cannot be changed")
        if new_type != existing_type and any(str(r.get("channel_type") or "").lower() == new_type for r in rows):
            raise HTTPException(status_code=400, detail=f"Channel already exists: {new_type}")
        if "external_app_id" in update_data and update_data["external_app_id"] is not None:
            row["external_app_id"] = str(update_data["external_app_id"]).strip()
        if "app_secret" in update_data and update_data["app_secret"] is not None:
            row["app_secret"] = str(update_data["app_secret"]).strip()
        if "internal_port" in update_data and update_data["internal_port"] is not None:
            # Clamp to the valid TCP port range.
            row["internal_port"] = max(1, min(int(update_data["internal_port"]), 65535))
        if "is_active" in update_data and update_data["is_active"] is not None:
            next_active = bool(update_data["is_active"])
            # NOTE(review): same as above — dashboard rows were rejected
            # earlier, so this guard looks unreachable; verify.
            if existing_type == "dashboard" and not next_active:
                raise HTTPException(status_code=400, detail="dashboard channel must remain enabled")
            row["is_active"] = next_active
        if "extra_config" in update_data:
            row["extra_config"] = self._normalize_channel_extra(update_data.get("extra_config"))
        row["channel_type"] = new_type
        row["id"] = new_type
        row["locked"] = new_type == "dashboard"
        config_data = self._read_bot_config(bot_id)
        channels_cfg = config_data.get("channels")
        if not isinstance(channels_cfg, dict):
            channels_cfg = {}
        config_data["channels"] = channels_cfg
        # Preserve the current global delivery flags unless the dashboard
        # channel's extra_config explicitly overrides them.
        current_send_progress, current_send_tool_hints = self._read_global_delivery_flags(channels_cfg)
        if new_type == "dashboard":
            extra = self._normalize_channel_extra(row.get("extra_config"))
            channels_cfg["sendProgress"] = bool(extra.get("sendProgress", current_send_progress))
            channels_cfg["sendToolHints"] = bool(extra.get("sendToolHints", current_send_tool_hints))
        else:
            channels_cfg["sendProgress"] = current_send_progress
            channels_cfg["sendToolHints"] = current_send_tool_hints
        # The dashboard channel is virtual and must never persist as an entry.
        channels_cfg.pop("dashboard", None)
        # Type rename: remove the entry stored under the old type key.
        if existing_type != "dashboard" and existing_type in channels_cfg and existing_type != new_type:
            channels_cfg.pop(existing_type, None)
        if new_type != "dashboard":
            channels_cfg[new_type] = self._channel_api_to_cfg(row)
        self._write_bot_config(bot_id, config_data)
        session.commit()
        self._sync_bot_workspace_via_provider(session, bot)
        self._invalidate_bot_detail_cache(bot_id)
        return row
    def delete_channel(self, *, session: Session, bot_id: str, channel_id: str) -> Dict[str, Any]:
        """Remove a channel's config entry; the dashboard channel is protected."""
        bot = self._require_bot(session=session, bot_id=bot_id)
        channel_key = str(channel_id or "").strip().lower()
        rows = self._get_bot_channels_from_config(bot)
        row = next((r for r in rows if str(r.get("id") or "").lower() == channel_key), None)
        if not row:
            raise HTTPException(status_code=404, detail="Channel not found")
        if str(row.get("channel_type") or "").lower() == "dashboard":
            raise HTTPException(status_code=400, detail="dashboard channel cannot be deleted")
        config_data = self._read_bot_config(bot_id)
        channels_cfg = config_data.get("channels")
        if not isinstance(channels_cfg, dict):
            channels_cfg = {}
        config_data["channels"] = channels_cfg
        # Entries are keyed by channel type, not by id.
        channels_cfg.pop(str(row.get("channel_type") or "").lower(), None)
        self._write_bot_config(bot_id, config_data)
        session.commit()
        self._sync_bot_workspace_via_provider(session, bot)
        self._invalidate_bot_detail_cache(bot_id)
        return {"status": "deleted"}

View File

@ -0,0 +1,333 @@
import asyncio
import os
import threading
import time
from datetime import datetime, timezone
from typing import Any, Callable, Dict, List, Optional
from fastapi import HTTPException
from sqlmodel import Session
from models.bot import BotInstance
from providers.runtime.base import RuntimeProvider
class BotCommandService:
    """Deliver dashboard commands to a bot runtime and track their lifecycle.

    Responsibilities visible in this class:
      * validate and normalize the command text plus attachment paths;
      * record a usage request and activity events around delivery;
      * broadcast the outbound USER_COMMAND packet to connected dashboards;
      * for edge runtimes, poll monitor packets on a background thread and
        replay them to the dashboard, deduplicated by a per-bot sequence.

    All storage/transport concerns are injected as callables.
    """
    def __init__(
        self,
        *,
        read_runtime_snapshot: Callable[[BotInstance], Dict[str, Any]],
        normalize_media_list: Callable[[Any, str], List[str]],
        resolve_workspace_path: Callable[[str, Optional[str]], tuple[str, str]],
        is_visual_attachment_path: Callable[[str], bool],
        is_video_attachment_path: Callable[[str], bool],
        create_usage_request: Callable[..., str],
        record_activity_event: Callable[..., None],
        fail_latest_usage: Callable[[Session, str, str], None],
        persist_runtime_packet: Callable[[str, Dict[str, Any]], Optional[int]],
        get_main_loop: Callable[[Any], Any],
        broadcast_packet: Callable[[str, Dict[str, Any], Any], None],
    ) -> None:
        """Store collaborators and initialize the monitor-sync thread state."""
        self._read_runtime_snapshot = read_runtime_snapshot
        self._normalize_media_list = normalize_media_list
        self._resolve_workspace_path = resolve_workspace_path
        self._is_visual_attachment_path = is_visual_attachment_path
        self._is_video_attachment_path = is_video_attachment_path
        self._create_usage_request = create_usage_request
        self._record_activity_event = record_activity_event
        self._fail_latest_usage = fail_latest_usage
        self._persist_runtime_packet = persist_runtime_packet
        self._get_main_loop = get_main_loop
        self._broadcast_packet = broadcast_packet
        # One sync thread per (bot_id, request key); guarded by _monitor_sync_lock.
        self._monitor_sync_threads: Dict[tuple[str, str], threading.Thread] = {}
        self._monitor_sync_lock = threading.Lock()
        # Highest monitor seq already replayed per bot; guarded by its own lock.
        self._monitor_sync_seq_lock = threading.Lock()
        self._monitor_sync_last_seq: Dict[str, int] = {}
    def execute(
        self,
        *,
        session: Session,
        bot_id: str,
        bot: BotInstance,
        payload: Any,
        runtime_provider: RuntimeProvider,
        app_state: Any,
    ) -> Dict[str, Any]:
        """Validate, record and deliver one dashboard command.

        Raises:
            HTTPException 400: empty command with no attachments, or a
                non-edge attachment path that does not exist on disk.
            HTTPException 502: the runtime provider reported a delivery error
                (the usage record is marked failed first).
        """
        runtime_snapshot = self._read_runtime_snapshot(bot)
        attachments = self._normalize_media_list(getattr(payload, "attachments", None), bot_id)
        command = str(getattr(payload, "command", None) or "").strip()
        if not command and not attachments:
            raise HTTPException(status_code=400, detail="Command or attachments is required")
        checked_attachments: List[str] = []
        transport_kind = str(getattr(bot, "transport_kind", "") or "").strip().lower()
        for rel in attachments:
            # Edge transports host files remotely, so local existence is only
            # checked for non-edge bots.
            if transport_kind != "edge":
                _, target = self._resolve_workspace_path(bot_id, rel)
                if not os.path.isfile(target):
                    raise HTTPException(status_code=400, detail=f"attachment not found: {rel}")
            checked_attachments.append(rel)
        # Paths as seen from inside the bot container/workspace.
        delivery_media = [f"/root/.nanobot/workspace/{p.lstrip('/')}" for p in checked_attachments]
        display_command = command if command else "[attachment message]"
        delivery_command = self._build_delivery_command(command=command, checked_attachments=checked_attachments)
        request_id = self._create_usage_request(
            session,
            bot_id,
            display_command,
            attachments=checked_attachments,
            channel="dashboard",
            metadata={"attachment_count": len(checked_attachments)},
            provider=str(runtime_snapshot.get("llm_provider") or "").strip() or None,
            model=str(runtime_snapshot.get("llm_model") or "").strip() or None,
        )
        self._record_activity_event(
            session,
            bot_id,
            "command_submitted",
            request_id=request_id,
            channel="dashboard",
            detail="command submitted",
            metadata={
                "attachment_count": len(checked_attachments),
                "has_text": bool(command),
            },
        )
        session.commit()
        # Echo the user's command back to connected dashboard clients.
        outbound_user_packet: Optional[Dict[str, Any]] = None
        if display_command or checked_attachments:
            outbound_user_packet = {
                "type": "USER_COMMAND",
                "channel": "dashboard",
                "text": display_command,
                "media": checked_attachments,
                "request_id": request_id,
            }
            self._persist_runtime_packet(bot_id, outbound_user_packet)
        loop = self._get_main_loop(app_state)
        if loop and loop.is_running() and outbound_user_packet:
            self._broadcast_packet(bot_id, outbound_user_packet, loop)
        # deliver_command returns an error detail string, or None on success.
        detail = runtime_provider.deliver_command(bot_id=bot_id, command=delivery_command, media=delivery_media)
        if detail is not None:
            self._fail_latest_usage(session, bot_id, detail or "command delivery failed")
            self._record_activity_event(
                session,
                bot_id,
                "command_failed",
                request_id=request_id,
                channel="dashboard",
                detail=(detail or "command delivery failed")[:400],
            )
            session.commit()
            if loop and loop.is_running():
                self._broadcast_packet(
                    bot_id,
                    {
                        "type": "AGENT_STATE",
                        "channel": "dashboard",
                        "payload": {
                            "state": "ERROR",
                            "action_msg": detail or "command delivery failed",
                        },
                    },
                    loop,
                )
            raise HTTPException(
                status_code=502,
                detail=f"Failed to deliver command to bot dashboard channel{': ' + detail if detail else ''}",
            )
        # Edge runtimes push replies via monitor packets; start a replay thread.
        self._maybe_sync_edge_monitor_packets(
            runtime_provider=runtime_provider,
            bot_id=bot_id,
            request_id=request_id,
            after_seq=self._resolve_monitor_baseline_seq(runtime_provider, bot_id),
            app_state=app_state,
        )
        return {"success": True}
    def _maybe_sync_edge_monitor_packets(
        self,
        *,
        runtime_provider: RuntimeProvider,
        bot_id: str,
        request_id: str,
        after_seq: int,
        app_state: Any,
    ) -> None:
        """Spawn (at most one per bot/request) a daemon thread replaying edge
        monitor packets; a no-op for non-edge providers."""
        # Provider dispatch is by class name; only EdgeRuntimeProvider syncs.
        provider_name = runtime_provider.__class__.__name__.strip().lower()
        if provider_name != "edgeruntimeprovider":
            return
        bot_key = str(bot_id or "").strip()
        if not bot_key:
            return
        # Fallback key keeps distinct baselines distinct when request_id is empty.
        request_key = str(request_id or "").strip() or f"seq:{int(after_seq or 0)}"
        thread_key = (bot_key, request_key)
        with self._monitor_sync_lock:
            existing = self._monitor_sync_threads.get(thread_key)
            if existing and existing.is_alive():
                return
            thread = threading.Thread(
                target=self._sync_edge_monitor_packets,
                args=(runtime_provider, bot_key, request_id, after_seq, app_state),
                daemon=True,
            )
            self._monitor_sync_threads[thread_key] = thread
            thread.start()
    def sync_edge_monitor_packets(
        self,
        *,
        runtime_provider: RuntimeProvider,
        bot_id: str,
        request_id: str,
        app_state: Any,
    ) -> None:
        """Public entry point: replay packets from the beginning (seq 0)."""
        self._maybe_sync_edge_monitor_packets(
            runtime_provider=runtime_provider,
            bot_id=bot_id,
            request_id=request_id,
            after_seq=0,
            app_state=app_state,
        )
    def _sync_edge_monitor_packets(
        self,
        runtime_provider: RuntimeProvider,
        bot_id: str,
        request_id: str,
        after_seq: int,
        app_state: Any,
    ) -> None:
        """Thread body: poll monitor packets for up to ~18s, persisting and
        broadcasting each unseen one; always deregisters itself on exit."""
        loop = self._get_main_loop(app_state)
        last_seq = max(0, int(after_seq or 0))
        # Hard stop after 18 seconds of polling.
        deadline = time.monotonic() + 18.0
        request_id_norm = str(request_id or "").strip()
        try:
            while time.monotonic() < deadline:
                try:
                    rows = runtime_provider.get_monitor_packets(bot_id=bot_id, after_seq=last_seq, limit=200)
                except Exception:
                    # Transient provider failure: back off and retry.
                    time.sleep(0.5)
                    continue
                for row in rows or []:
                    try:
                        seq = int(row.get("seq") or 0)
                    except Exception:
                        seq = 0
                    packet = dict(row.get("packet") or {})
                    if not packet:
                        continue
                    packet_type = str(packet.get("type") or "").strip().upper()
                    packet_request_id = str(packet.get("request_id") or "").strip()
                    # USER_COMMAND echoes were already broadcast locally.
                    if packet_type == "USER_COMMAND":
                        continue
                    # Replies tagged for a different request are skipped.
                    if packet_type in {"ASSISTANT_MESSAGE", "BUS_EVENT"} and request_id_norm and packet_request_id and packet_request_id != request_id_norm:
                        continue
                    # Per-bot dedupe across concurrent sync threads.
                    if not self._mark_monitor_seq(bot_id, seq):
                        continue
                    last_seq = max(last_seq, seq)
                    self._persist_runtime_packet(bot_id, packet)
                    if loop and loop.is_running():
                        self._broadcast_packet(bot_id, packet, loop)
                time.sleep(0.5)
        finally:
            # Remove our own registration, but only if we still own the slot.
            with self._monitor_sync_lock:
                request_key = request_id_norm or f"seq:{int(after_seq or 0)}"
                existing = self._monitor_sync_threads.get((bot_id, request_key))
                if existing is threading.current_thread():
                    self._monitor_sync_threads.pop((bot_id, request_key), None)
    def _resolve_monitor_baseline_seq(self, runtime_provider: RuntimeProvider, bot_id: str) -> int:
        """Return the starting seq for a new sync: the max of the provider's
        latest seq and the locally recorded one; local-only on provider error."""
        try:
            rows = runtime_provider.get_monitor_packets(bot_id=bot_id, after_seq=0, limit=1000)
        except Exception:
            return self._get_monitor_seq(bot_id)
        latest_seq = 0
        for row in rows or []:
            try:
                seq = int(row.get("seq") or 0)
            except Exception:
                seq = 0
            latest_seq = max(latest_seq, seq)
        return max(latest_seq, self._get_monitor_seq(bot_id))
    def _mark_monitor_seq(self, bot_id: str, seq: int) -> bool:
        """Atomically claim seq for bot_id; False when already seen or invalid."""
        if seq <= 0:
            return False
        bot_key = str(bot_id or "").strip()
        with self._monitor_sync_seq_lock:
            current = int(self._monitor_sync_last_seq.get(bot_key, 0) or 0)
            if seq <= current:
                return False
            self._monitor_sync_last_seq[bot_key] = seq
        return True
    def _get_monitor_seq(self, bot_id: str) -> int:
        """Return the highest monitor seq recorded locally for bot_id (0 if none)."""
        bot_key = str(bot_id or "").strip()
        with self._monitor_sync_seq_lock:
            return int(self._monitor_sync_last_seq.get(bot_key, 0) or 0)
    def _build_delivery_command(self, *, command: str, checked_attachments: List[str]) -> str:
        """Compose the command text actually sent to the runtime.

        Without attachments the command passes through unchanged. With
        attachments, an attachment list plus handling instructions is
        appended; all-visual attachments (images/videos) get a dedicated
        Chinese prompt, everything else an English one. Prompt strings are
        part of the delivery contract — do not reword them casually.
        """
        display_command = command if command else "[attachment message]"
        delivery_command = display_command
        if not checked_attachments:
            return delivery_command
        attachment_block = "\n".join(f"- {p}" for p in checked_attachments)
        all_visual = all(self._is_visual_attachment_path(p) for p in checked_attachments)
        if all_visual:
            has_video = any(self._is_video_attachment_path(p) for p in checked_attachments)
            media_label = "图片/视频" if has_video else "图片"
            capability_hint = (
                "1) 附件已随请求附带;图片在可用时可直接作为多模态输入理解,视频请按附件路径处理。\n"
                if has_video
                else "1) 附件中的图片已作为多模态输入提供,优先直接理解并回答。\n"
            )
            if command:
                return (
                    f"{command}\n\n"
                    "[Attached files]\n"
                    f"{attachment_block}\n\n"
                    "【附件处理要求】\n"
                    f"{capability_hint}"
                    "2) 若当前模型或接口不支持直接理解该附件,请明确说明后再调用工具解析。\n"
                    "3) 除非用户明确要求,不要先调用工具读取附件文件。\n"
                    "4) 回复语言必须遵循 USER.md若未指定则与用户当前输入语言保持一致。\n"
                    "5) 仅基于可见内容回答;看不清或无法确认的部分请明确说明,不要猜测。"
                )
            return (
                "请先处理已附带的附件列表:\n"
                f"{attachment_block}\n\n"
                f"请直接分析已附带的{media_label}并总结关键信息。\n"
                f"{'图片在可用时可直接作为多模态输入理解,视频请按附件路径处理。' if has_video else ''}\n"
                "若当前模型或接口不支持直接理解该附件,请明确说明后再调用工具解析。\n"
                "回复语言必须遵循 USER.md若未指定则与用户当前输入语言保持一致。\n"
                "仅基于可见内容回答;看不清或无法确认的部分请明确说明,不要猜测。"
            )
        # Mixed/non-visual attachments: only append the list when the command
        # does not already mention every attachment path.
        command_has_paths = all(p in command for p in checked_attachments) if command else False
        if command and not command_has_paths:
            return (
                f"{command}\n\n"
                "[Attached files]\n"
                f"{attachment_block}\n\n"
                "Please process the attached file(s) listed above when answering this request.\n"
                "Reply language must follow USER.md. If not specified, use the same language as the user input."
            )
        if not command:
            return (
                "Please process the uploaded file(s) listed below:\n"
                f"{attachment_block}\n\n"
                "Reply language must follow USER.md. If not specified, use the same language as the user input."
            )
        return delivery_command

View File

@ -0,0 +1,320 @@
import json
import os
import re
from datetime import datetime
from typing import Any, Callable, Dict, List
from fastapi import HTTPException
from sqlmodel import Session
from models.bot import BotInstance
# Injected-collaborator signatures for BotConfigStateService.
ReadEdgeStateData = Callable[..., Dict[str, Any]]  # keyword args: bot_id, state_key, default_payload
WriteEdgeStateData = Callable[..., bool]  # returns True when the edge store accepted the write
ReadBotConfig = Callable[[str], Dict[str, Any]]  # bot_id -> parsed bot config dict
WriteBotConfig = Callable[[str, Dict[str, Any]], None]  # persist the config dict for bot_id
InvalidateBotCache = Callable[[str], None]  # drop cached bot detail for bot_id
PathResolver = Callable[[str], str]  # bot_id -> local JSON store path
NormalizeEnvParams = Callable[[Any], Dict[str, str]]  # sanitize env mapping to str->str
class BotConfigStateService:
    """Per-bot env, MCP-server and cron-job state management.

    Env params and cron jobs prefer the edge state store (via the injected
    read/write callables) and fall back to local JSON files; writes to the
    local fallback are atomic (tmp file + ``os.replace``). MCP servers are
    kept under ``tools.mcpServers`` in the bot config file. Every ``*_for_bot``
    variant validates the bot exists (404 otherwise) before delegating.
    """
    # MCP server names: 1-64 chars of alnum, dot, underscore or hyphen.
    _MCP_SERVER_NAME_RE = re.compile(r"^[A-Za-z0-9._-]{1,64}$")
    def __init__(
        self,
        *,
        read_edge_state_data: ReadEdgeStateData,
        write_edge_state_data: WriteEdgeStateData,
        read_bot_config: ReadBotConfig,
        write_bot_config: WriteBotConfig,
        invalidate_bot_detail_cache: InvalidateBotCache,
        env_store_path: PathResolver,
        cron_store_path: PathResolver,
        normalize_env_params: NormalizeEnvParams,
    ) -> None:
        """Store the injected collaborators; no I/O happens here."""
        self._read_edge_state_data = read_edge_state_data
        self._write_edge_state_data = write_edge_state_data
        self._read_bot_config = read_bot_config
        self._write_bot_config = write_bot_config
        self._invalidate_bot_detail_cache = invalidate_bot_detail_cache
        self._env_store_path = env_store_path
        self._cron_store_path = cron_store_path
        self._normalize_env_params = normalize_env_params
    def _require_bot(self, *, session: Session, bot_id: str) -> BotInstance:
        """Load the bot or raise 404; used by the *_for_bot entry points."""
        bot = session.get(BotInstance, bot_id)
        if not bot:
            raise HTTPException(status_code=404, detail="Bot not found")
        return bot
    def read_env_store(self, bot_id: str) -> Dict[str, str]:
        """Read env params from the edge store, falling back to the local JSON
        file; returns {} when neither source yields anything usable."""
        data = self._read_edge_state_data(bot_id=bot_id, state_key="env", default_payload={})
        if data:
            return self._normalize_env_params(data)
        path = self._env_store_path(bot_id)
        if not os.path.isfile(path):
            return {}
        try:
            with open(path, "r", encoding="utf-8") as file:
                payload = json.load(file)
            return self._normalize_env_params(payload)
        except Exception:
            # Corrupt/unreadable store is treated as empty, not an error.
            return {}
    def write_env_store(self, bot_id: str, env_params: Dict[str, str]) -> None:
        """Persist normalized env params to the edge store, or atomically to
        the local JSON fallback when the edge store declines the write."""
        normalized_env = self._normalize_env_params(env_params)
        if self._write_edge_state_data(bot_id=bot_id, state_key="env", data=normalized_env):
            return
        path = self._env_store_path(bot_id)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        # Write-then-rename keeps the store consistent under crashes.
        tmp_path = f"{path}.tmp"
        with open(tmp_path, "w", encoding="utf-8") as file:
            json.dump(normalized_env, file, ensure_ascii=False, indent=2)
        os.replace(tmp_path, path)
    def get_env_params(self, bot_id: str) -> Dict[str, Any]:
        """Return the env params API payload for bot_id."""
        return {
            "bot_id": bot_id,
            "env_params": self.read_env_store(bot_id),
        }
    def get_env_params_for_bot(self, *, session: Session, bot_id: str) -> Dict[str, Any]:
        """Bot-validated variant of get_env_params (404 when the bot is unknown)."""
        self._require_bot(session=session, bot_id=bot_id)
        return self.get_env_params(bot_id)
    def update_env_params(self, bot_id: str, env_params: Any) -> Dict[str, Any]:
        """Replace the stored env params; the bot must restart to pick them up."""
        normalized = self._normalize_env_params(env_params)
        self.write_env_store(bot_id, normalized)
        self._invalidate_bot_detail_cache(bot_id)
        return {
            "status": "updated",
            "bot_id": bot_id,
            "env_params": normalized,
            "restart_required": True,
        }
    def update_env_params_for_bot(self, *, session: Session, bot_id: str, env_params: Any) -> Dict[str, Any]:
        """Bot-validated variant of update_env_params (404 when the bot is unknown)."""
        self._require_bot(session=session, bot_id=bot_id)
        return self.update_env_params(bot_id, env_params)
    def normalize_mcp_servers(self, raw: Any) -> Dict[str, Dict[str, Any]]:
        """Sanitize a raw MCP server mapping.

        Drops entries with invalid names, non-dict configs, or a missing URL;
        coerces the transport type to streamableHttp/sse, header values to
        strings, and clamps toolTimeout to 1..600 seconds (default 60).
        """
        if not isinstance(raw, dict):
            return {}
        rows: Dict[str, Dict[str, Any]] = {}
        for server_name, server_cfg in raw.items():
            name = str(server_name or "").strip()
            if not name or not self._MCP_SERVER_NAME_RE.fullmatch(name):
                continue
            if not isinstance(server_cfg, dict):
                continue
            url = str(server_cfg.get("url") or "").strip()
            if not url:
                continue
            transport_type = str(server_cfg.get("type") or "streamableHttp").strip()
            if transport_type not in {"streamableHttp", "sse"}:
                transport_type = "streamableHttp"
            headers_raw = server_cfg.get("headers")
            headers: Dict[str, str] = {}
            if isinstance(headers_raw, dict):
                for key, value in headers_raw.items():
                    header_key = str(key or "").strip()
                    if not header_key:
                        continue
                    headers[header_key] = str(value or "").strip()
            timeout_raw = server_cfg.get("toolTimeout", 60)
            try:
                timeout = int(timeout_raw)
            except Exception:
                timeout = 60
            timeout = max(1, min(timeout, 600))
            rows[name] = {
                "type": transport_type,
                "url": url,
                "headers": headers,
                "toolTimeout": timeout,
            }
        return rows
    def _merge_mcp_servers_preserving_extras(
        self,
        current_raw: Any,
        normalized: Dict[str, Dict[str, Any]],
    ) -> Dict[str, Dict[str, Any]]:
        """Overlay normalized fields onto the current config entries so that
        unknown extra keys already present in the config survive the update."""
        current_map = current_raw if isinstance(current_raw, dict) else {}
        merged: Dict[str, Dict[str, Any]] = {}
        for name, normalized_cfg in normalized.items():
            base = current_map.get(name)
            base_cfg = dict(base) if isinstance(base, dict) else {}
            next_cfg = dict(base_cfg)
            next_cfg.update(normalized_cfg)
            merged[name] = next_cfg
        return merged
    def _sanitize_mcp_servers_in_config_data(self, config_data: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
        """Normalize tools.mcpServers in-place inside config_data and return
        the merged mapping that was written back."""
        if not isinstance(config_data, dict):
            return {}
        tools_cfg = config_data.get("tools")
        if not isinstance(tools_cfg, dict):
            tools_cfg = {}
        current_raw = tools_cfg.get("mcpServers")
        normalized = self.normalize_mcp_servers(current_raw)
        merged = self._merge_mcp_servers_preserving_extras(current_raw, normalized)
        tools_cfg["mcpServers"] = merged
        config_data["tools"] = tools_cfg
        return merged
    def get_mcp_config(self, bot_id: str) -> Dict[str, Any]:
        """Return the normalized MCP server mapping from the bot config."""
        config_data = self._read_bot_config(bot_id)
        tools_cfg = config_data.get("tools") if isinstance(config_data, dict) else {}
        if not isinstance(tools_cfg, dict):
            tools_cfg = {}
        mcp_servers = self.normalize_mcp_servers(tools_cfg.get("mcpServers"))
        return {
            "bot_id": bot_id,
            "mcp_servers": mcp_servers,
            "locked_servers": [],
            "restart_required": True,
        }
    def get_mcp_config_for_bot(self, *, session: Session, bot_id: str) -> Dict[str, Any]:
        """Bot-validated variant of get_mcp_config (404 when the bot is unknown)."""
        self._require_bot(session=session, bot_id=bot_id)
        return self.get_mcp_config(bot_id)
    def update_mcp_config(self, bot_id: str, mcp_servers: Any) -> Dict[str, Any]:
        """Replace tools.mcpServers in the bot config, preserving unknown extra
        keys on existing entries; returns the normalized view of what was saved."""
        config_data = self._read_bot_config(bot_id)
        if not isinstance(config_data, dict):
            config_data = {}
        tools_cfg = config_data.get("tools")
        if not isinstance(tools_cfg, dict):
            tools_cfg = {}
        normalized_mcp_servers = self.normalize_mcp_servers(mcp_servers or {})
        current_mcp_servers = tools_cfg.get("mcpServers")
        merged_mcp_servers = self._merge_mcp_servers_preserving_extras(current_mcp_servers, normalized_mcp_servers)
        tools_cfg["mcpServers"] = merged_mcp_servers
        config_data["tools"] = tools_cfg
        # Second sanitize pass guarantees the persisted mapping is clean even
        # if the merge re-introduced invalid entries.
        sanitized_after_save = self._sanitize_mcp_servers_in_config_data(config_data)
        self._write_bot_config(bot_id, config_data)
        self._invalidate_bot_detail_cache(bot_id)
        return {
            "status": "updated",
            "bot_id": bot_id,
            "mcp_servers": self.normalize_mcp_servers(sanitized_after_save),
            "locked_servers": [],
            "restart_required": True,
        }
    def update_mcp_config_for_bot(self, *, session: Session, bot_id: str, mcp_servers: Any) -> Dict[str, Any]:
        """Bot-validated variant of update_mcp_config (404 when the bot is unknown)."""
        self._require_bot(session=session, bot_id=bot_id)
        return self.update_mcp_config(bot_id, mcp_servers)
    def read_cron_store(self, bot_id: str) -> Dict[str, Any]:
        """Read the cron store ({"version": int>=1, "jobs": list}) from the edge
        store, falling back to the local JSON file; empty store on any failure."""
        data = self._read_edge_state_data(
            bot_id=bot_id,
            state_key="cron",
            default_payload={"version": 1, "jobs": []},
        )
        if isinstance(data, dict) and data:
            jobs = data.get("jobs")
            if not isinstance(jobs, list):
                jobs = []
            try:
                version = int(data.get("version", 1) or 1)
            except Exception:
                version = 1
            return {"version": max(1, version), "jobs": jobs}
        path = self._cron_store_path(bot_id)
        if not os.path.isfile(path):
            return {"version": 1, "jobs": []}
        try:
            with open(path, "r", encoding="utf-8") as file:
                payload = json.load(file)
            if not isinstance(payload, dict):
                return {"version": 1, "jobs": []}
            jobs = payload.get("jobs")
            if not isinstance(jobs, list):
                payload["jobs"] = []
            if "version" not in payload:
                payload["version"] = 1
            return payload
        except Exception:
            return {"version": 1, "jobs": []}
    def write_cron_store(self, bot_id: str, store: Dict[str, Any]) -> None:
        """Persist the cron store (shape-normalized first) to the edge store,
        or atomically to the local JSON fallback."""
        normalized_store = dict(store if isinstance(store, dict) else {})
        jobs = normalized_store.get("jobs")
        if not isinstance(jobs, list):
            normalized_store["jobs"] = []
        try:
            normalized_store["version"] = max(1, int(normalized_store.get("version", 1) or 1))
        except Exception:
            normalized_store["version"] = 1
        if self._write_edge_state_data(bot_id=bot_id, state_key="cron", data=normalized_store):
            return
        path = self._cron_store_path(bot_id)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        tmp_path = f"{path}.tmp"
        with open(tmp_path, "w", encoding="utf-8") as file:
            json.dump(normalized_store, file, ensure_ascii=False, indent=2)
        os.replace(tmp_path, path)
    def list_cron_jobs(self, bot_id: str, include_disabled: bool = True) -> Dict[str, Any]:
        """List cron jobs sorted by next scheduled run (jobs without a
        nextRunAtMs sort last via a 2**62 sentinel)."""
        store = self.read_cron_store(bot_id)
        rows = []
        for row in store.get("jobs", []):
            if not isinstance(row, dict):
                continue
            enabled = bool(row.get("enabled", True))
            if not include_disabled and not enabled:
                continue
            rows.append(row)
        rows.sort(key=lambda value: int(((value.get("state") or {}).get("nextRunAtMs")) or 2**62))
        return {"bot_id": bot_id, "version": int(store.get("version", 1) or 1), "jobs": rows}
    def list_cron_jobs_for_bot(self, *, session: Session, bot_id: str, include_disabled: bool = True) -> Dict[str, Any]:
        """Bot-validated variant of list_cron_jobs (404 when the bot is unknown)."""
        self._require_bot(session=session, bot_id=bot_id)
        return self.list_cron_jobs(bot_id, include_disabled=include_disabled)
    def stop_cron_job(self, bot_id: str, job_id: str) -> Dict[str, Any]:
        """Disable (but keep) a cron job; raises 404 when job_id is unknown."""
        store = self.read_cron_store(bot_id)
        jobs = store.get("jobs", [])
        if not isinstance(jobs, list):
            jobs = []
        found = None
        for row in jobs:
            if isinstance(row, dict) and str(row.get("id")) == job_id:
                found = row
                break
        if not found:
            raise HTTPException(status_code=404, detail="Cron job not found")
        found["enabled"] = False
        # Fix: the previous `datetime.utcnow().timestamp()` produced a naive
        # datetime whose .timestamp() is interpreted in the host's *local*
        # timezone, skewing the stored epoch-ms by the UTC offset on non-UTC
        # hosts. An aware UTC datetime yields the correct epoch. Local import
        # because the module only imports `datetime` from the datetime module.
        from datetime import timezone
        found["updatedAtMs"] = int(datetime.now(timezone.utc).timestamp() * 1000)
        self.write_cron_store(bot_id, {"version": int(store.get("version", 1) or 1), "jobs": jobs})
        return {"status": "stopped", "job_id": job_id}
    def stop_cron_job_for_bot(self, *, session: Session, bot_id: str, job_id: str) -> Dict[str, Any]:
        """Bot-validated variant of stop_cron_job (404 when the bot is unknown)."""
        self._require_bot(session=session, bot_id=bot_id)
        return self.stop_cron_job(bot_id, job_id)
    def delete_cron_job(self, bot_id: str, job_id: str) -> Dict[str, Any]:
        """Remove a cron job entirely; raises 404 when job_id is unknown."""
        store = self.read_cron_store(bot_id)
        jobs = store.get("jobs", [])
        if not isinstance(jobs, list):
            jobs = []
        kept = [row for row in jobs if not (isinstance(row, dict) and str(row.get("id")) == job_id)]
        if len(kept) == len(jobs):
            raise HTTPException(status_code=404, detail="Cron job not found")
        self.write_cron_store(bot_id, {"version": int(store.get("version", 1) or 1), "jobs": kept})
        return {"status": "deleted", "job_id": job_id}
    def delete_cron_job_for_bot(self, *, session: Session, bot_id: str, job_id: str) -> Dict[str, Any]:
        """Bot-validated variant of delete_cron_job (404 when the bot is unknown)."""
        self._require_bot(session=session, bot_id=bot_id)
        return self.delete_cron_job(bot_id, job_id)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,611 @@
import logging
import os
import re
import shutil
from datetime import datetime
from typing import Any, Callable, Dict, Optional
from fastapi import HTTPException
from sqlmodel import Session, select
from core.settings import (
BOTS_WORKSPACE_ROOT,
DEFAULT_AGENTS_MD,
DEFAULT_IDENTITY_MD,
DEFAULT_SOUL_MD,
DEFAULT_TOOLS_MD,
DEFAULT_USER_MD,
)
from models.bot import BotInstance, BotMessage
from models.platform import BotActivityEvent, BotRequestUsage
from models.skill import BotSkillInstall
from models.topic import TopicItem, TopicTopic
from providers.target import ProviderTarget, normalize_provider_target, provider_target_to_dict
from services.runtime_service import RuntimeService
# Injected-collaborator signatures for BotLifecycleService; grouped by concern.
# Provider-target resolution and validation:
RefreshBotRuntimeStatus = Callable[[Any, BotInstance], str]
ResolveBotProviderTarget = Callable[[BotInstance], ProviderTarget]
ProviderTargetFromNode = Callable[[Optional[str]], Optional[ProviderTarget]]
DefaultProviderTarget = Callable[[], ProviderTarget]
EnsureProviderTargetSupported = Callable[[ProviderTarget], None]
RequireReadyImage = Callable[..., Any]
# Workspace/config synchronization and serialization:
SyncBotWorkspaceViaProvider = Callable[..., None]
ApplyProviderTargetToBot = Callable[[BotInstance, ProviderTarget], None]
SerializeProviderTargetSummary = Callable[[ProviderTarget], Dict[str, Any]]
SerializeBot = Callable[[BotInstance], Dict[str, Any]]
NodeDisplayName = Callable[[str], str]
InvalidateBotCache = Callable[[str], None]
RecordActivityEvent = Callable[..., None]
# Normalization helpers:
NormalizeEnvParams = Callable[[Any], Dict[str, str]]
NormalizeSystemTimezone = Callable[[Any], str]
NormalizeResourceLimits = Callable[[Any, Any, Any], Dict[str, Any]]
WriteEnvStore = Callable[[str, Dict[str, str]], None]
ResolveBotEnvParams = Callable[[str], Dict[str, str]]
ClearProviderTargetOverride = Callable[[str], None]
NormalizeInitialChannels = Callable[[str, Any], Any]
# Edge-node error handling and metadata:
ExpectedEdgeOfflineError = Callable[[Exception], bool]
SummarizeEdgeException = Callable[[Exception], str]
ResolveEdgeClient = Callable[[ProviderTarget], Any]
NodeMetadata = Callable[[str], Dict[str, Any]]
LogEdgeFailure = Callable[..., None]
InvalidateBotMessagesCache = Callable[[str], None]
class BotLifecycleService:
    def __init__(
        self,
        *,
        bot_id_pattern: re.Pattern[str],
        runtime_service: RuntimeService,
        refresh_bot_runtime_status: RefreshBotRuntimeStatus,
        resolve_bot_provider_target: ResolveBotProviderTarget,
        provider_target_from_node: ProviderTargetFromNode,
        default_provider_target: DefaultProviderTarget,
        ensure_provider_target_supported: EnsureProviderTargetSupported,
        require_ready_image: RequireReadyImage,
        sync_bot_workspace_via_provider: SyncBotWorkspaceViaProvider,
        apply_provider_target_to_bot: ApplyProviderTargetToBot,
        serialize_provider_target_summary: SerializeProviderTargetSummary,
        serialize_bot: SerializeBot,
        node_display_name: NodeDisplayName,
        invalidate_bot_detail_cache: InvalidateBotCache,
        record_activity_event: RecordActivityEvent,
        normalize_env_params: NormalizeEnvParams,
        normalize_system_timezone: NormalizeSystemTimezone,
        normalize_resource_limits: NormalizeResourceLimits,
        write_env_store: WriteEnvStore,
        resolve_bot_env_params: ResolveBotEnvParams,
        clear_provider_target_override: ClearProviderTargetOverride,
        normalize_initial_channels: NormalizeInitialChannels,
        is_expected_edge_offline_error: ExpectedEdgeOfflineError,
        summarize_edge_exception: SummarizeEdgeException,
        resolve_edge_client: ResolveEdgeClient,
        node_metadata: NodeMetadata,
        log_edge_failure: LogEdgeFailure,
        invalidate_bot_messages_cache: InvalidateBotMessagesCache,
        logger: logging.Logger,
    ) -> None:
        """Store all injected collaborators as private attributes.

        The service is fully dependency-injected: every hook is supplied by
        the composition root, keeping this class free of module globals and
        easy to rewire in tests.  No I/O happens here — assignments only.
        """
        self._bot_id_pattern = bot_id_pattern
        self._runtime_service = runtime_service
        self._refresh_bot_runtime_status = refresh_bot_runtime_status
        self._resolve_bot_provider_target = resolve_bot_provider_target
        self._provider_target_from_node = provider_target_from_node
        self._default_provider_target = default_provider_target
        self._ensure_provider_target_supported = ensure_provider_target_supported
        self._require_ready_image = require_ready_image
        self._sync_bot_workspace_via_provider = sync_bot_workspace_via_provider
        self._apply_provider_target_to_bot = apply_provider_target_to_bot
        self._serialize_provider_target_summary = serialize_provider_target_summary
        self._serialize_bot = serialize_bot
        self._node_display_name = node_display_name
        self._invalidate_bot_detail_cache = invalidate_bot_detail_cache
        self._record_activity_event = record_activity_event
        self._normalize_env_params = normalize_env_params
        self._normalize_system_timezone = normalize_system_timezone
        self._normalize_resource_limits = normalize_resource_limits
        self._write_env_store = write_env_store
        self._resolve_bot_env_params = resolve_bot_env_params
        self._clear_provider_target_override = clear_provider_target_override
        self._normalize_initial_channels = normalize_initial_channels
        self._is_expected_edge_offline_error = is_expected_edge_offline_error
        self._summarize_edge_exception = summarize_edge_exception
        self._resolve_edge_client = resolve_edge_client
        self._node_metadata = node_metadata
        self._log_edge_failure = log_edge_failure
        self._invalidate_bot_messages_cache = invalidate_bot_messages_cache
        self._logger = logger
def _require_bot(self, *, session: Session, bot_id: str) -> BotInstance:
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return bot
    def create_bot(self, *, session: Session, payload: Any) -> Dict[str, Any]:
        """Create a bot row, write its env store, and sync its workspace.

        Returns the serialized bot. Raises HTTPException 400 for an
        invalid id or timezone, 409 for a duplicate id, and 502 when
        workspace initialization fails on a reachable node (the
        half-created row is rolled back in that case). An offline edge
        node is tolerated: the bot is kept and sync is marked pending.
        """
        # Validate and normalize the requested bot id.
        normalized_bot_id = str(getattr(payload, "id", "") or "").strip()
        if not normalized_bot_id:
            raise HTTPException(status_code=400, detail="Bot ID is required")
        if not self._bot_id_pattern.fullmatch(normalized_bot_id):
            raise HTTPException(status_code=400, detail="Bot ID can only contain letters, numbers, and underscores")
        if session.get(BotInstance, normalized_bot_id):
            raise HTTPException(status_code=409, detail=f"Bot ID already exists: {normalized_bot_id}")
        # Environment variables: caller-provided params plus a normalized TZ.
        normalized_env_params = self._normalize_env_params(getattr(payload, "env_params", None))
        try:
            normalized_env_params["TZ"] = self._normalize_system_timezone(getattr(payload, "system_timezone", None))
        except ValueError as exc:
            raise HTTPException(status_code=400, detail=str(exc)) from exc
        # Resolve the execution target; fall back to the node's default
        # target, then the global default.
        provider_target = normalize_provider_target(
            {
                "node_id": getattr(payload, "node_id", None),
                "transport_kind": getattr(payload, "transport_kind", None),
                "runtime_kind": getattr(payload, "runtime_kind", None),
                "core_adapter": getattr(payload, "core_adapter", None),
            },
            fallback=self._provider_target_from_node(getattr(payload, "node_id", None)) or self._default_provider_target(),
        )
        self._ensure_provider_target_supported(provider_target)
        normalized_image_tag = str(getattr(payload, "image_tag", "") or "").strip()
        # Docker runtimes need a ready, locally-present image up front.
        if provider_target.runtime_kind == "docker":
            self._require_ready_image(session, normalized_image_tag, require_local_image=True)
        bot = BotInstance(
            id=normalized_bot_id,
            name=getattr(payload, "name", None),
            enabled=bool(getattr(payload, "enabled", True)) if getattr(payload, "enabled", None) is not None else True,
            access_password="",
            image_tag=normalized_image_tag,
            node_id=provider_target.node_id,
            transport_kind=provider_target.transport_kind,
            runtime_kind=provider_target.runtime_kind,
            core_adapter=provider_target.core_adapter,
            workspace_dir=os.path.join(BOTS_WORKSPACE_ROOT, normalized_bot_id),
        )
        # Persist the row first so the workspace sync below can reference it.
        session.add(bot)
        session.commit()
        session.refresh(bot)
        resource_limits = self._normalize_resource_limits(
            getattr(payload, "cpu_cores", None),
            getattr(payload, "memory_mb", None),
            getattr(payload, "storage_gb", None),
        )
        workspace_synced = True
        sync_error_detail = ""
        try:
            self._write_env_store(normalized_bot_id, normalized_env_params)
            self._sync_bot_workspace_via_provider(
                session,
                bot,
                target_override=provider_target,
                channels_override=self._normalize_initial_channels(normalized_bot_id, getattr(payload, "channels", None)),
                global_delivery_override={
                    "sendProgress": bool(getattr(payload, "send_progress", None))
                    if getattr(payload, "send_progress", None) is not None
                    else False,
                    "sendToolHints": bool(getattr(payload, "send_tool_hints", None))
                    if getattr(payload, "send_tool_hints", None) is not None
                    else False,
                },
                runtime_overrides={
                    "llm_provider": getattr(payload, "llm_provider", None),
                    "llm_model": getattr(payload, "llm_model", None),
                    "api_key": getattr(payload, "api_key", None),
                    "api_base": getattr(payload, "api_base", "") or "",
                    "temperature": getattr(payload, "temperature", None),
                    "top_p": getattr(payload, "top_p", None),
                    "max_tokens": getattr(payload, "max_tokens", None),
                    "cpu_cores": resource_limits["cpu_cores"],
                    "memory_mb": resource_limits["memory_mb"],
                    "storage_gb": resource_limits["storage_gb"],
                    "node_id": provider_target.node_id,
                    "transport_kind": provider_target.transport_kind,
                    "runtime_kind": provider_target.runtime_kind,
                    "core_adapter": provider_target.core_adapter,
                    # system_prompt and soul_md mirror each other, each
                    # falling back to the other and then to the default.
                    "system_prompt": getattr(payload, "system_prompt", None) or getattr(payload, "soul_md", None) or DEFAULT_SOUL_MD,
                    "soul_md": getattr(payload, "soul_md", None) or getattr(payload, "system_prompt", None) or DEFAULT_SOUL_MD,
                    "agents_md": getattr(payload, "agents_md", None) or DEFAULT_AGENTS_MD,
                    "user_md": getattr(payload, "user_md", None) or DEFAULT_USER_MD,
                    "tools_md": getattr(payload, "tools_md", None) or DEFAULT_TOOLS_MD,
                    "identity_md": getattr(payload, "identity_md", None) or DEFAULT_IDENTITY_MD,
                    "send_progress": bool(getattr(payload, "send_progress", None))
                    if getattr(payload, "send_progress", None) is not None
                    else False,
                    "send_tool_hints": bool(getattr(payload, "send_tool_hints", None))
                    if getattr(payload, "send_tool_hints", None) is not None
                    else False,
                },
            )
        except Exception as exc:
            if self._is_expected_edge_offline_error(exc):
                # Offline node: keep the row and flag the sync as pending.
                workspace_synced = False
                sync_error_detail = self._summarize_edge_exception(exc)
                self._logger.info(
                    "Create bot pending sync due to offline edge bot_id=%s node=%s detail=%s",
                    normalized_bot_id,
                    provider_target.node_id,
                    sync_error_detail,
                )
            else:
                # Any other failure: best-effort rollback of the half-created
                # bot, then surface a 502 to the caller.
                detail = self._summarize_edge_exception(exc)
                try:
                    doomed = session.get(BotInstance, normalized_bot_id)
                    if doomed is not None:
                        session.delete(doomed)
                        session.commit()
                        self._clear_provider_target_override(normalized_bot_id)
                except Exception:
                    session.rollback()
                raise HTTPException(status_code=502, detail=f"Failed to initialize bot workspace: {detail}") from exc
        session.refresh(bot)
        self._record_activity_event(
            session,
            normalized_bot_id,
            "bot_created",
            channel="system",
            detail=f"Bot {normalized_bot_id} created",
            metadata={
                "image_tag": normalized_image_tag,
                "workspace_synced": workspace_synced,
                "sync_error": sync_error_detail if not workspace_synced else "",
            },
        )
        if not workspace_synced:
            self._record_activity_event(
                session,
                normalized_bot_id,
                "bot_warning",
                channel="system",
                detail="Bot created, but node is offline. Workspace sync is pending.",
                metadata={"sync_error": sync_error_detail, "node_id": provider_target.node_id},
            )
        session.commit()
        self._invalidate_bot_detail_cache(normalized_bot_id)
        return self._serialize_bot(bot)
    def update_bot(self, *, session: Session, bot_id: str, payload: Any) -> Dict[str, Any]:
        """Apply a partial update to a bot.

        DB columns (name/enabled), env params, and runtime overrides are
        handled through separate channels; image/target changes are
        rejected here and must go through the deploy endpoint. The env
        store is restored to its previous contents if the DB commit fails.
        """
        bot = self._require_bot(session=session, bot_id=bot_id)
        update_data = payload.model_dump(exclude_unset=True)
        # env params and timezone are stored outside the bot row.
        env_params = update_data.pop("env_params", None) if isinstance(update_data, dict) else None
        system_timezone = update_data.pop("system_timezone", None) if isinstance(update_data, dict) else None
        normalized_system_timezone: Optional[str] = None
        if system_timezone is not None:
            try:
                normalized_system_timezone = self._normalize_system_timezone(system_timezone)
            except ValueError as exc:
                raise HTTPException(status_code=400, detail=str(exc)) from exc
        runtime_overrides: Dict[str, Any] = {}
        # tools_config is accepted in the payload but ignored here.
        update_data.pop("tools_config", None) if isinstance(update_data, dict) else None
        # Fields forwarded to the workspace sync rather than the DB row.
        runtime_fields = {
            "llm_provider",
            "llm_model",
            "api_key",
            "api_base",
            "temperature",
            "top_p",
            "max_tokens",
            "cpu_cores",
            "memory_mb",
            "storage_gb",
            "soul_md",
            "agents_md",
            "user_md",
            "tools_md",
            "identity_md",
            "send_progress",
            "send_tool_hints",
            "system_prompt",
        }
        execution_target_fields = {
            "node_id",
            "transport_kind",
            "runtime_kind",
            "core_adapter",
        }
        deploy_only_fields = {"image_tag", *execution_target_fields}
        if deploy_only_fields & set(update_data.keys()):
            raise HTTPException(
                status_code=400,
                detail=f"Use /api/bots/{bot_id}/deploy for execution target or image changes",
            )
        for field in runtime_fields:
            if field in update_data:
                runtime_overrides[field] = update_data.pop(field)
        # NOTE(review): next_target is never reassigned below, so the
        # target-related branches are currently no-ops; target changes go
        # through deploy_bot — confirm this is intentional.
        next_target: Optional[ProviderTarget] = None
        # Blank text fields are treated as "not provided" and dropped.
        for text_field in ("llm_provider", "llm_model", "api_key"):
            if text_field in runtime_overrides:
                text = str(runtime_overrides.get(text_field) or "").strip()
                if not text:
                    runtime_overrides.pop(text_field, None)
                else:
                    runtime_overrides[text_field] = text
        if "api_base" in runtime_overrides:
            runtime_overrides["api_base"] = str(runtime_overrides.get("api_base") or "").strip()
        # Keep system_prompt and soul_md mirrored when only one was sent.
        if "system_prompt" in runtime_overrides and "soul_md" not in runtime_overrides:
            runtime_overrides["soul_md"] = runtime_overrides["system_prompt"]
        if "soul_md" in runtime_overrides and "system_prompt" not in runtime_overrides:
            runtime_overrides["system_prompt"] = runtime_overrides["soul_md"]
        if {"cpu_cores", "memory_mb", "storage_gb"} & set(runtime_overrides.keys()):
            normalized_resources = self._normalize_resource_limits(
                runtime_overrides.get("cpu_cores"),
                runtime_overrides.get("memory_mb"),
                runtime_overrides.get("storage_gb"),
            )
            runtime_overrides.update(normalized_resources)
        # Only these columns are written directly to the bot row.
        db_fields = {"name", "enabled"}
        for key, value in update_data.items():
            if key in db_fields:
                setattr(bot, key, value)
        previous_env_params: Optional[Dict[str, str]] = None
        next_env_params: Optional[Dict[str, str]] = None
        if env_params is not None or normalized_system_timezone is not None:
            # Snapshot current env so it can be restored on commit failure.
            previous_env_params = self._resolve_bot_env_params(bot_id)
            next_env_params = dict(previous_env_params)
            if env_params is not None:
                # A full env_params payload replaces the stored set.
                next_env_params = self._normalize_env_params(env_params)
            if normalized_system_timezone is not None:
                next_env_params["TZ"] = normalized_system_timezone
        global_delivery_override: Optional[Dict[str, Any]] = None
        if "send_progress" in runtime_overrides or "send_tool_hints" in runtime_overrides:
            global_delivery_override = {}
            if "send_progress" in runtime_overrides:
                global_delivery_override["sendProgress"] = bool(runtime_overrides.get("send_progress"))
            if "send_tool_hints" in runtime_overrides:
                global_delivery_override["sendToolHints"] = bool(runtime_overrides.get("send_tool_hints"))
        # Sync the workspace first; only then persist env + row changes.
        self._sync_bot_workspace_via_provider(
            session,
            bot,
            target_override=next_target,
            runtime_overrides=runtime_overrides if runtime_overrides else None,
            global_delivery_override=global_delivery_override,
        )
        try:
            if next_env_params is not None:
                self._write_env_store(bot_id, next_env_params)
            if next_target is not None:
                self._apply_provider_target_to_bot(bot, next_target)
            session.add(bot)
            session.commit()
        except Exception:
            session.rollback()
            # Roll the env store back to match the unchanged DB state.
            if previous_env_params is not None:
                self._write_env_store(bot_id, previous_env_params)
            raise
        session.refresh(bot)
        self._invalidate_bot_detail_cache(bot_id)
        return self._serialize_bot(bot)
async def start_bot(self, *, app_state: Any, session: Session, bot_id: str) -> Dict[str, Any]:
bot = self._require_bot(session=session, bot_id=bot_id)
return await self._runtime_service.start_bot(app_state=app_state, session=session, bot=bot)
def stop_bot(self, *, app_state: Any, session: Session, bot_id: str) -> Dict[str, Any]:
bot = self._require_bot(session=session, bot_id=bot_id)
return self._runtime_service.stop_bot(app_state=app_state, session=session, bot=bot)
def enable_bot(self, *, session: Session, bot_id: str) -> Dict[str, Any]:
bot = self._require_bot(session=session, bot_id=bot_id)
bot.enabled = True
session.add(bot)
self._record_activity_event(session, bot_id, "bot_enabled", channel="system", detail=f"Bot {bot_id} enabled")
session.commit()
self._invalidate_bot_detail_cache(bot_id)
return {"status": "enabled", "enabled": True}
def disable_bot(self, *, app_state: Any, session: Session, bot_id: str) -> Dict[str, Any]:
bot = self._require_bot(session=session, bot_id=bot_id)
self._set_inactive(app_state=app_state, session=session, bot=bot, activity_type="bot_disabled", detail="disabled")
return {"status": "disabled", "enabled": False}
def deactivate_bot(self, *, app_state: Any, session: Session, bot_id: str) -> Dict[str, Any]:
bot = self._require_bot(session=session, bot_id=bot_id)
self._set_inactive(
app_state=app_state,
session=session,
bot=bot,
activity_type="bot_deactivated",
detail="deactivated",
)
return {"status": "deactivated"}
    def delete_bot(
        self,
        *,
        app_state: Any,
        session: Session,
        bot_id: str,
        delete_workspace: bool = True,
    ) -> Dict[str, Any]:
        """Stop, purge, and remove a bot together with all dependent rows.

        When *delete_workspace* is true the workspace is purged on the edge
        node (for edge transports) and locally, and ``workspace_deleted``
        in the result reflects whether that succeeded.
        """
        bot = self._require_bot(session=session, bot_id=bot_id)
        target = self._resolve_bot_provider_target(bot)
        # Best-effort stop; deletion proceeds even if the runtime is gone.
        try:
            self._runtime_service.stop_bot(app_state=app_state, session=session, bot=bot)
        except Exception:
            pass
        workspace_deleted = not bool(delete_workspace)
        if delete_workspace:
            if target.transport_kind == "edge":
                try:
                    # An empty workspace_root means "use the edge default".
                    workspace_root = str(self._node_metadata(target.node_id).get("workspace_root") or "").strip() or None
                    purge_result = self._resolve_edge_client(target).purge_workspace(
                        bot_id=bot_id,
                        workspace_root=workspace_root,
                    )
                    workspace_deleted = str(purge_result.get("status") or "").strip().lower() in {"deleted", "not_found"}
                except Exception as exc:
                    self._log_edge_failure(
                        self._logger,
                        key=f"bot-delete-workspace:{bot_id}",
                        exc=exc,
                        message=f"Failed to purge edge workspace for bot_id={bot_id}",
                    )
                    workspace_deleted = False
            # Local workspace copy is removed as well. NOTE(review): this
            # also runs for edge targets and can flip workspace_deleted back
            # to True after a failed edge purge — confirm that is intended.
            workspace_root = os.path.join(BOTS_WORKSPACE_ROOT, bot_id)
            if os.path.isdir(workspace_root):
                shutil.rmtree(workspace_root, ignore_errors=True)
                workspace_deleted = True
        # Delete dependent rows one by one so ORM-level behavior still applies.
        messages = session.exec(select(BotMessage).where(BotMessage.bot_id == bot_id)).all()
        for row in messages:
            session.delete(row)
        topic_items = session.exec(select(TopicItem).where(TopicItem.bot_id == bot_id)).all()
        for row in topic_items:
            session.delete(row)
        topics = session.exec(select(TopicTopic).where(TopicTopic.bot_id == bot_id)).all()
        for row in topics:
            session.delete(row)
        usage_rows = session.exec(select(BotRequestUsage).where(BotRequestUsage.bot_id == bot_id)).all()
        for row in usage_rows:
            session.delete(row)
        activity_rows = session.exec(select(BotActivityEvent).where(BotActivityEvent.bot_id == bot_id)).all()
        for row in activity_rows:
            session.delete(row)
        skill_install_rows = session.exec(select(BotSkillInstall).where(BotSkillInstall.bot_id == bot_id)).all()
        for row in skill_install_rows:
            session.delete(row)
        session.delete(bot)
        session.commit()
        self._clear_provider_target_override(bot_id)
        self._invalidate_bot_detail_cache(bot_id)
        self._invalidate_bot_messages_cache(bot_id)
        return {"status": "deleted", "workspace_deleted": workspace_deleted}
    async def deploy_bot(
        self,
        *,
        app_state: Any,
        session: Session,
        bot_id: str,
        node_id: str,
        runtime_kind: Optional[str] = None,
        image_tag: Optional[str] = None,
        auto_start: bool = False,
    ) -> Dict[str, Any]:
        """Move a stopped bot to a new execution target and/or image.

        Raises HTTPException 409 if the bot is running, 400 for an unknown
        node or when neither the image nor the target would change.
        Optionally restarts the bot when *auto_start* is true.
        """
        bot = self._require_bot(session=session, bot_id=bot_id)
        # Refresh status from the runtime first; deploy requires a stopped bot.
        actual_status = self._refresh_bot_runtime_status(app_state, bot)
        session.add(bot)
        session.commit()
        if actual_status == "RUNNING":
            raise HTTPException(status_code=409, detail="Stop the bot before deploy or migrate")
        current_target = self._resolve_bot_provider_target(bot)
        next_target_base = self._provider_target_from_node(node_id)
        if next_target_base is None:
            raise HTTPException(status_code=400, detail=f"Managed node not found: {node_id}")
        next_target = normalize_provider_target(
            {
                "node_id": node_id,
                "runtime_kind": runtime_kind,
            },
            fallback=next_target_base,
        )
        self._ensure_provider_target_supported(next_target)
        existing_image_tag = str(bot.image_tag or "").strip()
        requested_image_tag = str(image_tag or "").strip()
        # Docker targets keep the current image when none was requested.
        if next_target.runtime_kind == "docker":
            requested_image_tag = requested_image_tag or existing_image_tag
        image_changed = requested_image_tag != str(bot.image_tag or "").strip()
        target_changed = next_target.key != current_target.key
        if not image_changed and not target_changed:
            raise HTTPException(status_code=400, detail="No deploy changes detected")
        if next_target.runtime_kind == "docker":
            self._require_ready_image(
                session,
                requested_image_tag,
                require_local_image=True,
            )
        # Sync the workspace to the new target before mutating the row.
        self._sync_bot_workspace_via_provider(
            session,
            bot,
            target_override=next_target,
            runtime_overrides=provider_target_to_dict(next_target),
        )
        previous_image_tag = str(bot.image_tag or "").strip()
        bot.image_tag = requested_image_tag
        self._apply_provider_target_to_bot(bot, next_target)
        bot.updated_at = datetime.utcnow()
        session.add(bot)
        self._record_activity_event(
            session,
            bot_id,
            "bot_deployed",
            channel="system",
            detail=(
                f"Bot {bot_id} deployed to {self._node_display_name(next_target.node_id)}"
                if target_changed
                else f"Bot {bot_id} redeployed with image {requested_image_tag}"
            ),
            metadata={
                "previous_target": self._serialize_provider_target_summary(current_target),
                "next_target": self._serialize_provider_target_summary(next_target),
                "previous_image_tag": previous_image_tag,
                "image_tag": requested_image_tag,
                "auto_start": bool(auto_start),
            },
        )
        session.commit()
        session.refresh(bot)
        started = False
        if bool(auto_start):
            await self._runtime_service.start_bot(app_state=app_state, session=session, bot=bot)
            session.refresh(bot)
            started = True
        self._invalidate_bot_detail_cache(bot_id)
        return {
            "status": "deployed",
            "bot": self._serialize_bot(bot),
            "started": started,
            "image_tag": requested_image_tag,
            "previous_image_tag": previous_image_tag,
            "previous_target": self._serialize_provider_target_summary(current_target),
            "next_target": self._serialize_provider_target_summary(next_target),
        }
def _set_inactive(
self,
*,
app_state: Any,
session: Session,
bot: BotInstance,
activity_type: str,
detail: str,
) -> None:
bot_id = str(bot.id or "").strip()
try:
self._runtime_service.stop_bot(app_state=app_state, session=session, bot=bot)
except Exception:
pass
bot.enabled = False
bot.docker_status = "STOPPED"
if str(bot.current_state or "").upper() not in {"ERROR"}:
bot.current_state = "IDLE"
session.add(bot)
self._record_activity_event(session, bot_id, activity_type, channel="system", detail=f"Bot {bot_id} {detail}")
session.commit()
self._invalidate_bot_detail_cache(bot_id)

View File

@ -0,0 +1,246 @@
from datetime import datetime
from typing import Any, Callable, Dict, Optional
from fastapi import HTTPException
from sqlmodel import Session, select
from models.bot import BotInstance, BotMessage
# Callable type aliases for the collaborators injected into
# BotMessageService (cache key builders, row serializer, day-range
# resolver, cache invalidation, and the configured page size getter).
CacheKeyMessages = Callable[[str, int], str]
CacheKeyMessagesPage = Callable[[str, int, Optional[int]], str]
SerializeMessageRow = Callable[[str, BotMessage], Dict[str, Any]]
ResolveLocalDayRange = Callable[[str, Optional[int]], tuple[datetime, datetime]]
InvalidateMessagesCache = Callable[[str], None]
GetChatPullPageSize = Callable[[], int]
class BotMessageService:
    def __init__(
        self,
        *,
        cache: Any,
        cache_key_bot_messages: CacheKeyMessages,
        cache_key_bot_messages_page: CacheKeyMessagesPage,
        serialize_bot_message_row: SerializeMessageRow,
        resolve_local_day_range: ResolveLocalDayRange,
        invalidate_bot_messages_cache: InvalidateMessagesCache,
        get_chat_pull_page_size: GetChatPullPageSize,
    ) -> None:
        """Store the injected cache, key builders, and serialization hooks.

        Pure assignments — no I/O happens during construction.
        """
        self._cache = cache
        self._cache_key_bot_messages = cache_key_bot_messages
        self._cache_key_bot_messages_page = cache_key_bot_messages_page
        self._serialize_bot_message_row = serialize_bot_message_row
        self._resolve_local_day_range = resolve_local_day_range
        self._invalidate_bot_messages_cache = invalidate_bot_messages_cache
        self._get_chat_pull_page_size = get_chat_pull_page_size
def _require_bot(self, *, session: Session, bot_id: str) -> BotInstance:
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return bot
def list_messages(self, *, session: Session, bot_id: str, limit: int = 200) -> list[Dict[str, Any]]:
self._require_bot(session=session, bot_id=bot_id)
safe_limit = max(1, min(int(limit), 500))
cached = self._cache.get_json(self._cache_key_bot_messages(bot_id, safe_limit))
if isinstance(cached, list):
return cached
rows = session.exec(
select(BotMessage)
.where(BotMessage.bot_id == bot_id)
.order_by(BotMessage.created_at.desc(), BotMessage.id.desc())
.limit(safe_limit)
).all()
ordered = list(reversed(rows))
payload = [self._serialize_bot_message_row(bot_id, row) for row in ordered]
self._cache.set_json(self._cache_key_bot_messages(bot_id, safe_limit), payload, ttl=30)
return payload
    def list_messages_page(
        self,
        *,
        session: Session,
        bot_id: str,
        limit: Optional[int] = None,
        before_id: Optional[int] = None,
    ) -> Dict[str, Any]:
        """Cursor-paginated message history, one page per call.

        Pages are keyed by ``before_id``: each call returns the *safe_limit*
        newest messages with id below the cursor (or the newest overall when
        no cursor is given), in chronological order, plus a cursor for the
        next (older) page. Results are cached for 30 seconds.
        """
        self._require_bot(session=session, bot_id=bot_id)
        configured_limit = self._get_chat_pull_page_size()
        safe_limit = max(1, min(int(limit if limit is not None else configured_limit), 500))
        safe_before_id = int(before_id) if isinstance(before_id, int) and before_id > 0 else None
        cache_key = self._cache_key_bot_messages_page(bot_id, safe_limit, safe_before_id)
        cached = self._cache.get_json(cache_key)
        if isinstance(cached, dict) and isinstance(cached.get("items"), list):
            return cached
        # Fetch one extra row to detect whether an older page exists.
        stmt = (
            select(BotMessage)
            .where(BotMessage.bot_id == bot_id)
            .order_by(BotMessage.created_at.desc(), BotMessage.id.desc())
            .limit(safe_limit + 1)
        )
        if safe_before_id is not None:
            stmt = stmt.where(BotMessage.id < safe_before_id)
        rows = session.exec(stmt).all()
        has_more = len(rows) > safe_limit
        if has_more:
            rows = rows[:safe_limit]
        ordered = list(reversed(rows))
        items = [self._serialize_bot_message_row(bot_id, row) for row in ordered]
        # rows is newest-first, so rows[-1] is the oldest row on this page;
        # its id becomes the cursor for the next (older) page.
        next_before_id = rows[-1].id if rows else None
        payload = {
            "items": items,
            "has_more": bool(has_more),
            "next_before_id": next_before_id,
            "limit": safe_limit,
        }
        self._cache.set_json(cache_key, payload, ttl=30)
        return payload
    def list_messages_by_date(
        self,
        *,
        session: Session,
        bot_id: str,
        date: str,
        tz_offset_minutes: Optional[int] = None,
        limit: Optional[int] = None,
    ) -> Dict[str, Any]:
        """Jump-to-date: pick an anchor message near *date* and return a window around it.

        The anchor is the first message inside the requested local day; if
        the day has no messages, the temporally closest neighbor is used
        instead. ``matched_exact_date`` tells the caller which case applied.
        """
        self._require_bot(session=session, bot_id=bot_id)
        # [utc_start, utc_end) bounds of the requested local day in UTC.
        utc_start, utc_end = self._resolve_local_day_range(date, tz_offset_minutes)
        configured_limit = max(60, self._get_chat_pull_page_size())
        safe_limit = max(12, min(int(limit if limit is not None else configured_limit), 240))
        # Roughly a quarter of the window (clamped to 3..18 rows) precedes the anchor.
        before_limit = max(3, min(18, safe_limit // 4))
        after_limit = max(0, safe_limit - before_limit - 1)
        # Prefer the first message inside the requested day.
        exact_anchor = session.exec(
            select(BotMessage)
            .where(
                BotMessage.bot_id == bot_id,
                BotMessage.created_at >= utc_start,
                BotMessage.created_at < utc_end,
            )
            .order_by(BotMessage.created_at.asc(), BotMessage.id.asc())
            .limit(1)
        ).first()
        anchor = exact_anchor
        matched_exact_date = exact_anchor is not None
        if anchor is None:
            # Nothing that day: compare the nearest message after the day
            # with the nearest one before it and take the closer.
            next_row = session.exec(
                select(BotMessage)
                .where(BotMessage.bot_id == bot_id, BotMessage.created_at >= utc_end)
                .order_by(BotMessage.created_at.asc(), BotMessage.id.asc())
                .limit(1)
            ).first()
            prev_row = session.exec(
                select(BotMessage)
                .where(BotMessage.bot_id == bot_id, BotMessage.created_at < utc_start)
                .order_by(BotMessage.created_at.desc(), BotMessage.id.desc())
                .limit(1)
            ).first()
            if next_row and prev_row:
                gap_after = next_row.created_at - utc_end
                gap_before = utc_start - prev_row.created_at
                anchor = next_row if gap_after <= gap_before else prev_row
            else:
                anchor = next_row or prev_row
        if anchor is None or anchor.id is None:
            # Bot has no messages at all: return an empty window.
            return {
                "items": [],
                "anchor_id": None,
                "resolved_ts": None,
                "matched_exact_date": False,
                "has_more_before": False,
                "has_more_after": False,
            }
        before_rows = session.exec(
            select(BotMessage)
            .where(BotMessage.bot_id == bot_id, BotMessage.id < anchor.id)
            .order_by(BotMessage.created_at.desc(), BotMessage.id.desc())
            .limit(before_limit)
        ).all()
        after_rows = session.exec(
            select(BotMessage)
            .where(BotMessage.bot_id == bot_id, BotMessage.id > anchor.id)
            .order_by(BotMessage.created_at.asc(), BotMessage.id.asc())
            .limit(after_limit)
        ).all()
        # before_rows is newest-first; reverse it so the window is chronological.
        ordered = list(reversed(before_rows)) + [anchor] + after_rows
        first_row = ordered[0] if ordered else None
        last_row = ordered[-1] if ordered else None
        has_more_before = False
        if first_row is not None and first_row.id is not None:
            has_more_before = (
                session.exec(
                    select(BotMessage.id)
                    .where(BotMessage.bot_id == bot_id, BotMessage.id < first_row.id)
                    .order_by(BotMessage.id.desc())
                    .limit(1)
                ).first()
                is not None
            )
        has_more_after = False
        if last_row is not None and last_row.id is not None:
            has_more_after = (
                session.exec(
                    select(BotMessage.id)
                    .where(BotMessage.bot_id == bot_id, BotMessage.id > last_row.id)
                    .order_by(BotMessage.id.asc())
                    .limit(1)
                ).first()
                is not None
            )
        return {
            "items": [self._serialize_bot_message_row(bot_id, row) for row in ordered],
            "anchor_id": anchor.id,
            # Anchor timestamp in milliseconds since the epoch.
            "resolved_ts": int(anchor.created_at.timestamp() * 1000),
            "matched_exact_date": matched_exact_date,
            "has_more_before": has_more_before,
            "has_more_after": has_more_after,
        }
def update_feedback(
self,
*,
session: Session,
bot_id: str,
message_id: int,
feedback: Optional[str],
) -> Dict[str, Any]:
self._require_bot(session=session, bot_id=bot_id)
row = session.get(BotMessage, message_id)
if not row or row.bot_id != bot_id:
raise HTTPException(status_code=404, detail="Message not found")
if row.role != "assistant":
raise HTTPException(status_code=400, detail="Only assistant messages support feedback")
raw = str(feedback or "").strip().lower()
if raw in {"", "none", "null"}:
row.feedback = None
row.feedback_at = None
elif raw in {"up", "down"}:
row.feedback = raw
row.feedback_at = datetime.utcnow()
else:
raise HTTPException(status_code=400, detail="feedback must be 'up' or 'down'")
session.add(row)
session.commit()
self._invalidate_bot_messages_cache(bot_id)
return {
"status": "updated",
"bot_id": bot_id,
"message_id": row.id,
"feedback": row.feedback,
"feedback_at": row.feedback_at.isoformat() if row.feedback_at else None,
}

View File

@ -0,0 +1,180 @@
from datetime import datetime
from typing import Any, Callable, Dict, Optional
from fastapi import HTTPException
from sqlmodel import Session
from clients.edge.errors import log_edge_failure
from models.bot import BotInstance
# Callable type aliases for the collaborators injected into
# BotQueryService (cache key builders, serializers, target/resource
# resolvers, and workspace size helpers).
CacheKeyBotsList = Callable[[Optional[int]], str]
CacheKeyBotDetail = Callable[[str], str]
RefreshBotRuntimeStatus = Callable[[Any, BotInstance], str]
SerializeBot = Callable[[BotInstance], Dict[str, Any]]
SerializeBotListItem = Callable[[BotInstance], Dict[str, Any]]
ReadBotResources = Callable[[str], Dict[str, Any]]
ResolveBotProviderTarget = Callable[[BotInstance], Any]
WorkspaceRoot = Callable[[str], str]
CalcDirSizeBytes = Callable[[str], int]
class BotQueryService:
    def __init__(
        self,
        *,
        cache: Any,
        cache_key_bots_list: CacheKeyBotsList,
        cache_key_bot_detail: CacheKeyBotDetail,
        refresh_bot_runtime_status: RefreshBotRuntimeStatus,
        serialize_bot: SerializeBot,
        serialize_bot_list_item: SerializeBotListItem,
        read_bot_resources: ReadBotResources,
        resolve_bot_provider_target: ResolveBotProviderTarget,
        get_runtime_provider: Callable[[Any, BotInstance], Any],
        workspace_root: WorkspaceRoot,
        calc_dir_size_bytes: CalcDirSizeBytes,
        logger: Any,
    ) -> None:
        """Store the injected cache, serializers, and runtime helpers.

        Pure assignments — no I/O happens during construction.
        """
        self._cache = cache
        self._cache_key_bots_list = cache_key_bots_list
        self._cache_key_bot_detail = cache_key_bot_detail
        self._refresh_bot_runtime_status = refresh_bot_runtime_status
        self._serialize_bot = serialize_bot
        self._serialize_bot_list_item = serialize_bot_list_item
        self._read_bot_resources = read_bot_resources
        self._resolve_bot_provider_target = resolve_bot_provider_target
        self._get_runtime_provider = get_runtime_provider
        self._workspace_root = workspace_root
        self._calc_dir_size_bytes = calc_dir_size_bytes
        self._logger = logger
def _require_bot(self, *, session: Session, bot_id: str) -> BotInstance:
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return bot
    def list_bots(self, *, app_state: Any, session: Session, current_user_id: int) -> list[Dict[str, Any]]:
        """List the bots accessible to *current_user_id*, refreshed and cached 30s.

        Raises HTTPException 401 when the user id is non-positive or unknown.
        """
        # NOTE(review): imported locally, presumably to avoid a circular
        # import at module load time — confirm.
        from models.sys_auth import SysUser
        from services.sys_auth_service import list_accessible_bots_for_user
        cached = self._cache.get_json(self._cache_key_bots_list(current_user_id))
        if isinstance(cached, list):
            return cached
        current_user = session.get(SysUser, current_user_id) if current_user_id > 0 else None
        if current_user is None:
            raise HTTPException(status_code=401, detail="Authentication required")
        bots = list_accessible_bots_for_user(session, current_user)
        # Refresh each bot's runtime status; commit only if anything changed.
        dirty = False
        for bot in bots:
            previous_status = str(bot.docker_status or "").upper()
            previous_state = str(bot.current_state or "")
            actual_status = self._refresh_bot_runtime_status(app_state, bot)
            if previous_status != actual_status or previous_state != str(bot.current_state or ""):
                session.add(bot)
                dirty = True
        if dirty:
            session.commit()
            for bot in bots:
                session.refresh(bot)
        rows = [self._serialize_bot_list_item(bot) for bot in bots]
        self._cache.set_json(self._cache_key_bots_list(current_user_id), rows, ttl=30)
        return rows
def get_bot_detail(self, *, app_state: Any, session: Session, bot_id: str) -> Dict[str, Any]:
cached = self._cache.get_json(self._cache_key_bot_detail(bot_id))
if isinstance(cached, dict):
return cached
bot = self._require_bot(session=session, bot_id=bot_id)
previous_status = str(bot.docker_status or "").upper()
previous_state = str(bot.current_state or "")
actual_status = self._refresh_bot_runtime_status(app_state, bot)
if previous_status != actual_status or previous_state != str(bot.current_state or ""):
session.add(bot)
session.commit()
session.refresh(bot)
row = self._serialize_bot(bot)
self._cache.set_json(self._cache_key_bot_detail(bot_id), row, ttl=30)
return row
def get_bot_resources(self, *, app_state: Any, session: Session, bot_id: str) -> Dict[str, Any]:
    """Return configured limits, live runtime usage, and workspace usage for a bot.

    Best-effort: when the runtime provider fails, falls back to an empty
    usage/limits snapshot carrying the last known docker status.
    """
    bot = self._require_bot(session=session, bot_id=bot_id)
    configured = self._read_bot_resources(bot_id)
    try:
        runtime = self._get_runtime_provider(app_state, bot).get_resource_snapshot(bot_id=bot_id)
    except Exception as exc:
        log_edge_failure(
            self._logger,
            key=f"bot-resources:{bot_id}",
            exc=exc,
            message=f"Failed to refresh bot resources for bot_id={bot_id}",
        )
        # Degraded snapshot: no usage/limits, keep the persisted status.
        runtime = {"usage": {}, "limits": {}, "docker_status": str(bot.docker_status or "STOPPED").upper()}
    runtime_status = str(runtime.get("docker_status") or "").upper()
    previous_status = str(bot.docker_status or "").upper()
    previous_state = str(bot.current_state or "")
    if runtime_status:
        bot.docker_status = runtime_status
        # A non-running bot is forced back to IDLE unless it is flagged ERROR.
        if runtime_status != "RUNNING" and str(bot.current_state or "").upper() not in {"ERROR"}:
            bot.current_state = "IDLE"
    if previous_status != str(bot.docker_status or "").upper() or previous_state != str(bot.current_state or ""):
        # Persist only when the snapshot changed status or state.
        session.add(bot)
        session.commit()
        session.refresh(bot)
    target = self._resolve_bot_provider_target(bot)
    usage_payload = dict(runtime.get("usage") or {})
    # Prefer the container's RW-layer bytes; fall back to reported workspace bytes.
    workspace_bytes = int(usage_payload.get("container_rw_bytes") or usage_payload.get("workspace_used_bytes") or 0)
    workspace_root = ""
    if workspace_bytes <= 0:
        # Runtime reported nothing usable: measure the local workspace directly.
        workspace_root = self._workspace_root(bot_id)
        workspace_bytes = self._calc_dir_size_bytes(workspace_root)
    elif target.transport_kind != "edge":
        # Local runtimes still expose the workspace path even when usage came
        # from the runtime snapshot; edge runtimes have no local path.
        workspace_root = self._workspace_root(bot_id)
    configured_storage_bytes = int(configured.get("storage_gb", 0) or 0) * 1024 * 1024 * 1024
    workspace_percent = 0.0
    if configured_storage_bytes > 0:
        workspace_percent = (workspace_bytes / configured_storage_bytes) * 100.0
    limits = runtime.get("limits") or {}
    # A limit of 0/absent means "unlimited" (see note in the payload below).
    cpu_limited = (limits.get("cpu_cores") or 0) > 0
    memory_limited = (limits.get("memory_bytes") or 0) > 0
    storage_limited = bool(limits.get("storage_bytes")) or bool(limits.get("storage_opt_raw"))
    return {
        "bot_id": bot_id,
        "docker_status": runtime.get("docker_status") or bot.docker_status,
        "configured": configured,
        "runtime": runtime,
        "workspace": {
            "path": workspace_root or None,
            "usage_bytes": workspace_bytes,
            "configured_limit_bytes": configured_storage_bytes if configured_storage_bytes > 0 else None,
            "usage_percent": max(0.0, workspace_percent),
        },
        "enforcement": {
            "cpu_limited": cpu_limited,
            "memory_limited": memory_limited,
            "storage_limited": storage_limited,
        },
        "note": (
            "Resource value 0 means unlimited. CPU/Memory limits come from Docker HostConfig and are enforced by cgroup. "
            "Storage limit depends on Docker storage driver support."
        ),
        "collected_at": datetime.utcnow().isoformat() + "Z",
    }
def get_tools_config(self, *, session: Session, bot_id: str) -> Dict[str, Any]:
    """Return a static placeholder: tools config is not dashboard-managed."""
    # Validates existence (404s otherwise); the row itself is not needed.
    self._require_bot(session=session, bot_id=bot_id)
    response: Dict[str, Any] = {
        "bot_id": bot_id,
        "tools_config": {},
        "managed_by_dashboard": False,
        "hint": "Tools config is disabled in dashboard. Configure tool-related env vars manually.",
    }
    return response
def update_tools_config(self, *, session: Session, bot_id: str, payload: Any) -> Dict[str, Any]:
    """Always reject writes: tools config moved out of the dashboard."""
    # Still 404 for unknown bots before reporting the feature removal.
    self._require_bot(session=session, bot_id=bot_id)
    raise HTTPException(
        status_code=400,
        detail="Tools config is no longer managed by dashboard. Please set required env vars manually.",
    )

View File

@ -0,0 +1,288 @@
import asyncio
import os
import time
from typing import Any, Callable, Dict
from sqlmodel import Session
from clients.edge.errors import log_edge_failure
from models.bot import BotInstance
from providers.target import provider_target_to_dict
class BotRuntimeSnapshotService:
    """Read-side helpers that assemble a bot's runtime/configuration snapshot.

    All collaborators (engine, docker manager, config readers, cache
    invalidation, ...) are injected as callables so this service stays free of
    module-level wiring. Methods read workspace markdown (locally or via an
    edge node), build serialized bot payloads, and track container readiness.
    """

    # Log line emitted by the bot container once its agent loop is accepting work.
    _AGENT_LOOP_READY_MARKER = "Agent loop started"

    def __init__(
        self,
        *,
        engine: Any,
        logger: Any,
        docker_manager: Any,
        default_soul_md: str,
        default_agents_md: str,
        default_user_md: str,
        default_tools_md: str,
        default_identity_md: str,
        workspace_root: Callable[[str], str],
        resolve_edge_state_context: Callable[[str], Any],
        read_bot_config: Callable[[str], Dict[str, Any]],
        resolve_bot_env_params: Callable[[str], Dict[str, str]],
        resolve_bot_provider_target_for_instance: Callable[[BotInstance], Any],
        read_global_delivery_flags: Callable[[Any], tuple[bool, bool]],
        safe_float: Callable[[Any, float], float],
        safe_int: Callable[[Any, int], int],
        get_default_system_timezone: Callable[[], str],
        read_bot_resources: Callable[[str, Any], Dict[str, Any]],
        node_display_name: Callable[[str], str],
        get_runtime_provider: Callable[[Any, BotInstance], Any],
        invalidate_bot_detail_cache: Callable[[str], None],
        record_activity_event: Callable[..., None],
    ) -> None:
        """Store injected collaborators; no I/O happens during construction."""
        self._engine = engine
        self._logger = logger
        self._docker_manager = docker_manager
        self._default_soul_md = default_soul_md
        self._default_agents_md = default_agents_md
        self._default_user_md = default_user_md
        self._default_tools_md = default_tools_md
        self._default_identity_md = default_identity_md
        self._workspace_root = workspace_root
        self._resolve_edge_state_context = resolve_edge_state_context
        self._read_bot_config = read_bot_config
        self._resolve_bot_env_params = resolve_bot_env_params
        self._resolve_bot_provider_target_for_instance = resolve_bot_provider_target_for_instance
        self._read_global_delivery_flags = read_global_delivery_flags
        self._safe_float = safe_float
        self._safe_int = safe_int
        self._get_default_system_timezone = get_default_system_timezone
        self._read_bot_resources = read_bot_resources
        self._node_display_name = node_display_name
        self._get_runtime_provider = get_runtime_provider
        self._invalidate_bot_detail_cache = invalidate_bot_detail_cache
        self._record_activity_event = record_activity_event

    def read_workspace_md(self, bot_id: str, filename: str, default_value: str) -> str:
        """Read a markdown file from the bot workspace.

        Edge-hosted bots are read through the edge client (up to 1 MB); local
        bots read from the on-disk workspace. Returns the stripped content, or
        ``default_value`` when the file is missing, unreadable, or (for edge
        reads) not reported as markdown.
        """
        edge_context = self._resolve_edge_state_context(bot_id)
        if edge_context is not None:
            client, workspace_root, node_id = edge_context
            try:
                payload = client.read_file(
                    bot_id=bot_id,
                    path=filename,
                    max_bytes=1_000_000,
                    workspace_root=workspace_root,
                )
                if bool(payload.get("is_markdown")):
                    content = payload.get("content")
                    if isinstance(content, str):
                        return content.strip()
            except Exception as exc:
                # FIX: the dedup key and message previously hard-coded
                # "(unknown)" instead of interpolating the failing filename,
                # collapsing all per-file failures into one log key.
                log_edge_failure(
                    self._logger,
                    key=f"workspace-md-read:{node_id}:{bot_id}:{filename}",
                    exc=exc,
                    message=f"Failed to read edge workspace markdown for bot_id={bot_id}, file={filename}",
                )
            return default_value
        path = os.path.join(self._workspace_root(bot_id), filename)
        if not os.path.isfile(path):
            return default_value
        try:
            with open(path, "r", encoding="utf-8") as file:
                return file.read().strip()
        except Exception:
            # Best-effort read: any I/O/decoding failure falls back to the default.
            return default_value

    def read_bot_runtime_snapshot(self, bot: BotInstance) -> Dict[str, Any]:
        """Assemble the merged runtime snapshot for one bot.

        Combines the bot config file (provider, agent defaults, channels),
        resolved env params, resource limits, and workspace markdown files
        into one flat dict, on top of the provider-target fields.
        """
        config_data = self._read_bot_config(bot.id)
        env_params = self._resolve_bot_env_params(bot.id)
        target = self._resolve_bot_provider_target_for_instance(bot)
        provider_name = ""
        provider_cfg: Dict[str, Any] = {}
        providers_cfg = config_data.get("providers")
        if isinstance(providers_cfg, dict):
            # Only the first configured provider entry is used.
            for p_name, p_cfg in providers_cfg.items():
                provider_name = str(p_name or "").strip()
                if isinstance(p_cfg, dict):
                    provider_cfg = p_cfg
                break
        agents_defaults: Dict[str, Any] = {}
        agents_cfg = config_data.get("agents")
        if isinstance(agents_cfg, dict):
            defaults = agents_cfg.get("defaults")
            if isinstance(defaults, dict):
                agents_defaults = defaults
        channels_cfg = config_data.get("channels")
        send_progress, send_tool_hints = self._read_global_delivery_flags(channels_cfg)
        llm_provider = provider_name or "dashscope"
        llm_model = str(agents_defaults.get("model") or "")
        api_key = str(provider_cfg.get("apiKey") or "").strip()
        api_base = str(provider_cfg.get("apiBase") or "").strip()
        api_base_lower = api_base.lower()
        # Xunfei Spark exposes an OpenAI-compatible endpoint; relabel it so the
        # dashboard shows the real provider.
        if llm_provider == "openai" and ("spark-api-open.xf-yun.com" in api_base_lower or "xf-yun.com" in api_base_lower):
            llm_provider = "xunfei"
        soul_md = self.read_workspace_md(bot.id, "SOUL.md", self._default_soul_md)
        resources = self._read_bot_resources(bot.id, config_data=config_data)
        return {
            **provider_target_to_dict(target),
            "llm_provider": llm_provider,
            "llm_model": llm_model,
            "api_key": api_key,
            "api_base": api_base,
            "temperature": self._safe_float(agents_defaults.get("temperature"), 0.2),
            "top_p": self._safe_float(agents_defaults.get("topP"), 1.0),
            "max_tokens": self._safe_int(agents_defaults.get("maxTokens"), 8192),
            "cpu_cores": resources["cpu_cores"],
            "memory_mb": resources["memory_mb"],
            "storage_gb": resources["storage_gb"],
            "system_timezone": env_params.get("TZ") or self._get_default_system_timezone(),
            "send_progress": send_progress,
            "send_tool_hints": send_tool_hints,
            "soul_md": soul_md,
            "agents_md": self.read_workspace_md(bot.id, "AGENTS.md", self._default_agents_md),
            "user_md": self.read_workspace_md(bot.id, "USER.md", self._default_user_md),
            "tools_md": self.read_workspace_md(bot.id, "TOOLS.md", self._default_tools_md),
            "identity_md": self.read_workspace_md(bot.id, "IDENTITY.md", self._default_identity_md),
            # system_prompt mirrors SOUL.md for callers that expect that key.
            "system_prompt": soul_md,
        }

    def serialize_bot(self, bot: BotInstance) -> Dict[str, Any]:
        """Build the full detail payload for one bot (config + status + docs)."""
        runtime = self.read_bot_runtime_snapshot(bot)
        target = self._resolve_bot_provider_target_for_instance(bot)
        return {
            "id": bot.id,
            "name": bot.name,
            "enabled": bool(getattr(bot, "enabled", True)),
            # Avatar fields are currently fixed defaults, not persisted per bot.
            "avatar_model": "base",
            "avatar_skin": "blue_suit",
            "image_tag": bot.image_tag,
            "llm_provider": runtime.get("llm_provider") or "",
            "llm_model": runtime.get("llm_model") or "",
            "system_prompt": runtime.get("system_prompt") or "",
            "api_base": runtime.get("api_base") or "",
            "temperature": self._safe_float(runtime.get("temperature"), 0.2),
            "top_p": self._safe_float(runtime.get("top_p"), 1.0),
            "max_tokens": self._safe_int(runtime.get("max_tokens"), 8192),
            "cpu_cores": self._safe_float(runtime.get("cpu_cores"), 1.0),
            "memory_mb": self._safe_int(runtime.get("memory_mb"), 1024),
            "storage_gb": self._safe_int(runtime.get("storage_gb"), 10),
            "system_timezone": str(runtime.get("system_timezone") or self._get_default_system_timezone()),
            "send_progress": bool(runtime.get("send_progress")),
            "send_tool_hints": bool(runtime.get("send_tool_hints")),
            "node_id": target.node_id,
            "node_display_name": self._node_display_name(target.node_id),
            "transport_kind": target.transport_kind,
            "runtime_kind": target.runtime_kind,
            "core_adapter": target.core_adapter,
            "soul_md": runtime.get("soul_md") or "",
            "agents_md": runtime.get("agents_md") or "",
            "user_md": runtime.get("user_md") or "",
            "tools_md": runtime.get("tools_md") or "",
            "identity_md": runtime.get("identity_md") or "",
            "workspace_dir": bot.workspace_dir,
            "docker_status": bot.docker_status,
            "current_state": bot.current_state,
            "last_action": bot.last_action,
            "created_at": bot.created_at,
            "updated_at": bot.updated_at,
        }

    def serialize_bot_list_item(self, bot: BotInstance) -> Dict[str, Any]:
        """Build the lighter list-view payload (subset of serialize_bot)."""
        runtime = self.read_bot_runtime_snapshot(bot)
        target = self._resolve_bot_provider_target_for_instance(bot)
        return {
            "id": bot.id,
            "name": bot.name,
            "enabled": bool(getattr(bot, "enabled", True)),
            "image_tag": bot.image_tag,
            "llm_provider": runtime.get("llm_provider") or "",
            "llm_model": runtime.get("llm_model") or "",
            "node_id": target.node_id,
            "node_display_name": self._node_display_name(target.node_id),
            "transport_kind": target.transport_kind,
            "runtime_kind": target.runtime_kind,
            "core_adapter": target.core_adapter,
            "docker_status": bot.docker_status,
            "current_state": bot.current_state,
            "last_action": bot.last_action,
            "updated_at": bot.updated_at,
        }

    def refresh_bot_runtime_status(self, app_state: Any, bot: BotInstance) -> str:
        """Refresh ``bot.docker_status`` from the runtime provider (in memory only).

        Returns the resulting status (uppercased). On provider failure the
        stored status is kept unchanged. The caller is responsible for
        persisting the mutated row.
        """
        current_status = str(bot.docker_status or "STOPPED").upper()
        try:
            status = str(self._get_runtime_provider(app_state, bot).get_runtime_status(bot_id=str(bot.id or "")) or "STOPPED").upper()
        except Exception as exc:
            log_edge_failure(
                self._logger,
                key=f"bot-runtime-status:{bot.id}",
                exc=exc,
                message=f"Failed to refresh runtime status for bot_id={bot.id}",
            )
            return current_status
        bot.docker_status = status
        # A non-running bot drops back to IDLE unless it is flagged ERROR.
        if status != "RUNNING" and str(bot.current_state or "").upper() not in {"ERROR"}:
            bot.current_state = "IDLE"
        return status

    async def wait_for_agent_loop_ready(
        self,
        bot_id: str,
        timeout_seconds: float = 12.0,
        poll_interval_seconds: float = 0.5,
    ) -> bool:
        """Poll container logs until the ready marker appears or timeout elapses.

        Returns True when ``_AGENT_LOOP_READY_MARKER`` is seen (case-insensitive)
        in the last 200 log lines, False on timeout. Timeout is clamped to at
        least 1s, the poll interval to at least 0.1s.
        """
        deadline = time.monotonic() + max(1.0, timeout_seconds)
        marker = self._AGENT_LOOP_READY_MARKER.lower()
        while time.monotonic() < deadline:
            logs = self._docker_manager.get_recent_logs(bot_id, tail=200)
            if any(marker in str(line or "").lower() for line in logs):
                return True
            await asyncio.sleep(max(0.1, poll_interval_seconds))
        return False

    async def record_agent_loop_ready_warning(
        self,
        bot_id: str,
        timeout_seconds: float = 12.0,
        poll_interval_seconds: float = 0.5,
    ) -> None:
        """Background task: warn if a started container never reports ready.

        Waits for the readiness marker; if it never appears while the
        container is still RUNNING, records a ``bot_warning`` activity event
        in a fresh session and invalidates the bot-detail cache. All failures
        are swallowed (logged) — this must never crash the caller.
        """
        try:
            agent_loop_ready = await self.wait_for_agent_loop_ready(
                bot_id,
                timeout_seconds=timeout_seconds,
                poll_interval_seconds=poll_interval_seconds,
            )
            if agent_loop_ready:
                return
            # A stopped/crashed container is reported elsewhere; only warn
            # about a running container that never became ready.
            if self._docker_manager.get_bot_status(bot_id) != "RUNNING":
                return
            detail = (
                "Bot container started, but ready marker was not found in logs within "
                f"{int(timeout_seconds)}s. Check bot logs or MCP config if the bot stays unavailable."
            )
            self._logger.warning("bot_id=%s agent loop ready marker not found within %ss", bot_id, timeout_seconds)
            with Session(self._engine) as background_session:
                # The bot may have been deleted while we were waiting.
                if not background_session.get(BotInstance, bot_id):
                    return
                self._record_activity_event(
                    background_session,
                    bot_id,
                    "bot_warning",
                    channel="system",
                    detail=detail,
                    metadata={
                        "kind": "agent_loop_ready_timeout",
                        "marker": self._AGENT_LOOP_READY_MARKER,
                        "timeout_seconds": timeout_seconds,
                    },
                )
                background_session.commit()
            self._invalidate_bot_detail_cache(bot_id)
        except Exception:
            self._logger.exception("Failed to record agent loop readiness warning for bot_id=%s", bot_id)

View File

@ -0,0 +1,129 @@
from typing import Any, Callable, Optional
from urllib.parse import unquote
from fastapi import Request
from fastapi.responses import JSONResponse
from sqlmodel import Session
from models.bot import BotInstance
class DashboardAuthService:
    """Authentication/authorization guard for dashboard HTTP requests.

    ``guard`` runs as middleware: it validates the session token on every
    non-public API request, then — for ``/api/bots/{bot_id}/...`` paths —
    enforces per-user bot access and the bot's ``enabled`` flag.
    """

    # Primary header ("Bearer <token>"); a plain fallback header and the
    # "auth_token" query parameter are also accepted (see get_supplied_auth_token_http).
    AUTH_TOKEN_HEADER = "authorization"
    AUTH_TOKEN_FALLBACK_HEADER = "x-auth-token"

    def __init__(self, *, engine: Any) -> None:
        # Engine used to open short-lived sessions per request inside guard().
        self._engine = engine

    def extract_bot_id_from_api_path(self, path: str) -> Optional[str]:
        """Return the URL-decoded bot-id segment of an /api/bots/... path, else None."""
        raw = str(path or "").strip()
        if not raw.startswith("/api/bots/"):
            return None
        rest = raw[len("/api/bots/") :]
        if not rest:
            return None
        bot_id_segment = rest.split("/", 1)[0].strip()
        if not bot_id_segment:
            return None
        try:
            decoded = unquote(bot_id_segment)
        except Exception:
            # Defensive: unquote on str should not raise; keep the raw segment.
            decoded = bot_id_segment
        return str(decoded).strip() or None

    def get_supplied_auth_token_http(self, request: Request) -> str:
        """Extract the auth token: Bearer header, fallback header, then query param."""
        auth_header = str(request.headers.get(self.AUTH_TOKEN_HEADER) or "").strip()
        if auth_header.lower().startswith("bearer "):
            token = auth_header[7:].strip()  # strip the "Bearer " prefix
            if token:
                return token
        header_value = str(request.headers.get(self.AUTH_TOKEN_FALLBACK_HEADER) or "").strip()
        if header_value:
            return header_value
        return str(request.query_params.get("auth_token") or "").strip()

    @staticmethod
    def is_public_api_path(path: str, method: str = "GET") -> bool:
        """True for the small allow-list of API paths that skip authentication."""
        raw = str(path or "").strip()
        if not raw.startswith("/api/"):
            # Non-API paths are not covered by this allow-list.
            return False
        return raw in {
            "/api/sys/auth/status",
            "/api/sys/auth/login",
            "/api/sys/auth/logout",
            "/api/health",
            "/api/health/cache",
        }

    def is_bot_enable_api_path(self, path: str, method: str = "GET") -> bool:
        """True only for POST /api/bots/{bot_id}/enable.

        NOTE(review): the comparison rebuilds the path from the URL-decoded
        bot id, so a percent-encoded id segment would not match the raw path —
        confirm bot ids never contain characters requiring encoding.
        """
        raw = str(path or "").strip()
        verb = str(method or "GET").strip().upper()
        if verb != "POST":
            return False
        bot_id = self.extract_bot_id_from_api_path(raw)
        if not bot_id:
            return False
        return raw == f"/api/bots/{bot_id}/enable"

    def validate_dashboard_auth(self, request: Request, session: Session) -> Optional[str]:
        """Resolve the request's token to a user; return an error string or None.

        On success, stamps auth mode, user id, and username onto request.state.
        """
        token = self.get_supplied_auth_token_http(request)
        if not token:
            return "Authentication required"
        from services.sys_auth_service import resolve_user_by_token
        user = resolve_user_by_token(session, token)
        if user is None:
            return "Session expired or invalid"
        request.state.sys_auth_mode = "session_token"
        request.state.sys_user_id = int(user.id or 0)
        request.state.sys_username = str(user.username or "")
        return None

    @staticmethod
    def _json_error(request: Request, *, status_code: int, detail: str) -> JSONResponse:
        """Build a JSON error response with permissive CORS headers.

        Middleware short-circuits bypass the normal CORS middleware, so the
        CORS header is attached here explicitly.
        """
        headers = {"Access-Control-Allow-Origin": "*"}
        origin = str(request.headers.get("origin") or "").strip()
        if origin:
            headers["Vary"] = "Origin"
        return JSONResponse(status_code=status_code, content={"detail": detail}, headers=headers)

    async def guard(self, request: Request, call_next: Callable[..., Any]):
        """Middleware entry point: authenticate, then authorize bot-scoped paths.

        Order: CORS preflight passes through; public paths pass through; token
        is validated (401 on failure); then for /api/bots/{id}/... paths the
        user's bot access (403), bot existence (404), and the enabled flag
        (403, except GETs and the enable endpoint) are enforced.
        """
        if request.method.upper() == "OPTIONS":
            # CORS preflight never carries credentials; let it through.
            return await call_next(request)
        if self.is_public_api_path(request.url.path, request.method):
            return await call_next(request)
        current_user_id = 0
        with Session(self._engine) as session:
            auth_error = self.validate_dashboard_auth(request, session)
            if auth_error:
                return self._json_error(request, status_code=401, detail=auth_error)
            current_user_id = int(getattr(request.state, "sys_user_id", 0) or 0)
        bot_id = self.extract_bot_id_from_api_path(request.url.path)
        if not bot_id:
            # Authenticated, non-bot-scoped request: no further checks.
            return await call_next(request)
        with Session(self._engine) as session:
            from models.sys_auth import SysUser
            from services.sys_auth_service import user_can_access_bot
            current_user = session.get(SysUser, current_user_id) if current_user_id > 0 else None
            if current_user is None:
                return self._json_error(request, status_code=401, detail="Authentication required")
            if not user_can_access_bot(session, current_user, bot_id):
                return self._json_error(request, status_code=403, detail="You do not have access to this bot")
            bot = session.get(BotInstance, bot_id)
            if not bot:
                return self._json_error(request, status_code=404, detail="Bot not found")
            enabled = bool(getattr(bot, "enabled", True))
            if not enabled:
                # A disabled bot still allows reads and the enable endpoint itself.
                is_enable_api = self.is_bot_enable_api_path(request.url.path, request.method)
                is_read_api = request.method.upper() == "GET"
                if not (is_enable_api or is_read_api):
                    return self._json_error(request, status_code=403, detail="Bot is disabled. Enable it first.")
        return await call_next(request)

View File

@ -0,0 +1,101 @@
from typing import Any, Callable, Dict, List
from fastapi import HTTPException
from sqlmodel import Session, select
from models.bot import BotInstance, NanobotImage
class ImageService:
    """Registry CRUD for nanobot Docker images (DB rows + local Docker checks)."""

    def __init__(
        self,
        *,
        cache: Any,
        cache_key_images: Callable[[], str],
        invalidate_images_cache: Callable[[], None],
        reconcile_image_registry: Callable[[Session], None],
        docker_manager: Any,
    ) -> None:
        """Store injected collaborators; no I/O happens here."""
        self._cache = cache
        self._cache_key_images = cache_key_images
        self._invalidate_images_cache = invalidate_images_cache
        self._reconcile_image_registry = reconcile_image_registry
        self._docker_manager = docker_manager

    def list_images(self, *, session: Session) -> List[Dict[str, Any]]:
        """Return all registered image rows as dicts, behind a 60s JSON cache."""
        key = self._cache_key_images()
        cached = self._cache.get_json(key)
        if isinstance(cached, list):
            if all(isinstance(entry, dict) for entry in cached):
                return cached
            # Cached list holds non-dict junk: drop it and rebuild below.
            self._invalidate_images_cache()
        self._reconcile_image_registry(session)
        records = session.exec(select(NanobotImage)).all()
        payload = [record.model_dump() for record in records]
        self._cache.set_json(key, payload, ttl=60)
        return payload

    def delete_image(self, *, session: Session, tag: str) -> Dict[str, Any]:
        """Delete a registered image unless any bot still references its tag."""
        record = session.get(NanobotImage, tag)
        if record is None:
            raise HTTPException(status_code=404, detail="Image not found")
        dependents = session.exec(select(BotInstance).where(BotInstance.image_tag == tag)).all()
        if dependents:
            raise HTTPException(status_code=400, detail=f"Cannot delete image: {len(dependents)} bots are using it.")
        session.delete(record)
        session.commit()
        self._invalidate_images_cache()
        return {"status": "deleted"}

    def list_docker_images(self, *, repository: str = "nanobot-base") -> List[Dict[str, Any]]:
        """List local Docker images for one repository via the docker manager."""
        return self._docker_manager.list_images_by_repo(repository)

    def register_image(self, *, session: Session, payload: Dict[str, Any]) -> NanobotImage:
        """Register (or refresh) a locally-present Docker image in the DB registry."""
        tag = str(payload.get("tag") or "").strip()
        source_dir = str(payload.get("source_dir") or "manual").strip() or "manual"
        if not tag:
            raise HTTPException(status_code=400, detail="tag is required")
        if not self._docker_manager.has_image(tag):
            raise HTTPException(status_code=404, detail=f"Docker image not found: {tag}")
        # Version is the tag suffix with a leading "v" stripped, e.g. repo:v1.2 -> 1.2.
        if ":" in tag:
            version = tag.split(":")[-1].removeprefix("v")
        else:
            version = tag
        # Best effort: resolve the local Docker image id, tolerating any client error.
        image_id = None
        try:
            if self._docker_manager.client:
                docker_img = self._docker_manager.client.images.get(tag)
                if docker_img:
                    image_id = docker_img.id
        except Exception:
            image_id = None
        row = session.get(NanobotImage, tag)
        if row is None:
            row = NanobotImage(
                tag=tag,
                version=version,
                status="READY",
                source_dir=source_dir,
                image_id=image_id,
            )
        else:
            # Upsert: refresh the existing registry row in place.
            row.version = version
            row.status = "READY"
            row.source_dir = source_dir
            row.image_id = image_id
        session.add(row)
        session.commit()
        session.refresh(row)
        self._invalidate_images_cache()
        return row

    def require_ready_image(self, session: Session, image_tag: str, *, require_local_image: bool) -> NanobotImage:
        """Resolve a READY registry row for image_tag, raising HTTP 400 otherwise."""
        normalized_tag = str(image_tag or "").strip()
        if not normalized_tag:
            raise HTTPException(status_code=400, detail="image_tag is required")
        image_row = session.get(NanobotImage, normalized_tag)
        if not image_row:
            raise HTTPException(status_code=400, detail=f"Image not registered in DB: {normalized_tag}")
        if image_row.status != "READY":
            raise HTTPException(status_code=400, detail=f"Image status is not READY: {normalized_tag} ({image_row.status})")
        if require_local_image and not self._docker_manager.has_image(normalized_tag):
            raise HTTPException(status_code=400, detail=f"Docker image not found locally: {normalized_tag}")
        return image_row

View File

@ -0,0 +1,181 @@
import json
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Dict, List, Optional
from sqlmodel import Session, select
from models.platform import ManagedNodeRecord
@dataclass(frozen=True)
class ManagedNode:
    """Immutable in-memory view of a managed runtime node.

    Mirrors a ManagedNodeRecord row; dict fields carry parsed JSON payloads.
    """

    node_id: str
    display_name: str
    base_url: str = ""
    enabled: bool = True
    auth_token: str = ""
    # Parsed metadata_json; carries transport_kind / runtime_kind / core_adapter.
    metadata: Dict[str, Any] = field(default_factory=dict)
    capabilities: Dict[str, Any] = field(default_factory=dict)
    resources: Dict[str, Any] = field(default_factory=dict)
    # ISO-8601 string with trailing "Z", or None when the node was never seen.
    last_seen_at: Optional[str] = None
class NodeRegistryService:
    """Registry of managed nodes: DB persistence plus an in-memory mirror.

    The mirror (``self._nodes``) is keyed by lower-cased node id and is kept
    in sync with ManagedNodeRecord rows by every mutating method.
    """

    def __init__(self) -> None:
        # In-memory mirror; populated via load_from_session/register_node.
        self._nodes: Dict[str, ManagedNode] = {}

    def register_node(self, node: ManagedNode) -> None:
        """Insert/replace a node in the in-memory mirror (no DB write)."""
        self._nodes[str(node.node_id or "").strip().lower()] = self._normalize_node(node)

    def list_nodes(self) -> List[ManagedNode]:
        """Return all known nodes, sorted by node id for stable output."""
        return [self._nodes[key] for key in sorted(self._nodes.keys())]

    def get_node(self, node_id: str) -> Optional[ManagedNode]:
        """Look up a node by (case-insensitive) id; None when unknown."""
        key = str(node_id or "").strip().lower()
        if not key:
            return None
        return self._nodes.get(key)

    def require_node(self, node_id: str) -> ManagedNode:
        """Return an enabled node or raise ValueError (missing or disabled)."""
        node = self.get_node(node_id)
        if node is None:
            raise ValueError(f"Managed node not found: {node_id}")
        if not node.enabled:
            raise ValueError(f"Managed node is disabled: {node_id}")
        return node

    def load_from_session(self, session: Session) -> List[ManagedNode]:
        """Rebuild the in-memory mirror from all DB rows; return the node list."""
        rows = session.exec(select(ManagedNodeRecord)).all()
        self._nodes = {}
        for row in rows:
            self.register_node(self._row_to_node(row))
        return self.list_nodes()

    def upsert_node(self, session: Session, node: ManagedNode) -> ManagedNode:
        """Create or update a node row, sync the mirror, and return the node.

        Raises ValueError (via require_node) if the upserted node is disabled.
        """
        normalized = self._normalize_node(node)
        row = session.get(ManagedNodeRecord, normalized.node_id)
        if row is None:
            row = ManagedNodeRecord(node_id=normalized.node_id)
        metadata = dict(normalized.metadata or {})
        row.display_name = normalized.display_name or normalized.node_id
        row.base_url = normalized.base_url or ""
        row.enabled = bool(normalized.enabled)
        row.auth_token = normalized.auth_token or ""
        # Transport/runtime/adapter are denormalized out of metadata into
        # dedicated columns, with fixed fallbacks.
        row.transport_kind = str(metadata.get("transport_kind") or "edge").strip().lower() or "edge"
        row.runtime_kind = str(metadata.get("runtime_kind") or "docker").strip().lower() or "docker"
        row.core_adapter = str(metadata.get("core_adapter") or "nanobot").strip().lower() or "nanobot"
        # sort_keys gives deterministic JSON so unchanged payloads stay byte-stable.
        row.metadata_json = json.dumps(metadata, ensure_ascii=False, sort_keys=True)
        row.capabilities_json = json.dumps(dict(normalized.capabilities or {}), ensure_ascii=False, sort_keys=True)
        row.resources_json = json.dumps(dict(normalized.resources or {}), ensure_ascii=False, sort_keys=True)
        # Keep the previous last_seen_at when the incoming value is absent/unparsable.
        row.last_seen_at = self._parse_datetime(normalized.last_seen_at) or row.last_seen_at
        row.updated_at = datetime.utcnow()
        if row.created_at is None:
            row.created_at = datetime.utcnow()
        session.add(row)
        session.commit()
        session.refresh(row)
        self.register_node(self._row_to_node(row))
        return self.require_node(normalized.node_id)

    def mark_node_seen(
        self,
        session: Session,
        *,
        node_id: str,
        display_name: Optional[str] = None,
        capabilities: Optional[Dict[str, Any]] = None,
        resources: Optional[Dict[str, Any]] = None,
    ) -> ManagedNode:
        """Record a heartbeat: update last_seen_at and optional node facts.

        Raises ValueError when the node row does not exist (heartbeats never
        auto-register) or — via require_node — when the node is disabled.
        """
        row = session.get(ManagedNodeRecord, str(node_id or "").strip().lower())
        if row is None:
            raise ValueError(f"Managed node not found: {node_id}")
        if str(display_name or "").strip():
            row.display_name = str(display_name).strip()
        if capabilities is not None:
            row.capabilities_json = json.dumps(dict(capabilities or {}), ensure_ascii=False, sort_keys=True)
        if resources is not None:
            row.resources_json = json.dumps(dict(resources or {}), ensure_ascii=False, sort_keys=True)
        row.last_seen_at = datetime.utcnow()
        row.updated_at = datetime.utcnow()
        session.add(row)
        session.commit()
        session.refresh(row)
        self.register_node(self._row_to_node(row))
        return self.require_node(str(node_id or "").strip().lower())

    def delete_node(self, session: Session, node_id: str) -> None:
        """Delete a node row and drop it from the mirror; ValueError when missing."""
        key = str(node_id or "").strip().lower()
        if not key:
            raise ValueError("node_id is required")
        row = session.get(ManagedNodeRecord, key)
        if row is None:
            raise ValueError(f"Managed node not found: {node_id}")
        session.delete(row)
        session.commit()
        self._nodes.pop(key, None)

    @staticmethod
    def _normalize_node(node: ManagedNode) -> ManagedNode:
        """Return a copy with canonical casing/whitespace and copied dict fields."""
        metadata = dict(node.metadata or {})
        normalized = ManagedNode(
            node_id=str(node.node_id or "").strip().lower(),
            display_name=str(node.display_name or node.node_id or "").strip() or str(node.node_id or "").strip().lower(),
            base_url=str(node.base_url or "").strip(),
            enabled=bool(node.enabled),
            auth_token=str(node.auth_token or "").strip(),
            metadata=metadata,
            capabilities=dict(node.capabilities or {}),
            resources=dict(node.resources or {}),
            last_seen_at=str(node.last_seen_at or "").strip() or None,
        )
        return normalized

    @staticmethod
    def _row_to_node(row: ManagedNodeRecord) -> ManagedNode:
        """Convert a DB row to a ManagedNode, tolerating malformed JSON columns."""
        metadata: Dict[str, Any] = {}
        capabilities: Dict[str, Any] = {}
        try:
            loaded = json.loads(str(row.metadata_json or "{}"))
            if isinstance(loaded, dict):
                metadata = loaded
        except Exception:
            metadata = {}
        try:
            loaded_capabilities = json.loads(str(row.capabilities_json or "{}"))
            if isinstance(loaded_capabilities, dict):
                capabilities = loaded_capabilities
        except Exception:
            capabilities = {}
        resources: Dict[str, Any] = {}
        try:
            loaded_resources = json.loads(str(row.resources_json or "{}"))
            if isinstance(loaded_resources, dict):
                resources = loaded_resources
        except Exception:
            resources = {}
        # Dedicated columns win only when metadata lacks the key (setdefault).
        metadata.setdefault("transport_kind", str(row.transport_kind or "").strip().lower() or "edge")
        metadata.setdefault("runtime_kind", str(row.runtime_kind or "").strip().lower() or "docker")
        metadata.setdefault("core_adapter", str(row.core_adapter or "").strip().lower() or "nanobot")
        return ManagedNode(
            node_id=str(row.node_id or "").strip().lower(),
            display_name=str(row.display_name or row.node_id or "").strip(),
            base_url=str(row.base_url or "").strip(),
            enabled=bool(row.enabled),
            auth_token=str(row.auth_token or "").strip(),
            metadata=metadata,
            capabilities=capabilities,
            resources=resources,
            # NOTE(review): appends "Z" to a naive isoformat — assumes UTC storage; confirm.
            last_seen_at=(row.last_seen_at.isoformat() + "Z") if row.last_seen_at else None,
        )

    @staticmethod
    def _parse_datetime(value: Optional[str]) -> Optional[datetime]:
        """Parse an ISO-8601 string (optional trailing "Z"); None on failure."""
        raw = str(value or "").strip()
        if not raw:
            return None
        # fromisoformat does not accept the "Z" suffix (pre-3.11); strip it.
        normalized = raw[:-1] if raw.endswith("Z") else raw
        try:
            return datetime.fromisoformat(normalized)
        except Exception:
            return None

View File

@ -0,0 +1,118 @@
import json
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional
from sqlalchemy import delete as sql_delete, func
from sqlmodel import Session, select
from models.platform import BotActivityEvent
from schemas.platform import PlatformActivityItem, PlatformActivityListResponse
from services.platform_common import utcnow
from services.platform_settings_service import get_activity_event_retention_days
# Minimum time between opportunistic prune passes over BotActivityEvent rows.
ACTIVITY_EVENT_PRUNE_INTERVAL = timedelta(minutes=10)
# Event types that are persisted; record_activity_event silently drops
# anything not in this set.
OPERATIONAL_ACTIVITY_EVENT_TYPES = {
    "bot_created",
    "bot_deployed",
    "bot_started",
    "bot_stopped",
    "bot_warning",
    "bot_enabled",
    "bot_disabled",
    "bot_deactivated",
    "command_submitted",
    "command_failed",
    "history_cleared",
}
# Process-local throttle state for prune_expired_activity_events.
# NOTE(review): module-global, unsynchronized — assumed single-process access; confirm under multiple workers.
_last_activity_event_prune_at: Optional[datetime] = None
def prune_expired_activity_events(session: Session, force: bool = False) -> int:
    """Delete activity events older than the configured retention window.

    Throttled to at most once per ACTIVITY_EVENT_PRUNE_INTERVAL unless
    ``force`` is True. Returns the number of rows deleted (0 when the pass
    was skipped). The caller is responsible for committing.
    """
    global _last_activity_event_prune_at
    now = utcnow()
    if not force:
        last_run = _last_activity_event_prune_at
        if last_run is not None and (now - last_run) < ACTIVITY_EVENT_PRUNE_INTERVAL:
            return 0
    cutoff = now - timedelta(days=get_activity_event_retention_days(session))
    outcome = session.exec(sql_delete(BotActivityEvent).where(BotActivityEvent.created_at < cutoff))
    _last_activity_event_prune_at = now
    return int(getattr(outcome, "rowcount", 0) or 0)
def record_activity_event(
    session: Session,
    bot_id: str,
    event_type: str,
    request_id: Optional[str] = None,
    channel: str = "dashboard",
    detail: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
) -> None:
    """Append one operational activity event row (the caller commits).

    Event types outside OPERATIONAL_ACTIVITY_EVENT_TYPES are silently
    dropped. Also runs an opportunistic, throttled prune pass.
    """
    normalized_event_type = str(event_type or "unknown").strip().lower() or "unknown"
    if normalized_event_type not in OPERATIONAL_ACTIVITY_EVENT_TYPES:
        # Not an operational event: intentionally discarded.
        return
    prune_expired_activity_events(session, force=False)
    normalized_channel = str(channel or "dashboard").strip().lower() or "dashboard"
    normalized_detail = str(detail or "").strip() or None
    serialized_metadata = json.dumps(metadata, ensure_ascii=False) if metadata else None
    session.add(
        BotActivityEvent(
            bot_id=bot_id,
            request_id=request_id,
            event_type=normalized_event_type,
            channel=normalized_channel,
            detail=normalized_detail,
            metadata_json=serialized_metadata,
            created_at=utcnow(),
        )
    )
def list_activity_events(
    session: Session,
    bot_id: Optional[str] = None,
    limit: int = 100,
    offset: int = 0,
) -> Dict[str, Any]:
    """Return one newest-first page of activity events as a plain dict.

    Args:
        session: Active DB session.
        bot_id: Optional filter restricting events to one bot.
        limit: Page size, clamped to 1..500.
        offset: Row offset, clamped to >= 0.

    Returns:
        ``PlatformActivityListResponse.model_dump()`` with items, total,
        limit, offset, and has_more.
    """
    # Opportunistic cleanup; commit only if rows were actually deleted.
    deleted = prune_expired_activity_events(session, force=False)
    if deleted > 0:
        session.commit()
    safe_limit = max(1, min(int(limit), 500))
    safe_offset = max(0, int(offset or 0))
    stmt = (
        select(BotActivityEvent)
        .order_by(BotActivityEvent.created_at.desc(), BotActivityEvent.id.desc())
        .offset(safe_offset)
        .limit(safe_limit)
    )
    total_stmt = select(func.count(BotActivityEvent.id))
    if bot_id:
        stmt = stmt.where(BotActivityEvent.bot_id == bot_id)
        total_stmt = total_stmt.where(BotActivityEvent.bot_id == bot_id)
    rows = session.exec(stmt).all()
    total = int(session.exec(total_stmt).one() or 0)
    # Build each item once; previously items were dumped to dicts and then
    # re-validated by the response model — identical output, double the work.
    items: List[PlatformActivityItem] = []
    for row in rows:
        try:
            metadata = json.loads(row.metadata_json or "{}")
        except Exception:
            # Corrupt metadata payloads degrade to an empty dict.
            metadata = {}
        items.append(
            PlatformActivityItem(
                id=int(row.id or 0),
                bot_id=row.bot_id,
                request_id=row.request_id,
                event_type=row.event_type,
                channel=row.channel,
                detail=row.detail,
                metadata=metadata if isinstance(metadata, dict) else {},
                created_at=row.created_at.isoformat() + "Z",
            )
        )
    return PlatformActivityListResponse(
        items=items,
        total=total,
        limit=safe_limit,
        offset=safe_offset,
        # has_more is computed from this page's actual size, not safe_limit.
        has_more=safe_offset + len(items) < total,
    ).model_dump()

View File

@ -0,0 +1,104 @@
from datetime import timedelta
from typing import Any, Dict
from sqlmodel import Session, select
from models.platform import BotRequestUsage
from schemas.platform import (
PlatformActivityItem,
PlatformDashboardAnalyticsResponse,
PlatformDashboardUsagePoint,
PlatformDashboardUsageSeries,
)
from services.platform_activity_service import list_activity_events
from services.platform_common import utcnow
from services.platform_settings_service import get_platform_settings
def build_dashboard_analytics(
    session: Session,
    *,
    since_days: int = 7,
    events_limit: int = 20,
) -> Dict[str, Any]:
    """Aggregate per-model request counts into time-bucketed chart series.

    Buckets are hourly when ``since_days`` <= 2, otherwise daily. Returns the
    dumped ``PlatformDashboardAnalyticsResponse`` with at most 8 series
    (busiest models first) plus one page of recent activity events.
    """
    # Clamp caller-supplied ranges; events page size falls back to the
    # platform-wide page_size setting.
    safe_since_days = max(1, min(int(since_days or 7), 30))
    safe_events_limit = max(1, min(int(events_limit or get_platform_settings(session).page_size), 100))
    granularity = "hour" if safe_since_days <= 2 else "day"
    now = utcnow()
    if granularity == "hour":
        current_bucket = now.replace(minute=0, second=0, microsecond=0)
        bucket_starts = [current_bucket - timedelta(hours=index) for index in range(max(1, safe_since_days * 24))]
        bucket_starts.reverse()
        label_format = "%m-%d %H:00"
    else:
        current_bucket = now.replace(hour=0, minute=0, second=0, microsecond=0)
        bucket_starts = [current_bucket - timedelta(days=index) for index in range(safe_since_days)]
        bucket_starts.reverse()
        label_format = "%m-%d"
    # Map bucket-start ISO string -> list position, and pre-build zeroed points.
    bucket_index: Dict[str, int] = {}
    points_template: list[PlatformDashboardUsagePoint] = []
    for index, bucket_start in enumerate(bucket_starts):
        bucket_key = bucket_start.isoformat()
        bucket_index[bucket_key] = index
        points_template.append(
            PlatformDashboardUsagePoint(
                bucket_at=bucket_start.isoformat() + "Z",  # naive UTC marked with "Z"
                label=bucket_start.strftime(label_format),
                call_count=0,
            )
        )
    since = bucket_starts[0] if bucket_starts else now - timedelta(days=safe_since_days)
    rows = session.exec(
        select(BotRequestUsage)
        .where(BotRequestUsage.started_at >= since)
        .order_by(BotRequestUsage.started_at.asc(), BotRequestUsage.id.asc())
    ).all()
    series_map: Dict[str, PlatformDashboardUsageSeries] = {}
    total_request_count = 0
    for row in rows:
        total_request_count += 1
        model_name = str(row.model or row.provider or "Unknown").strip() or "Unknown"
        point_time = row.started_at or row.created_at or now
        # Truncate the row's timestamp down to its bucket start.
        if granularity == "hour":
            bucket_start = point_time.replace(minute=0, second=0, microsecond=0)
        else:
            bucket_start = point_time.replace(hour=0, minute=0, second=0, microsecond=0)
        bucket_key = bucket_start.isoformat()
        bucket_position = bucket_index.get(bucket_key)
        if bucket_position is None:
            # Row falls outside the charted window; skip it.
            continue
        if model_name not in series_map:
            series_map[model_name] = PlatformDashboardUsageSeries(
                model=model_name,
                total_calls=0,
                # Deep-copy the template so series don't share point objects.
                points=[
                    PlatformDashboardUsagePoint.model_validate(point.model_dump())
                    for point in points_template
                ],
            )
        series = series_map[model_name]
        series.total_calls += 1
        series.points[bucket_position].call_count += 1
    # Busiest models first, ties broken alphabetically; cap at 8 series.
    ordered_series = sorted(
        series_map.values(),
        key=lambda item: (-int(item.total_calls or 0), str(item.model or "").lower()),
    )[:8]
    return PlatformDashboardAnalyticsResponse(
        total_request_count=total_request_count,
        total_model_count=len(series_map),
        granularity=granularity,
        since_days=safe_since_days,
        events_page_size=safe_events_limit,
        series=ordered_series,
        recent_events=[
            PlatformActivityItem.model_validate(item)
            for item in (list_activity_events(session, limit=safe_events_limit, offset=0).get("items") or [])
        ],
    ).model_dump()

View File

@ -0,0 +1,103 @@
import json
import math
import os
import re
from datetime import datetime
from typing import Any, Dict
from core.settings import BOTS_WORKSPACE_ROOT
def utcnow() -> datetime:
    """Return the current UTC time as a *naive* datetime.

    NOTE(review): uses the deprecated ``datetime.utcnow()``. Callers append a
    literal "Z" when serializing, so values are implicitly UTC. Switching to a
    timezone-aware datetime would change stored/compared values — confirm DB
    column expectations before migrating.
    """
    return datetime.utcnow()
def bot_workspace_root(bot_id: str) -> str:
    """Absolute path of the bot's nanobot workspace directory."""
    relative = os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot", "workspace")
    return os.path.abspath(relative)
def bot_data_root(bot_id: str) -> str:
    """Absolute path of the bot's ``.nanobot`` data directory."""
    relative = os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot")
    return os.path.abspath(relative)
def calc_dir_size_bytes(path: str) -> int:
    """Total size in bytes of all regular files under *path* (0 if missing).

    Symlinks are skipped and unreadable entries are silently ignored, so the
    result is a best-effort estimate rather than an exact accounting.
    """
    if not os.path.isdir(path):
        return 0
    total_bytes = 0
    for current_dir, _dirs, file_names in os.walk(path):
        for file_name in file_names:
            full_path = os.path.join(current_dir, file_name)
            try:
                if os.path.islink(full_path):
                    continue
                total_bytes += int(os.path.getsize(full_path))
            except OSError:
                continue
    return total_bytes
def workspace_usage_bytes(runtime: Dict[str, Any], bot_id: str) -> int:
    """Best-effort workspace usage in bytes for one bot.

    Prefers the runtime-reported ``workspace_used_bytes``, then
    ``container_rw_bytes``, and finally falls back to walking the workspace
    directory on disk when neither reports a positive number.
    """
    usage_info = dict(runtime.get("usage") or {})
    reported = usage_info.get("workspace_used_bytes")
    if reported in {None, 0, "0", ""}:
        reported = usage_info.get("container_rw_bytes")
    try:
        used = int(reported or 0)
    except Exception:
        used = 0
    # Only hit the filesystem when the runtime gave us nothing usable.
    return used if used > 0 else calc_dir_size_bytes(bot_workspace_root(bot_id))
def read_bot_resources(bot_id: str) -> Dict[str, Any]:
    """Read the bot's configured resource limits from ``resources.json``.

    Missing or unreadable files fall back to defaults (1 CPU core, 1024 MB
    memory, 10 GB storage). A configured value of 0 means "unlimited" and is
    preserved as-is; any other value is clamped to a sane range.
    """
    config: Dict[str, Any] = {}
    config_path = os.path.join(bot_data_root(bot_id), "resources.json")
    if os.path.isfile(config_path):
        try:
            with open(config_path, "r", encoding="utf-8") as handle:
                parsed = json.load(handle)
        except Exception:
            parsed = None
        if isinstance(parsed, dict):
            config = parsed

    def _as_float(value: Any, fallback: float) -> float:
        try:
            return float(value)
        except Exception:
            return fallback

    def _as_int(value: Any, fallback: int) -> int:
        try:
            return int(value)
        except Exception:
            return fallback

    # Both camelCase and snake_case spellings are accepted.
    cpu_cores = _as_float(config.get("cpuCores", config.get("cpu_cores", 1.0)), 1.0)
    memory_mb = _as_int(config.get("memoryMB", config.get("memory_mb", 1024)), 1024)
    storage_gb = _as_int(config.get("storageGB", config.get("storage_gb", 10)), 10)
    # 0 is the "unlimited" sentinel; non-zero values are clamped.
    cpu_cores = 0.0 if cpu_cores == 0 else min(16.0, max(0.1, cpu_cores))
    memory_mb = 0 if memory_mb == 0 else min(65536, max(256, memory_mb))
    storage_gb = 0 if storage_gb == 0 else min(1024, max(1, storage_gb))
    return {
        "cpu_cores": cpu_cores,
        "memory_mb": memory_mb,
        "storage_gb": storage_gb,
    }
def estimate_tokens(text: str) -> int:
    """Roughly estimate the LLM token count for *text*.

    Heuristic: each CJK ideograph counts as one token, each ASCII "word" run
    (letters/digits/underscore) as ~1 token per 4 characters, and every other
    non-space character as one token. Returns 0 for empty/whitespace input,
    otherwise at least 1.
    """
    content = str(text or "").strip()
    if not content:
        return 0
    # Tokenize once: single CJK char | word run | any other non-space char.
    pieces = re.findall(r"[\u4e00-\u9fff]|[A-Za-z0-9_]+|[^\s]", content)
    # Hoisted out of the loop: the original re-ran fullmatch on string
    # patterns for every piece; precompiled patterns avoid the per-piece
    # cache lookup and argument parsing.
    cjk_pattern = re.compile(r"[\u4e00-\u9fff]")
    word_pattern = re.compile(r"[A-Za-z0-9_]+")
    total = 0
    for piece in pieces:
        if cjk_pattern.fullmatch(piece):
            total += 1
        elif word_pattern.fullmatch(piece):
            total += max(1, math.ceil(len(piece) / 4))
        else:
            total += 1
    return max(1, total)

View File

@ -0,0 +1,236 @@
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple
from sqlmodel import Session, select
from clients.edge.errors import log_edge_failure
from models.bot import BotInstance, NanobotImage
from services.platform_activity_service import list_activity_events, prune_expired_activity_events
from services.platform_common import read_bot_resources, workspace_usage_bytes
from services.platform_settings_service import get_platform_settings
from services.platform_usage_service import list_usage
logger = logging.getLogger(__name__)
def build_platform_overview(
    session: Session,
    read_runtime: Optional[Callable[[BotInstance], Tuple[str, Dict[str, Any]]]] = None,
) -> Dict[str, Any]:
    """Assemble the full platform dashboard payload.

    Walks every bot, optionally refreshing its docker status / state via the
    ``read_runtime`` callback, and aggregates configured and live resource
    totals, image health, recent usage, and activity events.

    Side effects: prunes expired activity events and commits any bot
    status/state corrections back to the database.
    """
    # Opportunistic cleanup of expired activity rows.
    deleted = prune_expired_activity_events(session, force=False)
    if deleted > 0:
        session.commit()
    bots = session.exec(select(BotInstance)).all()
    images = session.exec(select(NanobotImage).order_by(NanobotImage.created_at.desc())).all()
    settings = get_platform_settings(session)
    # Per-status counters and fleet-wide resource accumulators.
    running = 0
    stopped = 0
    disabled = 0
    configured_cpu_total = 0.0
    configured_memory_total = 0
    configured_storage_total = 0
    workspace_used_total = 0
    workspace_limit_total = 0
    live_cpu_percent_total = 0.0
    live_memory_used_total = 0
    live_memory_limit_total = 0
    dirty = False  # set when any bot row needs writing back
    bot_rows: List[Dict[str, Any]] = []
    for bot in bots:
        enabled = bool(getattr(bot, "enabled", True))
        resources = read_bot_resources(bot.id)
        # Start from the persisted status; the runtime callback may override.
        runtime_status = str(bot.docker_status or "STOPPED").upper()
        runtime: Dict[str, Any] = {"usage": {}, "limits": {}, "docker_status": runtime_status}
        if callable(read_runtime):
            try:
                runtime_status, runtime = read_runtime(bot)
            except Exception as exc:
                # Keep the stale snapshot on failure; log with de-duplication.
                log_edge_failure(
                    logger,
                    key=f"platform-overview-runtime:{bot.id}",
                    exc=exc,
                    message=f"Failed to read platform runtime snapshot for bot_id={bot.id}",
                )
        runtime_status = str(runtime_status or runtime.get("docker_status") or "STOPPED").upper()
        runtime["docker_status"] = runtime_status
        # Persist any drift between the stored status and the live runtime.
        if str(bot.docker_status or "").upper() != runtime_status:
            bot.docker_status = runtime_status
            session.add(bot)
            dirty = True
        # Non-running bots fall back to IDLE unless they are in ERROR.
        if runtime_status != "RUNNING" and str(bot.current_state or "").upper() not in {"ERROR"}:
            next_state = "IDLE"
            if str(bot.current_state or "") != next_state:
                bot.current_state = next_state
                session.add(bot)
                dirty = True
        workspace_used = workspace_usage_bytes(runtime, bot.id)
        workspace_limit = int(resources["storage_gb"] or 0) * 1024 * 1024 * 1024
        configured_cpu_total += float(resources["cpu_cores"] or 0)
        configured_memory_total += int(resources["memory_mb"] or 0) * 1024 * 1024
        configured_storage_total += workspace_limit
        workspace_used_total += workspace_used
        workspace_limit_total += workspace_limit
        live_cpu_percent_total += float((runtime.get("usage") or {}).get("cpu_percent") or 0.0)
        live_memory_used_total += int((runtime.get("usage") or {}).get("memory_bytes") or 0)
        live_memory_limit_total += int((runtime.get("usage") or {}).get("memory_limit_bytes") or 0)
        if not enabled:
            disabled += 1
        elif runtime_status == "RUNNING":
            running += 1
        else:
            stopped += 1
        bot_rows.append(
            {
                "id": bot.id,
                "name": bot.name,
                "enabled": enabled,
                "docker_status": runtime_status,
                "image_tag": bot.image_tag,
                "llm_provider": getattr(bot, "llm_provider", None),
                "llm_model": getattr(bot, "llm_model", None),
                "current_state": bot.current_state,
                "last_action": bot.last_action,
                "resources": resources,
                "workspace_usage_bytes": workspace_used,
                # 0 limit means "unlimited"; exposed as null to clients.
                "workspace_limit_bytes": workspace_limit if workspace_limit > 0 else None,
            }
        )
    if dirty:
        session.commit()
    usage = list_usage(session, limit=20)
    events = list_activity_events(session, limit=get_platform_settings(session).page_size, offset=0).get("items") or []
    return {
        "summary": {
            "bots": {
                "total": len(bots),
                "running": running,
                "stopped": stopped,
                "disabled": disabled,
            },
            "images": {
                "total": len(images),
                "ready": len([row for row in images if row.status == "READY"]),
                "abnormal": len([row for row in images if row.status != "READY"]),
            },
            "resources": {
                "configured_cpu_cores": round(configured_cpu_total, 2),
                "configured_memory_bytes": configured_memory_total,
                "configured_storage_bytes": configured_storage_total,
                "live_cpu_percent": round(live_cpu_percent_total, 2),
                "live_memory_used_bytes": live_memory_used_total,
                "live_memory_limit_bytes": live_memory_limit_total,
                "workspace_used_bytes": workspace_used_total,
                "workspace_limit_bytes": workspace_limit_total,
            },
        },
        "images": [
            {
                "tag": row.tag,
                "version": row.version,
                "status": row.status,
                "source_dir": row.source_dir,
                # Naive timestamps serialized with a trailing "Z" to mark UTC.
                "created_at": row.created_at.isoformat() + "Z",
            }
            for row in images
        ],
        "bots": bot_rows,
        "settings": settings.model_dump(),
        "usage": usage,
        "events": events,
    }
def build_node_resource_overview(
    session: Session,
    *,
    node_id: str,
    read_runtime: Optional[Callable[[BotInstance], Tuple[str, Dict[str, Any]]]] = None,
) -> Dict[str, Any]:
    """Aggregate bot counts and resource totals for a single node.

    Same accumulation logic as :func:`build_platform_overview` but scoped to
    bots whose ``node_id`` matches (compared case-insensitively). Commits any
    docker-status corrections discovered via ``read_runtime``.
    """
    normalized_node_id = str(node_id or "").strip().lower()
    bots = session.exec(select(BotInstance).where(BotInstance.node_id == normalized_node_id)).all()
    # Per-status counters and node-wide resource accumulators.
    running = 0
    stopped = 0
    disabled = 0
    configured_cpu_total = 0.0
    configured_memory_total = 0
    configured_storage_total = 0
    workspace_used_total = 0
    workspace_limit_total = 0
    live_cpu_percent_total = 0.0
    live_memory_used_total = 0
    live_memory_limit_total = 0
    dirty = False  # set when any bot row needs writing back
    for bot in bots:
        enabled = bool(getattr(bot, "enabled", True))
        resources = read_bot_resources(bot.id)
        # Start from the persisted status; the runtime callback may override.
        runtime_status = str(bot.docker_status or "STOPPED").upper()
        runtime: Dict[str, Any] = {"usage": {}, "limits": {}, "docker_status": runtime_status}
        if callable(read_runtime):
            try:
                runtime_status, runtime = read_runtime(bot)
            except Exception as exc:
                # Keep the stale snapshot on failure; log with de-duplication.
                log_edge_failure(
                    logger,
                    key=f"platform-node-runtime:{normalized_node_id}:{bot.id}",
                    exc=exc,
                    message=f"Failed to read node runtime snapshot for bot_id={bot.id}",
                )
        runtime_status = str(runtime_status or runtime.get("docker_status") or "STOPPED").upper()
        runtime["docker_status"] = runtime_status
        # Persist any drift between the stored status and the live runtime.
        if str(bot.docker_status or "").upper() != runtime_status:
            bot.docker_status = runtime_status
            session.add(bot)
            dirty = True
        workspace_used = workspace_usage_bytes(runtime, bot.id)
        workspace_limit = int(resources["storage_gb"] or 0) * 1024 * 1024 * 1024
        configured_cpu_total += float(resources["cpu_cores"] or 0)
        configured_memory_total += int(resources["memory_mb"] or 0) * 1024 * 1024
        configured_storage_total += workspace_limit
        workspace_used_total += workspace_used
        workspace_limit_total += workspace_limit
        live_cpu_percent_total += float((runtime.get("usage") or {}).get("cpu_percent") or 0.0)
        live_memory_used_total += int((runtime.get("usage") or {}).get("memory_bytes") or 0)
        live_memory_limit_total += int((runtime.get("usage") or {}).get("memory_limit_bytes") or 0)
        if not enabled:
            disabled += 1
        elif runtime_status == "RUNNING":
            running += 1
        else:
            stopped += 1
    if dirty:
        session.commit()
    return {
        "node_id": normalized_node_id,
        "bots": {
            "total": len(bots),
            "running": running,
            "stopped": stopped,
            "disabled": disabled,
        },
        "resources": {
            "configured_cpu_cores": round(configured_cpu_total, 2),
            "configured_memory_bytes": configured_memory_total,
            "configured_storage_bytes": configured_storage_total,
            "live_cpu_percent": round(live_cpu_percent_total, 2),
            "live_memory_used_bytes": live_memory_used_total,
            "live_memory_limit_bytes": live_memory_limit_total,
            "workspace_used_bytes": workspace_used_total,
            "workspace_limit_bytes": workspace_limit_total,
        },
    }

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,262 @@
import json
from typing import Any, Dict, List
from sqlmodel import Session, select
from core.database import engine
from models.platform import PlatformSetting
from schemas.platform import LoadingPageSettings, PlatformSettingsPayload, SystemSettingPayload
from services.platform_common import utcnow
from services.platform_settings_support import (
ACTIVITY_EVENT_RETENTION_SETTING_KEY,
DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS,
DEPRECATED_SETTING_KEYS,
PROTECTED_SETTING_KEYS,
SETTING_KEYS,
SYS_AUTH_TOKEN_TTL_DAYS_SETTING_KEY,
SYSTEM_SETTING_DEFINITIONS,
bootstrap_platform_setting_values,
build_speech_runtime_settings,
default_platform_settings,
normalize_activity_event_retention_days,
normalize_extension_list,
normalize_setting_key,
read_setting_value,
setting_item_from_row,
upsert_setting_row,
)
def ensure_default_system_settings(session: Session) -> None:
    """Seed/migrate the system settings table to the current schema.

    Steps, in order:
      1. Migrate the legacy single-row "global" JSON blob into per-key rows,
         then delete the blob (committed immediately).
      2. Drop rows for deprecated keys.
      3. Create any missing defined settings and backfill blank metadata on
         existing rows (operator-edited values are never overwritten).
    Commits the second/third phases only when something actually changed.
    """
    bootstrap_values = bootstrap_platform_setting_values()
    # --- 1. legacy "global" blob migration ---
    legacy_row = session.get(PlatformSetting, "global")
    if legacy_row is not None:
        try:
            legacy_data = json.loads(legacy_row.value_json or "{}")
        except Exception:
            legacy_data = {}
        if isinstance(legacy_data, dict):
            for key in SETTING_KEYS:
                meta = SYSTEM_SETTING_DEFINITIONS[key]
                upsert_setting_row(
                    session,
                    key,
                    name=str(meta["name"]),
                    category=str(meta["category"]),
                    description=str(meta["description"]),
                    value_type=str(meta["value_type"]),
                    # Prefer legacy stored value, then env bootstrap, then default.
                    value=legacy_data.get(key, bootstrap_values.get(key, meta["value"])),
                    is_public=bool(meta["is_public"]),
                    sort_order=int(meta["sort_order"]),
                )
        session.delete(legacy_row)
        session.commit()
    dirty = False
    # --- 2. remove deprecated keys ---
    for key in DEPRECATED_SETTING_KEYS:
        legacy_row = session.get(PlatformSetting, key)
        if legacy_row is not None:
            session.delete(legacy_row)
            dirty = True
    # --- 3. seed missing keys / backfill blank metadata ---
    for key, meta in SYSTEM_SETTING_DEFINITIONS.items():
        row = session.get(PlatformSetting, key)
        if row is None:
            upsert_setting_row(
                session,
                key,
                name=str(meta["name"]),
                category=str(meta["category"]),
                description=str(meta["description"]),
                value_type=str(meta["value_type"]),
                value=bootstrap_values.get(key, meta["value"]),
                is_public=bool(meta["is_public"]),
                sort_order=int(meta["sort_order"]),
            )
            dirty = True
            continue
        changed = False
        # Only fill in blank metadata fields; never overwrite operator edits.
        for field in ("name", "category", "description", "value_type"):
            value = str(meta[field])
            if not getattr(row, field):
                setattr(row, field, value)
                changed = True
        if getattr(row, "sort_order", None) is None:
            row.sort_order = int(meta["sort_order"])
            changed = True
        if getattr(row, "is_public", None) is None:
            row.is_public = bool(meta["is_public"])
            changed = True
        if changed:
            row.updated_at = utcnow()
            session.add(row)
            dirty = True
    if dirty:
        session.commit()
def list_system_settings(session: Session, search: str = "") -> List[Dict[str, Any]]:
    """List all system settings ordered by sort_order, optionally filtered.

    The case-insensitive *search* keyword matches key, name, category, or
    description. Defaults are seeded first so the list is always complete.
    """
    ensure_default_system_settings(session)
    rows = session.exec(
        select(PlatformSetting).order_by(PlatformSetting.sort_order.asc(), PlatformSetting.key.asc())
    ).all()
    items = [setting_item_from_row(row) for row in rows]
    keyword = str(search or "").strip().lower()
    if not keyword:
        return items

    def _matches(item: Dict[str, Any]) -> bool:
        fields = (item["key"], item["name"], item["category"], item["description"])
        return any(keyword in str(field).lower() for field in fields)

    return [item for item in items if _matches(item)]
def create_or_update_system_setting(session: Session, payload: SystemSettingPayload) -> Dict[str, Any]:
    """Create or update one system setting and return its serialized form.

    Payload fields left empty fall back to the built-in definition for the
    key (when one exists). Changing the activity-event retention window
    immediately re-runs the pruning job.
    """
    ensure_default_system_settings(session)
    normalized_key = normalize_setting_key(payload.key)
    definition = SYSTEM_SETTING_DEFINITIONS.get(normalized_key, {})
    resolved_name = payload.name or str(definition.get("name") or payload.key)
    resolved_category = payload.category or str(definition.get("category") or "general")
    resolved_description = payload.description or str(definition.get("description") or "")
    resolved_type = payload.value_type or str(definition.get("value_type") or "json")
    resolved_value = definition.get("value") if payload.value is None else payload.value
    resolved_sort = payload.sort_order or int(definition.get("sort_order") or 100)
    row = upsert_setting_row(
        session,
        payload.key,
        name=resolved_name,
        category=resolved_category,
        description=resolved_description,
        value_type=resolved_type,
        value=resolved_value,
        is_public=payload.is_public,
        sort_order=resolved_sort,
    )
    if normalized_key == ACTIVITY_EVENT_RETENTION_SETTING_KEY:
        # Local import avoids a circular dependency with the activity service.
        from services.platform_activity_service import prune_expired_activity_events
        prune_expired_activity_events(session, force=True)
    session.commit()
    session.refresh(row)
    return setting_item_from_row(row)
def delete_system_setting(session: Session, key: str) -> None:
    """Delete one system setting by key.

    Raises ``ValueError`` when the key is protected (a core setting) or
    does not exist.
    """
    normalized = normalize_setting_key(key)
    if normalized in PROTECTED_SETTING_KEYS:
        raise ValueError("Core platform settings cannot be deleted")
    existing = session.get(PlatformSetting, normalized)
    if existing is None:
        raise ValueError("Setting not found")
    session.delete(existing)
    session.commit()
def get_platform_settings(session: Session) -> PlatformSettingsPayload:
    """Load the effective platform settings, merging stored values over defaults.

    Stored numeric values are clamped to safe ranges; falsy stored values
    fall back to the defaults. A legacy ``loading_page`` row still overrides
    the default text field-by-field when present.
    """
    defaults = default_platform_settings()
    ensure_default_system_settings(session)
    stored_rows = session.exec(select(PlatformSetting).where(PlatformSetting.key.in_(SETTING_KEYS))).all()
    stored: Dict[str, Any] = {row.key: read_setting_value(row) for row in stored_rows}
    merged = defaults.model_dump()

    def _clamped(key: str, low: int, high: int) -> int:
        # Falsy stored values (None, 0) fall back to the default.
        return max(low, min(high, int(stored.get(key) or merged[key])))

    merged["page_size"] = _clamped("page_size", 1, 100)
    merged["chat_pull_page_size"] = _clamped("chat_pull_page_size", 10, 500)
    merged["command_auto_unlock_seconds"] = _clamped("command_auto_unlock_seconds", 1, 600)
    merged["upload_max_mb"] = int(stored.get("upload_max_mb") or merged["upload_max_mb"])
    for ext_key in ("allowed_attachment_extensions", "workspace_download_extensions"):
        merged[ext_key] = normalize_extension_list(stored.get(ext_key, merged[ext_key]))
    merged["speech_enabled"] = bool(stored.get("speech_enabled", merged["speech_enabled"]))
    loading_page = stored.get("loading_page")
    if isinstance(loading_page, dict):
        text_fields = dict(merged["loading_page"])
        for field in ("title", "subtitle", "description"):
            candidate = str(loading_page.get(field) or "").strip()
            if candidate:
                text_fields[field] = candidate
        merged["loading_page"] = text_fields
    return PlatformSettingsPayload.model_validate(merged)
def save_platform_settings(session: Session, payload: PlatformSettingsPayload) -> PlatformSettingsPayload:
    """Validate, clamp, and persist the grouped platform settings payload.

    Each managed key is written to its own ``PlatformSetting`` row using the
    built-in definition's metadata. Returns the normalized payload.
    """
    normalized = PlatformSettingsPayload(
        page_size=max(1, min(100, int(payload.page_size))),
        chat_pull_page_size=max(10, min(500, int(payload.chat_pull_page_size))),
        command_auto_unlock_seconds=max(1, min(600, int(payload.command_auto_unlock_seconds))),
        upload_max_mb=payload.upload_max_mb,
        allowed_attachment_extensions=normalize_extension_list(payload.allowed_attachment_extensions),
        workspace_download_extensions=normalize_extension_list(payload.workspace_download_extensions),
        speech_enabled=bool(payload.speech_enabled),
        loading_page=LoadingPageSettings.model_validate(payload.loading_page.model_dump()),
    )
    values = normalized.model_dump()
    for setting_key in SETTING_KEYS:
        meta = SYSTEM_SETTING_DEFINITIONS[setting_key]
        upsert_setting_row(
            session,
            setting_key,
            name=str(meta["name"]),
            category=str(meta["category"]),
            description=str(meta["description"]),
            value_type=str(meta["value_type"]),
            value=values[setting_key],
            is_public=bool(meta["is_public"]),
            sort_order=int(meta["sort_order"]),
        )
    session.commit()
    return normalized
def get_platform_settings_snapshot() -> PlatformSettingsPayload:
    """Read the current platform settings using a short-lived session."""
    with Session(engine) as snapshot_session:
        return get_platform_settings(snapshot_session)
def get_upload_max_mb() -> int:
    """Current single-file upload limit in MB."""
    snapshot = get_platform_settings_snapshot()
    return snapshot.upload_max_mb
def get_allowed_attachment_extensions() -> List[str]:
    """Current attachment extension whitelist (empty = unrestricted)."""
    snapshot = get_platform_settings_snapshot()
    return snapshot.allowed_attachment_extensions
def get_workspace_download_extensions() -> List[str]:
    """Extensions that force download mode for workspace files."""
    snapshot = get_platform_settings_snapshot()
    return snapshot.workspace_download_extensions
def get_page_size() -> int:
    """Default page size for platform list endpoints."""
    snapshot = get_platform_settings_snapshot()
    return snapshot.page_size
def get_chat_pull_page_size() -> int:
    """Page size for lazy-loading older chat messages."""
    snapshot = get_platform_settings_snapshot()
    return snapshot.chat_pull_page_size
def get_speech_runtime_settings() -> Dict[str, Any]:
    """Runtime speech-to-text configuration derived from current settings."""
    snapshot = get_platform_settings_snapshot()
    return build_speech_runtime_settings(snapshot)
def get_activity_event_retention_days(session: Session) -> int:
    """Retention window (days) for activity events, clamped to a sane range."""
    row = session.get(PlatformSetting, ACTIVITY_EVENT_RETENTION_SETTING_KEY)
    if row is None:
        return DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS
    try:
        raw_value = read_setting_value(row)
    except Exception:
        raw_value = DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS
    return normalize_activity_event_retention_days(raw_value)
def get_sys_auth_token_ttl_days(session: Session) -> int:
    """TTL in days for login JWTs / Redis sessions, clamped to [1, 365]."""
    ensure_default_system_settings(session)
    fallback_days = 7
    row = session.get(PlatformSetting, SYS_AUTH_TOKEN_TTL_DAYS_SETTING_KEY)
    if row is None:
        return fallback_days
    try:
        ttl_days = int(read_setting_value(row))
    except Exception:
        ttl_days = fallback_days
    return min(365, max(1, ttl_days))

View File

@ -0,0 +1,352 @@
import json
import os
import re
from typing import Any, Dict, List
from sqlmodel import Session
from core.settings import (
DEFAULT_CHAT_PULL_PAGE_SIZE,
DEFAULT_COMMAND_AUTO_UNLOCK_SECONDS,
DEFAULT_PAGE_SIZE,
DEFAULT_STT_AUDIO_FILTER,
DEFAULT_STT_AUDIO_PREPROCESS,
DEFAULT_STT_DEFAULT_LANGUAGE,
DEFAULT_STT_FORCE_SIMPLIFIED,
DEFAULT_STT_INITIAL_PROMPT,
DEFAULT_STT_MAX_AUDIO_SECONDS,
DEFAULT_UPLOAD_MAX_MB,
DEFAULT_WORKSPACE_DOWNLOAD_EXTENSIONS,
STT_DEVICE,
STT_ENABLED_DEFAULT,
STT_MODEL,
)
from models.platform import PlatformSetting
from schemas.platform import LoadingPageSettings, PlatformSettingsPayload, SystemSettingItem
from services.platform_common import utcnow
# Attachment extension whitelist default: empty tuple = no restriction.
DEFAULT_ALLOWED_ATTACHMENT_EXTENSIONS: tuple[str, ...] = ()
# Default retention window (days) for bot activity events.
DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS = 7
# Keys of individually-managed setting rows (outside the grouped payload).
ACTIVITY_EVENT_RETENTION_SETTING_KEY = "activity_event_retention_days"
SYS_AUTH_TOKEN_TTL_DAYS_SETTING_KEY = "sys_auth_token_ttl_days"
# Settings bundled into PlatformSettingsPayload and managed as a group.
SETTING_KEYS = (
    "page_size",
    "chat_pull_page_size",
    "command_auto_unlock_seconds",
    "upload_max_mb",
    "allowed_attachment_extensions",
    "workspace_download_extensions",
    "speech_enabled",
)
# Core keys that must never be deleted via the settings API.
PROTECTED_SETTING_KEYS = set(SETTING_KEYS) | {
    ACTIVITY_EVENT_RETENTION_SETTING_KEY,
}
# Obsolete keys that are removed from the table during migration.
DEPRECATED_SETTING_KEYS = {
    "loading_page",
    "speech_max_audio_seconds",
    "speech_default_language",
    "speech_force_simplified",
    "speech_audio_preprocess",
    "speech_audio_filter",
    "speech_initial_prompt",
    "dashboard_activity_page_size",
}
# Full metadata for every known system setting: user-facing name/category/
# description (localized strings shown in the admin UI — do not translate),
# value type, default value, public visibility, and UI sort order.
SYSTEM_SETTING_DEFINITIONS: Dict[str, Dict[str, Any]] = {
    "page_size": {
        "name": "分页大小",
        "category": "ui",
        "description": "平台各类列表默认每页条数。",
        "value_type": "integer",
        "value": DEFAULT_PAGE_SIZE,
        "is_public": True,
        "sort_order": 5,
    },
    "chat_pull_page_size": {
        "name": "对话懒加载条数",
        "category": "chat",
        "description": "Bot 对话区向上懒加载时每次读取的消息条数。",
        "value_type": "integer",
        "value": DEFAULT_CHAT_PULL_PAGE_SIZE,
        "is_public": True,
        "sort_order": 8,
    },
    "command_auto_unlock_seconds": {
        "name": "发送按钮自动恢复秒数",
        "category": "chat",
        "description": "对话发送后按钮保持停止态的最长秒数,超时后自动恢复为可发送状态。",
        "value_type": "integer",
        "value": DEFAULT_COMMAND_AUTO_UNLOCK_SECONDS,
        "is_public": True,
        "sort_order": 9,
    },
    "upload_max_mb": {
        "name": "上传大小限制",
        "category": "upload",
        "description": "单文件上传大小限制,单位 MB。",
        "value_type": "integer",
        "value": DEFAULT_UPLOAD_MAX_MB,
        "is_public": False,
        "sort_order": 10,
    },
    "allowed_attachment_extensions": {
        "name": "允许附件后缀",
        "category": "upload",
        "description": "允许上传的附件后缀列表,留空表示不限制。",
        "value_type": "json",
        "value": list(DEFAULT_ALLOWED_ATTACHMENT_EXTENSIONS),
        "is_public": False,
        "sort_order": 20,
    },
    "workspace_download_extensions": {
        "name": "工作区下载后缀",
        "category": "workspace",
        "description": "命中后缀的工作区文件默认走下载模式。",
        "value_type": "json",
        "value": list(DEFAULT_WORKSPACE_DOWNLOAD_EXTENSIONS),
        "is_public": False,
        "sort_order": 30,
    },
    "speech_enabled": {
        "name": "语音识别开关",
        "category": "speech",
        "description": "控制 Bot 语音转写功能是否启用。",
        "value_type": "boolean",
        "value": STT_ENABLED_DEFAULT,
        "is_public": True,
        "sort_order": 32,
    },
    ACTIVITY_EVENT_RETENTION_SETTING_KEY: {
        "name": "活动事件保留天数",
        "category": "maintenance",
        "description": "bot_activity_event 运维事件的保留天数,超期记录会自动清理。",
        "value_type": "integer",
        "value": DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS,
        "is_public": False,
        "sort_order": 34,
    },
    SYS_AUTH_TOKEN_TTL_DAYS_SETTING_KEY: {
        "name": "登录令牌有效天数",
        "category": "auth",
        "description": "用户登录 JWT 的失效天数,默认 7 天,同时作为 Redis 会话 TTL。",
        "value_type": "integer",
        "value": 7,
        "is_public": False,
        "sort_order": 36,
    },
}
def normalize_activity_event_retention_days(raw: Any) -> int:
    """Coerce *raw* to an int retention window clamped to [1, 3650] days.

    Falls back to the default retention when *raw* is not convertible.
    """
    try:
        days = int(raw)
    except Exception:
        days = DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS
    return min(3650, max(1, days))
def normalize_extension(raw: Any) -> str:
    """Normalize one file extension to a canonical ``.ext`` form.

    Accepts ``txt``, ``.txt``, or ``*.txt`` in any case and returns the
    lowercase dotted form; returns "" for empty or invalid input.
    """
    ext = str(raw or "").strip().lower()
    if not ext:
        return ""
    if ext.startswith("*."):
        ext = ext[1:]
    if not ext.startswith("."):
        ext = f".{ext}"
    # Valid: dot, then alnum, then up to 31 chars of [a-z0-9._+-].
    return ext if re.fullmatch(r"\.[a-z0-9][a-z0-9._+-]{0,31}", ext) else ""
def normalize_extension_list(rows: Any) -> List[str]:
    """Normalize a list of extensions, dropping invalid and duplicate entries.

    Non-list input yields an empty list; first-occurrence order is preserved.
    """
    if not isinstance(rows, list):
        return []
    result: List[str] = []
    for raw in rows:
        candidate = normalize_extension(raw)
        if candidate and candidate not in result:
            result.append(candidate)
    return result
def legacy_env_int(name: str, default: int, min_value: int, max_value: int) -> int:
    """Read an int from env var *name*, clamped to [min_value, max_value].

    Returns *default* when the variable is unset; unparsable values also
    fall back to *default* before clamping.
    """
    raw = os.getenv(name)
    if raw is None:
        return default
    try:
        parsed = int(str(raw).strip())
    except Exception:
        parsed = default
    return min(max_value, max(min_value, parsed))
def legacy_env_bool(name: str, default: bool) -> bool:
    """Read a boolean env var; truthy values are 1/true/yes/on (any case)."""
    raw = os.getenv(name)
    if raw is None:
        return default
    normalized = str(raw).strip().lower()
    return normalized in {"1", "true", "yes", "on"}
def legacy_env_extensions(name: str, default: List[str]) -> List[str]:
    """Read a delimiter-separated extension list from the environment.

    Splits on commas, semicolons, or whitespace and normalizes each entry,
    dropping invalids and duplicates. Returns a copy of *default* when the
    variable is unset.
    """
    raw = os.getenv(name)
    if raw is None:
        return list(default)
    result: List[str] = []
    for token in re.split(r"[,;\s]+", str(raw)):
        candidate = normalize_extension(token)
        if candidate and candidate not in result:
            result.append(candidate)
    return result
def bootstrap_platform_setting_values() -> Dict[str, Any]:
    """Initial setting values, honoring legacy environment-variable overrides.

    Used when seeding the settings table; each entry falls back to the
    compiled-in default when its legacy env var is absent.
    """
    values: Dict[str, Any] = {}
    values["page_size"] = legacy_env_int("PAGE_SIZE", DEFAULT_PAGE_SIZE, 1, 100)
    values["chat_pull_page_size"] = legacy_env_int(
        "CHAT_PULL_PAGE_SIZE", DEFAULT_CHAT_PULL_PAGE_SIZE, 10, 500
    )
    values["command_auto_unlock_seconds"] = legacy_env_int(
        "COMMAND_AUTO_UNLOCK_SECONDS", DEFAULT_COMMAND_AUTO_UNLOCK_SECONDS, 1, 600
    )
    values["upload_max_mb"] = legacy_env_int("UPLOAD_MAX_MB", DEFAULT_UPLOAD_MAX_MB, 1, 2048)
    values["allowed_attachment_extensions"] = legacy_env_extensions(
        "ALLOWED_ATTACHMENT_EXTENSIONS", list(DEFAULT_ALLOWED_ATTACHMENT_EXTENSIONS)
    )
    values["workspace_download_extensions"] = legacy_env_extensions(
        "WORKSPACE_DOWNLOAD_EXTENSIONS", list(DEFAULT_WORKSPACE_DOWNLOAD_EXTENSIONS)
    )
    values["speech_enabled"] = legacy_env_bool("STT_ENABLED", STT_ENABLED_DEFAULT)
    return values
def default_platform_settings() -> PlatformSettingsPayload:
    """Build the default settings payload from bootstrap + speech defaults."""
    base = bootstrap_platform_setting_values()
    return PlatformSettingsPayload(
        page_size=int(base["page_size"]),
        chat_pull_page_size=int(base["chat_pull_page_size"]),
        command_auto_unlock_seconds=int(base["command_auto_unlock_seconds"]),
        upload_max_mb=int(base["upload_max_mb"]),
        allowed_attachment_extensions=list(base["allowed_attachment_extensions"]),
        workspace_download_extensions=list(base["workspace_download_extensions"]),
        speech_enabled=bool(base["speech_enabled"]),
        # Speech tuning parameters are fixed at compiled-in defaults; only the
        # on/off switch is user-editable.
        speech_max_audio_seconds=DEFAULT_STT_MAX_AUDIO_SECONDS,
        speech_default_language=DEFAULT_STT_DEFAULT_LANGUAGE,
        speech_force_simplified=DEFAULT_STT_FORCE_SIMPLIFIED,
        speech_audio_preprocess=DEFAULT_STT_AUDIO_PREPROCESS,
        speech_audio_filter=DEFAULT_STT_AUDIO_FILTER,
        speech_initial_prompt=DEFAULT_STT_INITIAL_PROMPT,
        loading_page=LoadingPageSettings(),
    )
def normalize_setting_key(raw: Any) -> str:
    """Slugify a setting key: lowercase, charset [a-z0-9_.-], trimmed edges."""
    collapsed = re.sub(r"[^a-zA-Z0-9_.-]+", "_", str(raw or "").strip())
    return collapsed.strip("._-").lower()
def normalize_setting_value(value: Any, value_type: str) -> Any:
    """Coerce *value* to the declared setting type.

    Supported types: integer, float, boolean, string, json (pass-through).
    An empty/None type defaults to "json". Raises ``ValueError`` for an
    unknown type name.
    """
    kind = str(value_type or "json").strip().lower() or "json"
    if kind == "integer":
        return int(value or 0)
    if kind == "float":
        return float(value or 0)
    if kind == "boolean":
        if isinstance(value, bool):
            return value
        return str(value or "").strip().lower() in {"1", "true", "yes", "on"}
    if kind == "string":
        return str(value or "")
    if kind == "json":
        return value
    raise ValueError(f"Unsupported value_type: {kind}")
def read_setting_value(row: PlatformSetting) -> Any:
    """Decode a setting row's JSON payload and coerce it to its declared type."""
    try:
        decoded = json.loads(row.value_json or "null")
    except Exception:
        decoded = None
    return normalize_setting_value(decoded, row.value_type)
def setting_item_from_row(row: PlatformSetting) -> Dict[str, Any]:
    """Serialize one setting row into the API item shape (plain dict)."""
    fields = {
        "key": row.key,
        "name": row.name,
        "category": row.category,
        "description": row.description,
        "value_type": row.value_type,
        "value": read_setting_value(row),
        "is_public": bool(row.is_public),
        "sort_order": int(row.sort_order or 100),
        # Naive timestamps are serialized with a trailing "Z" to mark UTC.
        "created_at": row.created_at.isoformat() + "Z",
        "updated_at": row.updated_at.isoformat() + "Z",
    }
    return SystemSettingItem(**fields).model_dump()
def upsert_setting_row(
    session: Session,
    key: str,
    *,
    name: str,
    category: str,
    description: str,
    value_type: str,
    value: Any,
    is_public: bool,
    sort_order: int,
) -> PlatformSetting:
    """Insert or update one ``PlatformSetting`` row (added, not committed).

    The key is slugified and the value coerced to *value_type* before being
    stored as JSON. Raises ``ValueError`` for an empty key or unknown type.
    """
    normalized_key = normalize_setting_key(key)
    if not normalized_key:
        raise ValueError("Setting key is required")
    normalized_type = str(value_type or "json").strip().lower() or "json"
    # Coerce + encode up front so type errors surface before any DB access.
    encoded_value = json.dumps(normalize_setting_value(value, normalized_type), ensure_ascii=False)
    now = utcnow()
    row = session.get(PlatformSetting, normalized_key)
    if row is None:
        row = PlatformSetting(
            key=normalized_key,
            name=str(name or normalized_key),
            category=str(category or "general"),
            description=str(description or ""),
            value_type=normalized_type,
            value_json=encoded_value,
            is_public=bool(is_public),
            sort_order=int(sort_order or 100),
            created_at=now,
            updated_at=now,
        )
    else:
        row.name = str(name or row.name or normalized_key)
        row.category = str(category or row.category or "general")
        row.description = str(description or row.description or "")
        row.value_type = normalized_type
        row.value_json = encoded_value
        row.is_public = bool(is_public)
        row.sort_order = int(sort_order or row.sort_order or 100)
        row.updated_at = now
    session.add(row)
    return row
def build_speech_runtime_settings(settings: PlatformSettingsPayload) -> Dict[str, Any]:
    """Assemble the runtime speech-to-text config dict.

    Only the on/off flag comes from *settings*; every tuning parameter uses
    the compiled-in STT defaults.
    """
    language = str(DEFAULT_STT_DEFAULT_LANGUAGE or "zh").strip().lower() or "zh"
    return {
        "enabled": bool(settings.speech_enabled),
        "max_audio_seconds": int(DEFAULT_STT_MAX_AUDIO_SECONDS),
        "default_language": language,
        "force_simplified": bool(DEFAULT_STT_FORCE_SIMPLIFIED),
        "audio_preprocess": bool(DEFAULT_STT_AUDIO_PREPROCESS),
        "audio_filter": str(DEFAULT_STT_AUDIO_FILTER or "").strip(),
        "initial_prompt": str(DEFAULT_STT_INITIAL_PROMPT or "").strip(),
        "model": STT_MODEL,
        "device": STT_DEVICE,
    }

View File

@ -0,0 +1,230 @@
import json
import uuid
from datetime import timedelta
from typing import Any, Dict, List, Optional
from sqlalchemy import func
from sqlmodel import Session, select
from models.platform import BotRequestUsage
from schemas.platform import PlatformUsageItem, PlatformUsageResponse, PlatformUsageSummary
from services.platform_common import estimate_tokens, utcnow
def create_usage_request(
    session: Session,
    bot_id: str,
    command: str,
    attachments: Optional[List[str]] = None,
    channel: str = "dashboard",
    metadata: Optional[Dict[str, Any]] = None,
    provider: Optional[str] = None,
    model: Optional[str] = None,
) -> str:
    """Insert a PENDING BotRequestUsage row for an outgoing command.

    Token counts start as estimates from the command text; they are replaced
    later when the runtime reports exact usage. Returns the generated
    request id (uuid4 hex) used to correlate the eventual response.
    """
    request_id = uuid.uuid4().hex
    attachment_rows: List[str] = []
    for item in attachments or []:
        text = str(item).strip()
        if text:
            attachment_rows.append(text)
    estimated_input = estimate_tokens(command)
    usage = BotRequestUsage(
        bot_id=bot_id,
        request_id=request_id,
        channel=channel,
        status="PENDING",
        provider=(str(provider or "").strip() or None),
        model=(str(model or "").strip() or None),
        token_source="estimated",
        input_tokens=estimated_input,
        output_tokens=0,
        total_tokens=estimated_input,
        input_text_preview=str(command or "")[:400],
        attachments_json=json.dumps(attachment_rows, ensure_ascii=False) if attachment_rows else None,
        metadata_json=json.dumps(metadata or {}, ensure_ascii=False),
        started_at=utcnow(),
        created_at=utcnow(),
        updated_at=utcnow(),
    )
    session.add(usage)
    # Flush so the row exists (and gets its id) before the caller commits.
    session.flush()
    return request_id
def bind_usage_message(
    session: Session,
    bot_id: str,
    request_id: str,
    message_id: Optional[int],
) -> Optional[BotRequestUsage]:
    """Attach a persisted message id to the matching PENDING usage row.

    Returns the updated row, or None when either identifier is missing or
    no pending usage row matches the request id.
    """
    if not (request_id and message_id):
        return None
    pending = find_pending_usage_by_request_id(session, bot_id, request_id)
    if pending is None:
        return None
    pending.message_id = int(message_id)
    pending.updated_at = utcnow()
    session.add(pending)
    return pending
def find_latest_pending_usage(session: Session, bot_id: str) -> Optional[BotRequestUsage]:
    """Return the newest PENDING usage row for *bot_id*, or None."""
    query = (
        select(BotRequestUsage)
        .where(
            BotRequestUsage.bot_id == bot_id,
            BotRequestUsage.status == "PENDING",
        )
        .order_by(BotRequestUsage.started_at.desc(), BotRequestUsage.id.desc())
        .limit(1)
    )
    return session.exec(query).first()
def find_pending_usage_by_request_id(session: Session, bot_id: str, request_id: str) -> Optional[BotRequestUsage]:
    """Return the newest PENDING usage row matching *request_id* for *bot_id*.

    An empty request id short-circuits to None without querying.
    """
    if not request_id:
        return None
    query = (
        select(BotRequestUsage)
        .where(
            BotRequestUsage.bot_id == bot_id,
            BotRequestUsage.request_id == request_id,
            BotRequestUsage.status == "PENDING",
        )
        .order_by(BotRequestUsage.started_at.desc(), BotRequestUsage.id.desc())
        .limit(1)
    )
    return session.exec(query).first()
def finalize_usage_from_packet(session: Session, bot_id: str, packet: Dict[str, Any]) -> Optional[BotRequestUsage]:
    """Close out a PENDING usage row using an assistant-response packet.

    Prefers the row matching packet["request_id"]; falls back to the newest
    pending row for the bot. Token counts come from packet["usage"] when the
    provider reported them ("exact"); missing counts fall back to the stored
    input estimate and an output estimate from the response text, downgrading
    token_source to "mixed" accordingly. Returns the updated row or None
    when no pending row exists.
    """
    request_id = str(packet.get("request_id") or "").strip()
    usage_row = find_pending_usage_by_request_id(session, bot_id, request_id) or find_latest_pending_usage(session, bot_id)
    if not usage_row:
        return None
    raw_usage = packet.get("usage")
    input_tokens: Optional[int] = None
    output_tokens: Optional[int] = None
    source = "estimated"
    if isinstance(raw_usage, dict):
        # Accept the key spellings used by different providers; the first
        # present key wins (even when its value fails int conversion).
        for key in ("input_tokens", "prompt_tokens", "promptTokens"):
            if raw_usage.get(key) is not None:
                try:
                    input_tokens = int(raw_usage.get(key) or 0)
                except Exception:
                    input_tokens = None
                break
        for key in ("output_tokens", "completion_tokens", "completionTokens"):
            if raw_usage.get(key) is not None:
                try:
                    output_tokens = int(raw_usage.get(key) or 0)
                except Exception:
                    output_tokens = None
                break
        if input_tokens is not None or output_tokens is not None:
            source = "exact"
    text = str(packet.get("text") or packet.get("content") or "").strip()
    provider = str(packet.get("provider") or "").strip()
    model = str(packet.get("model") or "").strip()
    message_id = packet.get("message_id")
    if input_tokens is None:
        # Keep the estimate recorded when the request was created.
        input_tokens = usage_row.input_tokens
    if output_tokens is None:
        # Output had to be estimated, so an "exact" input becomes "mixed".
        output_tokens = estimate_tokens(text)
        if source == "exact":
            source = "mixed"
    if provider:
        usage_row.provider = provider[:120]
    if model:
        usage_row.model = model[:255]
    if message_id is not None:
        try:
            usage_row.message_id = int(message_id)
        except Exception:
            pass
    usage_row.output_tokens = max(0, int(output_tokens or 0))
    usage_row.input_tokens = max(0, int(input_tokens or 0))
    usage_row.total_tokens = usage_row.input_tokens + usage_row.output_tokens
    usage_row.output_text_preview = text[:400] if text else usage_row.output_text_preview
    usage_row.status = "COMPLETED"
    usage_row.token_source = source
    usage_row.completed_at = utcnow()
    usage_row.updated_at = utcnow()
    session.add(usage_row)
    return usage_row
def fail_latest_usage(session: Session, bot_id: str, detail: str) -> Optional[BotRequestUsage]:
    """Mark the newest PENDING usage row for *bot_id* as ERROR.

    Stores up to 500 characters of *detail* as the error text. Returns the
    updated row, or None when the bot has no pending usage.
    """
    row = find_latest_pending_usage(session, bot_id)
    if row is None:
        return None
    row.status = "ERROR"
    row.error_text = str(detail or "")[:500]
    row.completed_at = utcnow()
    row.updated_at = utcnow()
    session.add(row)
    return row
def list_usage(
    session: Session,
    bot_id: Optional[str] = None,
    limit: int = 100,
    offset: int = 0,
) -> Dict[str, Any]:
    """Return a paginated usage listing with an aggregate token summary.

    When *bot_id* is given, both the page and the summary are scoped to that
    bot; otherwise the page is global and the summary covers only the last
    24 hours. *limit* is clamped to [1, 500] and *offset* to >= 0. The
    response is the plain-dict dump of a PlatformUsageResponse.

    Fix: items were previously built as PlatformUsageItem -> model_dump()
    dicts and then re-parsed with model_validate(); the instances are now
    constructed once and used directly.
    """
    safe_limit = max(1, min(int(limit), 500))
    safe_offset = max(0, int(offset or 0))
    stmt = (
        select(BotRequestUsage)
        .order_by(BotRequestUsage.started_at.desc(), BotRequestUsage.id.desc())
        .offset(safe_offset)
        .limit(safe_limit)
    )
    summary_stmt = select(
        func.count(BotRequestUsage.id),
        func.coalesce(func.sum(BotRequestUsage.input_tokens), 0),
        func.coalesce(func.sum(BotRequestUsage.output_tokens), 0),
        func.coalesce(func.sum(BotRequestUsage.total_tokens), 0),
    )
    total_stmt = select(func.count(BotRequestUsage.id))
    if bot_id:
        stmt = stmt.where(BotRequestUsage.bot_id == bot_id)
        summary_stmt = summary_stmt.where(BotRequestUsage.bot_id == bot_id)
        total_stmt = total_stmt.where(BotRequestUsage.bot_id == bot_id)
    else:
        # Unscoped summaries are limited to the trailing 24h window.
        since = utcnow() - timedelta(days=1)
        summary_stmt = summary_stmt.where(BotRequestUsage.created_at >= since)
    rows = session.exec(stmt).all()
    count, input_sum, output_sum, total_sum = session.exec(summary_stmt).one()
    total = int(session.exec(total_stmt).one() or 0)
    items = [
        PlatformUsageItem(
            id=int(row.id or 0),
            bot_id=row.bot_id,
            message_id=int(row.message_id) if row.message_id is not None else None,
            request_id=row.request_id,
            channel=row.channel,
            status=row.status,
            provider=row.provider,
            model=row.model,
            token_source=row.token_source,
            content=row.input_text_preview or row.output_text_preview,
            input_tokens=int(row.input_tokens or 0),
            output_tokens=int(row.output_tokens or 0),
            total_tokens=int(row.total_tokens or 0),
            input_text_preview=row.input_text_preview,
            output_text_preview=row.output_text_preview,
            # assumes timestamps are stored as naive UTC (utcnow convention);
            # the "Z" suffix is appended manually — TODO confirm.
            started_at=row.started_at.isoformat() + "Z",
            completed_at=row.completed_at.isoformat() + "Z" if row.completed_at else None,
        )
        for row in rows
    ]
    return PlatformUsageResponse(
        summary=PlatformUsageSummary(
            request_count=int(count or 0),
            input_tokens=int(input_sum or 0),
            output_tokens=int(output_sum or 0),
            total_tokens=int(total_sum or 0),
        ),
        items=items,
        total=total,
        limit=safe_limit,
        offset=safe_offset,
        has_more=safe_offset + len(items) < total,
    ).model_dump()

View File

@ -0,0 +1,86 @@
from typing import Any, Callable, Dict, List, Optional, Tuple
import httpx
from fastapi import HTTPException
class ProviderTestService:
    """Connectivity checker for OpenAI-compatible LLM provider endpoints."""

    # alias -> (canonical provider name, default API base URL)
    _ALIAS_TABLE = {
        "openrouter": ("openrouter", "https://openrouter.ai/api/v1"),
        "dashscope": ("dashscope", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
        "aliyun": ("dashscope", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
        "qwen": ("dashscope", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
        "aliyun-qwen": ("dashscope", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
        "xunfei": ("openai", "https://spark-api-open.xf-yun.com/v1"),
        "iflytek": ("openai", "https://spark-api-open.xf-yun.com/v1"),
        "xfyun": ("openai", "https://spark-api-open.xf-yun.com/v1"),
        "kimi": ("kimi", "https://api.moonshot.cn/v1"),
        "moonshot": ("kimi", "https://api.moonshot.cn/v1"),
        "minimax": ("minimax", "https://api.minimax.chat/v1"),
        "vllm": ("vllm", ""),
    }

    def __init__(self, *, provider_defaults: Optional[Callable[[str], Tuple[str, str]]] = None) -> None:
        # Allow the alias resolver to be swapped out (e.g. in tests).
        self._provider_defaults = provider_defaults or self.provider_defaults

    @staticmethod
    def provider_defaults(provider: str) -> tuple[str, str]:
        """Map a raw provider alias to (canonical_name, default_api_base).

        Unknown providers pass through lowercased with an empty base URL.
        """
        normalized = provider.lower().strip()
        default = (normalized, "")
        return ProviderTestService._ALIAS_TABLE.get(normalized, default)

    async def test_provider(self, *, payload: Dict[str, Any]) -> Dict[str, Any]:
        """Probe the provider's /models endpoint with the supplied credentials.

        Raises 400 for missing/unsupported inputs; network or HTTP failures
        are reported in the returned dict rather than raised.
        """
        provider = str(payload.get("provider") or "").strip()
        api_key = str(payload.get("api_key") or "").strip()
        model = str(payload.get("model") or "").strip()
        api_base = str(payload.get("api_base") or "").strip()
        if not provider or not api_key:
            raise HTTPException(status_code=400, detail="provider and api_key are required")
        normalized_provider, default_base = self._provider_defaults(provider)
        base = (api_base or default_base).rstrip("/")
        supported = {"openrouter", "dashscope", "kimi", "minimax", "openai", "deepseek", "vllm"}
        if normalized_provider not in supported:
            raise HTTPException(status_code=400, detail=f"provider not supported for test: {provider}")
        if not base:
            raise HTTPException(status_code=400, detail=f"api_base is required for provider: {provider}")
        url = f"{base}/models"
        request_headers = {"Authorization": f"Bearer {api_key}"}
        request_timeout = httpx.Timeout(20.0, connect=10.0)
        try:
            async with httpx.AsyncClient(timeout=request_timeout) as client:
                resp = await client.get(url, headers=request_headers)
                if resp.status_code >= 400:
                    return {
                        "ok": False,
                        "provider": normalized_provider,
                        "status_code": resp.status_code,
                        "detail": resp.text[:500],
                    }
                body = resp.json()
                raw_models = body.get("data", []) if isinstance(body, dict) else []
                model_ids: List[str] = [
                    str(item["id"])
                    for item in raw_models[:20]
                    if isinstance(item, dict) and item.get("id")
                ]
                model_hint = ""
                if model:
                    listed = any(model in candidate for candidate in model_ids)
                    model_hint = "model_found" if listed else "model_not_listed"
                return {
                    "ok": True,
                    "provider": normalized_provider,
                    "endpoint": url,
                    "models_preview": model_ids[:8],
                    "model_hint": model_hint,
                }
        except Exception as exc:
            return {
                "ok": False,
                "provider": normalized_provider,
                "endpoint": url,
                "detail": str(exc),
            }

View File

@ -0,0 +1,289 @@
import asyncio
import json
import os
import re
from datetime import datetime, timedelta, timezone
from typing import Any, Callable, Dict, List, Optional

from fastapi import HTTPException, WebSocket
from sqlmodel import Session

from models.bot import BotInstance, BotMessage
class WSConnectionManager:
    """Tracks live websockets per bot id and fans packets out to them."""

    def __init__(self) -> None:
        # bot_id -> list of accepted websockets
        self.connections: Dict[str, List[WebSocket]] = {}

    async def connect(self, bot_id: str, websocket: WebSocket):
        """Accept the socket and register it under *bot_id*."""
        await websocket.accept()
        bucket = self.connections.setdefault(bot_id, [])
        bucket.append(websocket)

    def disconnect(self, bot_id: str, websocket: WebSocket):
        """Forget the socket; drop the bot's bucket once it is empty."""
        bucket = self.connections.get(bot_id, [])
        if websocket in bucket:
            bucket.remove(websocket)
        if not bucket and bot_id in self.connections:
            self.connections.pop(bot_id)

    async def broadcast(self, bot_id: str, data: Dict[str, Any]):
        """Send *data* to every socket of *bot_id*, pruning sockets that fail."""
        for ws in list(self.connections.get(bot_id, [])):
            try:
                await ws.send_json(data)
            except Exception:
                # A failed send means the client is gone; unregister it.
                self.disconnect(bot_id, ws)
class RuntimeEventService:
    """Bridges runtime packets from bot containers into persistence, cache
    invalidation, topic publishing, and dashboard websocket broadcast."""

    def __init__(
        self,
        *,
        app: Any,
        engine: Any,
        cache: Any,
        logger: Any,
        publish_runtime_topic_packet: Callable[..., None],
        bind_usage_message: Callable[..., None],
        finalize_usage_from_packet: Callable[..., Any],
        workspace_root: Callable[[str], str],
        parse_message_media: Callable[[str, Optional[str]], List[str]],
    ) -> None:
        # Collaborators are injected to keep the service free of globals.
        self._app = app
        self._engine = engine
        self._cache = cache
        self._logger = logger
        self._publish_runtime_topic_packet = publish_runtime_topic_packet
        self._bind_usage_message = bind_usage_message
        self._finalize_usage_from_packet = finalize_usage_from_packet
        self._workspace_root = workspace_root
        self._parse_message_media = parse_message_media
        self.manager = WSConnectionManager()

    # ---- cache keys -----------------------------------------------------

    @staticmethod
    def cache_key_bots_list(user_id: Optional[int] = None) -> str:
        """Cache key for a user's bot list (0 = anonymous)."""
        normalized_user_id = int(user_id or 0)
        return f"bots:list:user:{normalized_user_id}"

    @staticmethod
    def cache_key_bot_detail(bot_id: str) -> str:
        """Cache key for a single bot's detail payload."""
        return f"bot:detail:{bot_id}"

    @staticmethod
    def cache_key_bot_messages(bot_id: str, limit: int) -> str:
        """Cache key for the newest *limit* messages of a bot (v2 schema)."""
        return f"bot:messages:v2:{bot_id}:limit:{limit}"

    @staticmethod
    def cache_key_bot_messages_page(bot_id: str, limit: int, before_id: Optional[int]) -> str:
        """Cache key for a cursor-paged message window (v2 schema)."""
        cursor = str(int(before_id)) if isinstance(before_id, int) and before_id > 0 else "latest"
        return f"bot:messages:page:v2:{bot_id}:before:{cursor}:limit:{limit}"

    @staticmethod
    def cache_key_images() -> str:
        """Cache key for the global image list."""
        return "images:list"

    def invalidate_bot_detail_cache(self, bot_id: str) -> None:
        """Drop the bot's detail entry and every per-user bot list."""
        self._cache.delete(self.cache_key_bot_detail(bot_id))
        self._cache.delete_prefix("bots:list:user:")

    def invalidate_bot_messages_cache(self, bot_id: str) -> None:
        """Drop every cached message window for *bot_id*.

        Fix: the message caches are keyed with the versioned prefixes emitted
        by cache_key_bot_messages / cache_key_bot_messages_page
        ("bot:messages:v2:..." and "bot:messages:page:v2:..."), so the old
        single "bot:messages:{bot_id}:" prefix never matched them and stale
        message pages survived new traffic. The legacy prefix is still
        cleared for backward compatibility with any remaining old keys.
        """
        self._cache.delete_prefix(f"bot:messages:{bot_id}:")
        self._cache.delete_prefix(f"bot:messages:v2:{bot_id}:")
        self._cache.delete_prefix(f"bot:messages:page:v2:{bot_id}:")

    def invalidate_images_cache(self) -> None:
        """Drop the cached image list."""
        self._cache.delete(self.cache_key_images())

    # ---- normalization helpers -----------------------------------------

    @staticmethod
    def normalize_last_action_text(value: Any) -> str:
        """Normalize line endings, collapse runs of 4+ newlines down to 3,
        and cap the result at 16000 characters."""
        text = str(value or "").replace("\r\n", "\n").replace("\r", "\n").strip()
        if not text:
            return ""
        # Uses the module-level `re` import (replaces the previous
        # __import__("re") per-call hack).
        text = re.sub(r"\n{4,}", "\n\n\n", text)
        return text[:16000]

    @staticmethod
    def normalize_packet_channel(packet: Dict[str, Any]) -> str:
        """Collapse the packet's channel/source field to a canonical name."""
        raw = str(packet.get("channel") or packet.get("source") or "").strip().lower()
        if raw in {"dashboard", "dashboard_channel", "dashboard-channel"}:
            return "dashboard"
        return raw

    def normalize_media_item(self, bot_id: str, value: Any) -> str:
        """Convert a media reference to a workspace-relative path.

        Strips the in-container workspace prefix and relativizes host
        absolute paths that live inside the bot's workspace; anything else
        is returned with leading slashes removed.
        """
        raw = str(value or "").strip().replace("\\", "/")
        if not raw:
            return ""
        if raw.startswith("/root/.nanobot/workspace/"):
            return raw[len("/root/.nanobot/workspace/") :].lstrip("/")
        root = self._workspace_root(bot_id)
        if os.path.isabs(raw):
            try:
                if os.path.commonpath([root, raw]) == root:
                    return os.path.relpath(raw, root).replace("\\", "/")
            except Exception:
                # commonpath raises on mixed drives / relative mixes; fall through.
                pass
        return raw.lstrip("/")

    def normalize_media_list(self, raw: Any, bot_id: str) -> List[str]:
        """Normalize a packet media list; non-list input yields []."""
        if not isinstance(raw, list):
            return []
        rows: List[str] = []
        for value in raw:
            normalized = self.normalize_media_item(bot_id, value)
            if normalized:
                rows.append(normalized)
        return rows

    def serialize_bot_message_row(self, bot_id: str, row: BotMessage) -> Dict[str, Any]:
        """Shape a BotMessage row for the API (ms timestamp, parsed media)."""
        created_at = row.created_at
        if created_at.tzinfo is None:
            # Naive timestamps are treated as UTC by convention in this module.
            created_at = created_at.replace(tzinfo=timezone.utc)
        return {
            "id": row.id,
            "bot_id": row.bot_id,
            "role": row.role,
            "text": row.text,
            "media": self._parse_message_media(bot_id, getattr(row, "media_json", None)),
            "feedback": str(getattr(row, "feedback", "") or "").strip() or None,
            "ts": int(created_at.timestamp() * 1000),
        }

    @staticmethod
    def resolve_local_day_range(date_text: str, tz_offset_minutes: Optional[int]) -> tuple[datetime, datetime]:
        """Return the [start, end) UTC datetimes covering one local calendar day.

        NOTE(review): the offset is ADDED to local time to reach UTC, which
        matches the JS getTimezoneOffset() sign convention — confirm against
        the frontend caller. Raises 400 on malformed date or offset.
        """
        try:
            local_day = datetime.strptime(str(date_text or "").strip(), "%Y-%m-%d")
        except ValueError as exc:
            raise HTTPException(status_code=400, detail="Invalid date, expected YYYY-MM-DD") from exc
        offset_minutes = 0
        if tz_offset_minutes is not None:
            try:
                offset_minutes = int(tz_offset_minutes)
            except (TypeError, ValueError) as exc:
                raise HTTPException(status_code=400, detail="Invalid timezone offset") from exc
        utc_start = local_day + timedelta(minutes=offset_minutes)
        utc_end = utc_start + timedelta(days=1)
        return utc_start, utc_end

    # ---- packet handling -------------------------------------------------

    def persist_runtime_packet(self, bot_id: str, packet: Dict[str, Any]) -> Optional[int]:
        """Persist a dashboard-channel runtime packet.

        Updates the bot's state/last-action, stores chat messages for the
        message-bearing packet types, binds/finalizes usage rows, publishes
        the packet to the runtime topic, and invalidates the affected caches.
        Returns the id of the persisted BotMessage, if one was created.
        """
        packet_type = str(packet.get("type", "")).upper()
        if packet_type not in {"AGENT_STATE", "ASSISTANT_MESSAGE", "USER_COMMAND", "BUS_EVENT"}:
            return None
        source_channel = self.normalize_packet_channel(packet)
        if source_channel != "dashboard":
            # Non-dashboard traffic is persisted through other channels.
            return None
        persisted_message_id: Optional[int] = None
        with Session(self._engine) as session:
            bot = session.get(BotInstance, bot_id)
            if not bot:
                return None
            if packet_type == "AGENT_STATE":
                payload = packet.get("payload") or {}
                state = str(payload.get("state") or "").strip()
                action = self.normalize_last_action_text(payload.get("action_msg") or payload.get("msg") or "")
                if state:
                    bot.current_state = state
                if action:
                    bot.last_action = action
            elif packet_type == "ASSISTANT_MESSAGE":
                bot.current_state = "IDLE"
                text_msg = str(packet.get("text") or "").strip()
                media_list = self.normalize_media_list(packet.get("media"), bot_id)
                if text_msg or media_list:
                    if text_msg:
                        bot.last_action = self.normalize_last_action_text(text_msg)
                    message_row = BotMessage(
                        bot_id=bot_id,
                        role="assistant",
                        text=text_msg,
                        media_json=json.dumps(media_list, ensure_ascii=False) if media_list else None,
                    )
                    session.add(message_row)
                    session.flush()  # obtain the autoincrement id before commit
                    persisted_message_id = message_row.id
                    self._finalize_usage_from_packet(
                        session,
                        bot_id,
                        {
                            **packet,
                            "message_id": persisted_message_id,
                        },
                    )
            elif packet_type == "USER_COMMAND":
                text_msg = str(packet.get("text") or "").strip()
                media_list = self.normalize_media_list(packet.get("media"), bot_id)
                if text_msg or media_list:
                    message_row = BotMessage(
                        bot_id=bot_id,
                        role="user",
                        text=text_msg,
                        media_json=json.dumps(media_list, ensure_ascii=False) if media_list else None,
                    )
                    session.add(message_row)
                    session.flush()
                    persisted_message_id = message_row.id
                    self._bind_usage_message(
                        session,
                        bot_id,
                        str(packet.get("request_id") or "").strip(),
                        persisted_message_id,
                    )
            elif packet_type == "BUS_EVENT":
                is_progress = bool(packet.get("is_progress"))
                detail_text = str(packet.get("content") or packet.get("text") or "").strip()
                # Progress ticks are transient and never stored as messages.
                if not is_progress:
                    text_msg = detail_text
                    media_list = self.normalize_media_list(packet.get("media"), bot_id)
                    if text_msg or media_list:
                        bot.current_state = "IDLE"
                        if text_msg:
                            bot.last_action = self.normalize_last_action_text(text_msg)
                        message_row = BotMessage(
                            bot_id=bot_id,
                            role="assistant",
                            text=text_msg,
                            media_json=json.dumps(media_list, ensure_ascii=False) if media_list else None,
                        )
                        session.add(message_row)
                        session.flush()
                        persisted_message_id = message_row.id
                        self._finalize_usage_from_packet(
                            session,
                            bot_id,
                            {
                                "text": text_msg,
                                "usage": packet.get("usage"),
                                "request_id": packet.get("request_id"),
                                "provider": packet.get("provider"),
                                "model": packet.get("model"),
                                "message_id": persisted_message_id,
                            },
                        )
            bot.updated_at = datetime.utcnow()
            session.add(bot)
            session.commit()
        self._publish_runtime_topic_packet(
            self._engine,
            bot_id,
            packet,
            source_channel,
            persisted_message_id,
            self._logger,
        )
        if persisted_message_id:
            packet["message_id"] = persisted_message_id
        if packet_type in {"ASSISTANT_MESSAGE", "USER_COMMAND", "BUS_EVENT"}:
            self.invalidate_bot_messages_cache(bot_id)
        # AGENT_STATE also mutates detail fields (current_state/last_action),
        # so the detail cache is refreshed for every persisted packet type.
        self.invalidate_bot_detail_cache(bot_id)
        return persisted_message_id

    def broadcast_runtime_packet(self, bot_id: str, packet: Dict[str, Any], loop: Any) -> None:
        """Schedule a websocket broadcast onto *loop* from any thread."""
        asyncio.run_coroutine_threadsafe(self.manager.broadcast(bot_id, packet), loop)

    def docker_callback(self, bot_id: str, packet: Dict[str, Any]):
        """Entry point invoked from the runtime monitor thread: persist the
        packet, then broadcast it when the main event loop is available."""
        self.persist_runtime_packet(bot_id, packet)
        loop = getattr(self._app.state, "main_loop", None)
        if not loop or not loop.is_running():
            return
        asyncio.run_coroutine_threadsafe(self.manager.broadcast(bot_id, packet), loop)

View File

@ -0,0 +1,169 @@
from datetime import datetime
from typing import Any, Callable, Dict
from sqlmodel import Session, select
from models.bot import BotInstance, BotMessage
from fastapi import HTTPException
from providers.runtime.base import RuntimeProvider
from services.bot_command_service import BotCommandService
class RuntimeService:
    """Facade over a bot's runtime provider.

    Coordinates start/stop, command delivery, log retrieval, and the
    history/session clearing flows, keeping the dashboard caches and the
    activity log in sync via injected callbacks.
    """

    def __init__(
        self,
        *,
        command_service: BotCommandService,
        resolve_runtime_provider: Callable[[Any, BotInstance], RuntimeProvider],
        clear_bot_sessions: Callable[[str], int],
        clear_dashboard_direct_session_file: Callable[[str], Dict[str, Any]],
        invalidate_bot_detail_cache: Callable[[str], None],
        invalidate_bot_messages_cache: Callable[[str], None],
        record_activity_event: Callable[..., None],
    ) -> None:
        # All collaborators are injected; the service holds no global state.
        self._command_service = command_service
        self._resolve_runtime_provider = resolve_runtime_provider
        self._clear_bot_sessions = clear_bot_sessions
        self._clear_dashboard_direct_session_file = clear_dashboard_direct_session_file
        self._invalidate_bot_detail_cache = invalidate_bot_detail_cache
        self._invalidate_bot_messages_cache = invalidate_bot_messages_cache
        self._record_activity_event = record_activity_event

    def _require_bot(self, *, session: Session, bot_id: str) -> BotInstance:
        """Load the bot row or raise 404 when it does not exist."""
        bot = session.get(BotInstance, bot_id)
        if not bot:
            raise HTTPException(status_code=404, detail="Bot not found")
        return bot

    async def start_bot(self, *, app_state: Any, session: Session, bot: BotInstance) -> Dict[str, Any]:
        """Start the bot via its runtime provider; refresh the detail cache."""
        result = await self._resolve_runtime_provider(app_state, bot).start_bot(session=session, bot=bot)
        self._invalidate_bot_detail_cache(str(bot.id or ""))
        return result

    def stop_bot(self, *, app_state: Any, session: Session, bot: BotInstance) -> Dict[str, Any]:
        """Stop the bot via its runtime provider; refresh the detail cache."""
        result = self._resolve_runtime_provider(app_state, bot).stop_bot(session=session, bot=bot)
        self._invalidate_bot_detail_cache(str(bot.id or ""))
        return result

    async def clear_messages_for_bot(self, *, app_state: Any, session: Session, bot_id: str) -> Dict[str, Any]:
        """404-checked variant of clear_messages addressed by bot id.

        NOTE(review): declared async but delegates to a sync method —
        presumably to match an async route signature; confirm with callers.
        """
        bot = self._require_bot(session=session, bot_id=bot_id)
        return self.clear_messages(app_state=app_state, session=session, bot=bot)

    def clear_dashboard_direct_session_for_bot(self, *, app_state: Any, session: Session, bot_id: str) -> Dict[str, Any]:
        """404-checked variant of clear_dashboard_direct_session by bot id."""
        bot = self._require_bot(session=session, bot_id=bot_id)
        return self.clear_dashboard_direct_session(app_state=app_state, session=session, bot=bot)

    def get_logs_for_bot(self, *, app_state: Any, session: Session, bot_id: str, tail: int = 300) -> Dict[str, Any]:
        """404-checked variant of get_logs addressed by bot id."""
        bot = self._require_bot(session=session, bot_id=bot_id)
        return self.get_logs(app_state=app_state, bot=bot, tail=tail)

    def send_command_for_bot(
        self,
        *,
        app_state: Any,
        session: Session,
        bot_id: str,
        payload: Any,
    ) -> Dict[str, Any]:
        """404-checked variant of send_command addressed by bot id."""
        bot = self._require_bot(session=session, bot_id=bot_id)
        return self.send_command(
            app_state=app_state,
            session=session,
            bot_id=bot_id,
            bot=bot,
            payload=payload,
        )

    def send_command(
        self,
        *,
        app_state: Any,
        session: Session,
        bot_id: str,
        bot: BotInstance,
        payload: Any,
    ) -> Dict[str, Any]:
        """Delegate command execution to the command service, supplying the
        runtime provider resolved for this bot."""
        return self._command_service.execute(
            session=session,
            bot_id=bot_id,
            bot=bot,
            payload=payload,
            runtime_provider=self._resolve_runtime_provider(app_state, bot),
            app_state=app_state,
        )

    def get_logs(self, *, app_state: Any, bot: BotInstance, tail: int = 300) -> Dict[str, Any]:
        """Return the last *tail* runtime log lines for the bot."""
        return {
            "bot_id": bot.id,
            "logs": self._resolve_runtime_provider(app_state, bot).get_recent_logs(bot_id=bot.id, tail=tail),
        }

    def ensure_monitor(self, *, app_state: Any, bot: BotInstance) -> bool:
        """Ensure the runtime monitor for the bot is attached; True on success."""
        return bool(self._resolve_runtime_provider(app_state, bot).ensure_monitor(bot_id=bot.id))

    def sync_edge_monitor_packets(self, *, app_state: Any, bot: BotInstance, request_id: str) -> None:
        """Pull pending monitor packets for *request_id* from an edge runtime."""
        runtime_provider = self._resolve_runtime_provider(app_state, bot)
        self._command_service.sync_edge_monitor_packets(
            runtime_provider=runtime_provider,
            bot_id=str(bot.id or "").strip(),
            request_id=str(request_id or "").strip(),
            app_state=app_state,
        )

    def clear_messages(self, *, app_state: Any, session: Session, bot: BotInstance) -> Dict[str, Any]:
        """Delete all stored messages and sessions for the bot.

        Also resets the running runtime's conversation (best effort), clears
        the bot's state fields, records an activity event, commits, and
        invalidates the detail/messages caches.
        """
        bot_id = str(bot.id or "").strip()
        rows = session.exec(select(BotMessage).where(BotMessage.bot_id == bot_id)).all()
        deleted = 0
        # Rows are deleted individually so ORM-level cascades/hooks apply.
        for row in rows:
            session.delete(row)
            deleted += 1
        cleared_sessions = self._clear_bot_sessions(bot_id)
        self._reset_running_runtime_session(app_state=app_state, bot=bot)
        bot.last_action = ""
        bot.current_state = "IDLE"
        bot.updated_at = datetime.utcnow()
        session.add(bot)
        self._record_activity_event(
            session,
            bot_id,
            "history_cleared",
            channel="system",
            detail=f"Cleared {deleted} stored messages",
            metadata={"deleted_messages": deleted, "cleared_sessions": cleared_sessions},
        )
        session.commit()
        self._invalidate_bot_detail_cache(bot_id)
        self._invalidate_bot_messages_cache(bot_id)
        return {"bot_id": bot_id, "deleted": deleted, "cleared_sessions": cleared_sessions}

    def clear_dashboard_direct_session(self, *, app_state: Any, session: Session, bot: BotInstance) -> Dict[str, Any]:
        """Remove the dashboard_direct session file and reset the runtime.

        Records an activity event, commits, and invalidates the detail cache.
        """
        bot_id = str(bot.id or "").strip()
        result = self._clear_dashboard_direct_session_file(bot_id)
        self._reset_running_runtime_session(app_state=app_state, bot=bot)
        bot.updated_at = datetime.utcnow()
        session.add(bot)
        self._record_activity_event(
            session,
            bot_id,
            "dashboard_session_cleared",
            channel="dashboard",
            detail="Cleared dashboard_direct session file",
            metadata={"session_file": result["path"], "previously_existed": result["existed"]},
        )
        session.commit()
        self._invalidate_bot_detail_cache(bot_id)
        return {"bot_id": bot_id, "cleared": True, "session_file": result["path"], "previously_existed": result["existed"]}

    def _reset_running_runtime_session(self, *, app_state: Any, bot: BotInstance) -> None:
        """Best-effort: tell a RUNNING runtime to start a fresh conversation.

        Failures are swallowed — the stored history has already been cleared
        and a delivery error must not fail the request.
        """
        if not self._is_runtime_running(bot):
            return
        try:
            self._resolve_runtime_provider(app_state, bot).deliver_command(bot_id=str(bot.id), command="/new")
        except Exception:
            pass

    @staticmethod
    def _is_runtime_running(bot: BotInstance) -> bool:
        """True when either runtime_status or docker_status reads RUNNING."""
        runtime_status = str(getattr(bot, "runtime_status", None) or getattr(bot, "docker_status", None) or "").upper()
        return runtime_status == "RUNNING"

View File

@ -0,0 +1,898 @@
import json
import logging
import os
import re
import shutil
import tempfile
import zipfile
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional
from fastapi import HTTPException, UploadFile
from sqlmodel import Session, select
from clients.edge.errors import log_edge_failure
from core.settings import BOTS_WORKSPACE_ROOT, DATA_ROOT
from models.bot import BotInstance
from models.skill import BotSkillInstall, SkillMarketItem
from services.platform_settings_service import get_platform_settings_snapshot
EdgeStateContextResolver = Callable[[str], Optional[tuple[Any, Optional[str], str]]]
class SkillService:
def _require_bot(self, *, session: Session, bot_id: str) -> BotInstance:
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return bot
    def _workspace_root(self, bot_id: str) -> str:
        """Absolute path of the bot's nanobot workspace on the local host."""
        return os.path.abspath(os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot", "workspace"))
    def _skills_root(self, bot_id: str) -> str:
        """Absolute path of the skills/ directory inside the bot workspace."""
        return os.path.join(self._workspace_root(bot_id), "skills")
    def _skill_market_root(self) -> str:
        """Absolute path of the shared skill-market package directory."""
        return os.path.abspath(os.path.join(DATA_ROOT, "skills"))
def _is_valid_top_level_skill_name(self, name: str) -> bool:
text = str(name or "").strip()
if not text:
return False
if "/" in text or "\\" in text:
return False
if text in {".", ".."}:
return False
return True
def _read_skill_description(self, entry_path: str) -> str:
candidates: List[str] = []
if os.path.isdir(entry_path):
candidates = [
os.path.join(entry_path, "SKILL.md"),
os.path.join(entry_path, "skill.md"),
os.path.join(entry_path, "README.md"),
os.path.join(entry_path, "readme.md"),
]
elif entry_path.lower().endswith(".md"):
candidates = [entry_path]
for candidate in candidates:
if not os.path.isfile(candidate):
continue
try:
with open(candidate, "r", encoding="utf-8") as file:
for line in file:
text = line.strip()
if text and not text.startswith("#"):
return text[:240]
except Exception:
continue
return ""
    def list_workspace_skills(
        self,
        *,
        bot_id: str,
        resolve_edge_state_context: EdgeStateContextResolver,
        logger: logging.Logger,
    ) -> List[Dict[str, Any]]:
        """Enumerate top-level entries under the bot's skills/ directory.

        When the bot runs on an edge node, the listing is fetched through the
        edge client (descriptions are not read remotely); otherwise the local
        workspace filesystem is scanned. Directories sort before files, then
        case-insensitively by name. Returns [] when the directory is missing
        or the edge call fails.
        """
        edge_context = resolve_edge_state_context(bot_id)
        if edge_context is not None:
            client, workspace_root, node_id = edge_context
            try:
                payload = client.list_tree(
                    bot_id=bot_id,
                    path="skills",
                    recursive=False,
                    workspace_root=workspace_root,
                )
            except Exception as exc:
                # Edge failures are logged (rate-limited by key) and treated
                # as "no skills" rather than surfaced to the caller.
                log_edge_failure(
                    logger,
                    key=f"skills-list:{node_id}:{bot_id}",
                    exc=exc,
                    message=f"Failed to list skills from edge workspace for bot_id={bot_id}",
                )
                return []
            rows: List[Dict[str, Any]] = []
            for entry in list(payload.get("entries") or []):
                if not isinstance(entry, dict):
                    continue
                name = str(entry.get("name") or "").strip()
                # Hidden entries and unsafe names are skipped.
                if not name or name.startswith("."):
                    continue
                if not self._is_valid_top_level_skill_name(name):
                    continue
                entry_type = str(entry.get("type") or "").strip().lower()
                if entry_type not in {"dir", "file"}:
                    continue
                # Fall back to "now" when the edge node omits the mtime.
                mtime = str(entry.get("mtime") or "").strip() or (datetime.utcnow().isoformat() + "Z")
                size = entry.get("size")
                rows.append(
                    {
                        "id": name,
                        "name": name,
                        "type": entry_type,
                        "path": f"skills/{name}",
                        "size": int(size) if isinstance(size, (int, float)) and entry_type == "file" else None,
                        "mtime": mtime,
                        # Descriptions are not fetched remotely to avoid extra
                        # edge round-trips per entry.
                        "description": "",
                    }
                )
            rows.sort(key=lambda row: (row.get("type") != "dir", str(row.get("name") or "").lower()))
            return rows
        # Local filesystem path: scan the workspace skills directory directly.
        root = self._skills_root(bot_id)
        if not os.path.isdir(root):
            return []
        rows: List[Dict[str, Any]] = []
        names = sorted(os.listdir(root), key=lambda name: (not os.path.isdir(os.path.join(root, name)), name.lower()))
        for name in names:
            if not name or name.startswith("."):
                continue
            if not self._is_valid_top_level_skill_name(name):
                continue
            abs_path = os.path.join(root, name)
            # Guard against entries vanishing between listdir and stat.
            if not os.path.exists(abs_path):
                continue
            stat = os.stat(abs_path)
            rows.append(
                {
                    "id": name,
                    "name": name,
                    "type": "dir" if os.path.isdir(abs_path) else "file",
                    "path": f"skills/{name}",
                    "size": stat.st_size if os.path.isfile(abs_path) else None,
                    "mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z",
                    "description": self._read_skill_description(abs_path),
                }
            )
        return rows
    def list_workspace_skills_for_bot(
        self,
        *,
        session: Session,
        bot_id: str,
        resolve_edge_state_context: EdgeStateContextResolver,
        logger: logging.Logger,
    ) -> List[Dict[str, Any]]:
        """Validate the bot exists (404 otherwise), then list its skills."""
        self._require_bot(session=session, bot_id=bot_id)
        return self.list_workspace_skills(
            bot_id=bot_id,
            resolve_edge_state_context=resolve_edge_state_context,
            logger=logger,
        )
def _parse_json_string_list(self, raw: Any) -> List[str]:
if not raw:
return []
try:
data = json.loads(str(raw))
except Exception:
return []
if not isinstance(data, list):
return []
rows: List[str] = []
for item in data:
text = str(item or "").strip()
if text and text not in rows:
rows.append(text)
return rows
def _is_ignored_skill_zip_top_level(self, name: str) -> bool:
text = str(name or "").strip()
if not text:
return True
lowered = text.lower()
if lowered == "__macosx":
return True
if text.startswith("."):
return True
return False
def _read_description_from_text(self, raw: str) -> str:
for line in str(raw or "").splitlines():
text = line.strip()
if text and not text.startswith("#"):
return text[:240]
return ""
    def _extract_skill_zip_summary(self, zip_path: str) -> Dict[str, Any]:
        """Inspect a skill zip and return {"entry_names": [...], "description": str}.

        entry_names: distinct, valid top-level names seen among the archive's
        file members (macOS metadata and dotfile entries ignored).
        description: first non-heading line of the shallowest skill.md /
        readme.md member, if one is readable; "" otherwise.
        """
        entry_names: List[str] = []
        description = ""
        with zipfile.ZipFile(zip_path) as archive:
            members = archive.infolist()
            file_members = [member for member in members if not member.is_dir()]
            for member in file_members:
                # Normalize separators and strip any leading slash so the
                # top-level component can be taken reliably.
                raw_name = str(member.filename or "").replace("\\", "/").lstrip("/")
                if not raw_name:
                    continue
                first = raw_name.split("/", 1)[0].strip()
                if self._is_ignored_skill_zip_top_level(first):
                    continue
                if self._is_valid_top_level_skill_name(first) and first not in entry_names:
                    entry_names.append(first)
            # Prefer docs closest to the archive root, then alphabetical.
            candidates = sorted(
                [
                    str(member.filename or "").replace("\\", "/").lstrip("/")
                    for member in file_members
                    if str(member.filename or "").replace("\\", "/").rsplit("/", 1)[-1].lower() in {"skill.md", "readme.md"}
                ],
                key=lambda value: (value.count("/"), value.lower()),
            )
            for candidate in candidates:
                try:
                    with archive.open(candidate, "r") as file:
                        # Only peek at the first 4KB for the description line.
                        preview = file.read(4096).decode("utf-8", errors="ignore")
                        description = self._read_description_from_text(preview)
                        if description:
                            break
                except Exception:
                    # Unreadable member: try the next candidate.
                    continue
        return {
            "entry_names": entry_names,
            "description": description,
        }
def _sanitize_skill_market_key(self, raw: Any) -> str:
value = str(raw or "").strip().lower()
value = re.sub(r"[^a-z0-9._-]+", "-", value)
value = re.sub(r"-{2,}", "-", value).strip("._-")
return value[:120]
def _sanitize_zip_filename(self, raw: Any) -> str:
filename = os.path.basename(str(raw or "").strip())
if not filename:
return ""
filename = filename.replace("\\", "/").rsplit("/", 1)[-1]
stem, ext = os.path.splitext(filename)
safe_stem = re.sub(r"[^A-Za-z0-9._-]+", "-", stem).strip("._-")
if not safe_stem:
safe_stem = "skill-package"
safe_ext = ".zip" if ext.lower() == ".zip" else ""
return f"{safe_stem[:180]}{safe_ext}"
def _resolve_unique_skill_market_key(self, session: Session, preferred_key: str, exclude_id: Optional[int] = None) -> str:
base_key = self._sanitize_skill_market_key(preferred_key) or "skill"
candidate = base_key
counter = 2
while True:
stmt = select(SkillMarketItem).where(SkillMarketItem.skill_key == candidate)
rows = session.exec(stmt).all()
conflict = next((row for row in rows if exclude_id is None or row.id != exclude_id), None)
if not conflict:
return candidate
candidate = f"{base_key}-{counter}"
counter += 1
def _resolve_unique_skill_market_zip_filename(
    self,
    session: Session,
    filename: str,
    *,
    exclude_filename: Optional[str] = None,
    exclude_id: Optional[int] = None,
) -> str:
    """Return a .zip filename unique both on disk and in SkillMarketItem rows.

    ``exclude_filename`` / ``exclude_id`` let an update keep its current
    file/row without counting it as a conflict. Appends ``-2``, ``-3``, ...
    to the stem until no conflict remains. Raises 400 for non-.zip input.
    """
    root = self._skill_market_root()
    os.makedirs(root, exist_ok=True)
    safe_name = self._sanitize_zip_filename(filename)
    if not safe_name.lower().endswith(".zip"):
        raise HTTPException(status_code=400, detail="Only .zip skill package is supported")
    candidate = safe_name
    stem, ext = os.path.splitext(safe_name)
    counter = 2
    while True:
        # Conflict on disk, unless the existing file is the caller's own.
        file_conflict = os.path.exists(os.path.join(root, candidate)) and candidate != str(exclude_filename or "").strip()
        rows = session.exec(select(SkillMarketItem).where(SkillMarketItem.zip_filename == candidate)).all()
        # Conflict in the database, ignoring the caller's own row.
        db_conflict = next((row for row in rows if exclude_id is None or row.id != exclude_id), None)
        if not file_conflict and not db_conflict:
            return candidate
        candidate = f"{stem}-{counter}{ext}"
        counter += 1
async def _store_skill_market_zip_upload(
    self,
    session: Session,
    upload: UploadFile,
    *,
    exclude_filename: Optional[str] = None,
    exclude_id: Optional[int] = None,
) -> Dict[str, Any]:
    """Stream an uploaded zip into the market root and summarize its contents.

    Returns a dict with ``zip_filename``, ``zip_size_bytes``, ``entry_names``
    and ``description``. Raises 400 for non-zip/empty/invalid packages and
    413 when the upload exceeds the configured size limit. The temp file is
    promoted atomically via ``os.replace`` and removed on failure.
    """
    root = self._skill_market_root()
    os.makedirs(root, exist_ok=True)
    incoming_name = self._sanitize_zip_filename(upload.filename or "")
    if not incoming_name.lower().endswith(".zip"):
        raise HTTPException(status_code=400, detail="Only .zip skill package is supported")
    target_filename = self._resolve_unique_skill_market_zip_filename(
        session,
        incoming_name,
        exclude_filename=exclude_filename,
        exclude_id=exclude_id,
    )
    max_bytes = get_platform_settings_snapshot().upload_max_mb * 1024 * 1024
    total_size = 0
    tmp_path: Optional[str] = None
    try:
        # Write to a temp file inside the same directory so os.replace below
        # is a same-filesystem atomic rename.
        with tempfile.NamedTemporaryFile(prefix=".skill_market_", suffix=".zip", dir=root, delete=False) as tmp_zip:
            tmp_path = tmp_zip.name
            # Copy the upload in 1 MiB chunks to bound memory usage.
            while True:
                chunk = await upload.read(1024 * 1024)
                if not chunk:
                    break
                total_size += len(chunk)
                if total_size > max_bytes:
                    raise HTTPException(
                        status_code=413,
                        detail=f"Zip package too large (max {max_bytes // (1024 * 1024)}MB)",
                    )
                tmp_zip.write(chunk)
        if total_size == 0:
            raise HTTPException(status_code=400, detail="Zip package is empty")
        summary = self._extract_skill_zip_summary(tmp_path)
        if not summary["entry_names"]:
            raise HTTPException(status_code=400, detail="Zip package has no valid skill entries")
        final_path = os.path.join(root, target_filename)
        os.replace(tmp_path, final_path)
        # Ownership transferred to final_path; skip the cleanup in finally.
        tmp_path = None
        return {
            "zip_filename": target_filename,
            "zip_size_bytes": total_size,
            "entry_names": summary["entry_names"],
            "description": summary["description"],
        }
    except zipfile.BadZipFile as exc:
        raise HTTPException(status_code=400, detail="Invalid zip file") from exc
    finally:
        await upload.close()
        if tmp_path and os.path.exists(tmp_path):
            os.remove(tmp_path)
def serialize_skill_market_item(
    self,
    item: SkillMarketItem,
    *,
    install_count: int = 0,
    install_row: Optional[BotSkillInstall] = None,
    workspace_installed: Optional[bool] = None,
    installed_entries: Optional[List[str]] = None,
) -> Dict[str, Any]:
    """Build the API payload for a market item.

    When ``install_row`` is given, per-bot install fields are appended.
    ``workspace_installed`` / ``installed_entries`` override the values
    derived from the row when the caller has fresher workspace state.
    """
    zip_path = os.path.join(self._skill_market_root(), str(item.zip_filename or ""))
    entry_names = self._parse_json_string_list(item.entry_names_json)
    payload = {
        "id": item.id,
        "skill_key": item.skill_key,
        "display_name": item.display_name or item.skill_key,
        "description": item.description or "",
        "zip_filename": item.zip_filename,
        "zip_size_bytes": int(item.zip_size_bytes or 0),
        "entry_names": entry_names,
        "entry_count": len(entry_names),
        # Whether the backing package is still present on disk.
        "zip_exists": os.path.isfile(zip_path),
        "install_count": int(install_count or 0),
        # Timestamps are serialized with a trailing "Z" (stored via utcnow elsewhere).
        "created_at": item.created_at.isoformat() + "Z" if item.created_at else None,
        "updated_at": item.updated_at.isoformat() + "Z" if item.updated_at else None,
    }
    if install_row is not None:
        resolved_entries = installed_entries if installed_entries is not None else self._parse_json_string_list(install_row.installed_entries_json)
        resolved_installed = workspace_installed if workspace_installed is not None else install_row.status == "INSTALLED"
        payload.update(
            {
                "installed": resolved_installed,
                "install_status": install_row.status,
                "installed_at": install_row.installed_at.isoformat() + "Z" if install_row.installed_at else None,
                "installed_entries": resolved_entries,
                "install_error": install_row.last_error,
            }
        )
    return payload
def list_market_items(self, *, session: Session) -> List[Dict[str, Any]]:
    """Serialize every market item together with its INSTALLED-install count."""
    items = session.exec(
        select(SkillMarketItem).order_by(SkillMarketItem.display_name, SkillMarketItem.id)
    ).all()
    # Count only rows in INSTALLED state, keyed by the market item id.
    counts: Dict[int, int] = {}
    for install in session.exec(select(BotSkillInstall)).all():
        if install.status != "INSTALLED":
            continue
        key = int(install.skill_market_item_id or 0)
        if key > 0:
            counts[key] = counts.get(key, 0) + 1
    serialized: List[Dict[str, Any]] = []
    for item in items:
        serialized.append(
            self.serialize_skill_market_item(item, install_count=counts.get(int(item.id or 0), 0))
        )
    return serialized
async def create_market_item(
    self,
    *,
    session: Session,
    skill_key: str,
    display_name: str,
    description: str,
    file: UploadFile,
) -> Dict[str, Any]:
    """Store an uploaded zip and create the matching SkillMarketItem row.

    Falls back to the display name or the zip filename stem for the skill
    key, and to the description extracted from the zip when none is given.
    """
    upload_meta = await self._store_skill_market_zip_upload(session, file)
    try:
        preferred_key = skill_key or display_name or os.path.splitext(upload_meta["zip_filename"])[0]
        next_key = self._resolve_unique_skill_market_key(session, preferred_key)
        item = SkillMarketItem(
            skill_key=next_key,
            display_name=str(display_name or next_key).strip() or next_key,
            description=str(description or upload_meta["description"] or "").strip(),
            zip_filename=upload_meta["zip_filename"],
            zip_size_bytes=int(upload_meta["zip_size_bytes"] or 0),
            entry_names_json=json.dumps(upload_meta["entry_names"], ensure_ascii=False),
        )
        session.add(item)
        session.commit()
        session.refresh(item)
        return self.serialize_skill_market_item(item, install_count=0)
    except Exception:
        # Remove the stored zip so a failed DB insert leaves no orphan file.
        target_path = os.path.join(self._skill_market_root(), upload_meta["zip_filename"])
        if os.path.exists(target_path):
            os.remove(target_path)
        raise
async def update_market_item(
    self,
    *,
    session: Session,
    skill_id: int,
    skill_key: str,
    display_name: str,
    description: str,
    file: Optional[UploadFile],
) -> Dict[str, Any]:
    """Update a market item's metadata and optionally replace its zip package.

    When a new file is supplied, the replacement zip is stored first and the
    old file is deleted only after the row update is committed.
    """
    item = session.get(SkillMarketItem, skill_id)
    if not item:
        raise HTTPException(status_code=404, detail="Skill market item not found")
    old_filename = str(item.zip_filename or "").strip()
    upload_meta: Optional[Dict[str, Any]] = None
    if file is not None:
        upload_meta = await self._store_skill_market_zip_upload(
            session,
            file,
            exclude_filename=old_filename or None,
            exclude_id=item.id,
        )
    # Re-derive a unique key; the item's own row is excluded from conflicts.
    next_key = self._resolve_unique_skill_market_key(
        session,
        skill_key or item.skill_key or display_name or os.path.splitext(upload_meta["zip_filename"] if upload_meta else old_filename)[0],
        exclude_id=item.id,
    )
    item.skill_key = next_key
    item.display_name = str(display_name or item.display_name or next_key).strip() or next_key
    item.description = str(description or (upload_meta["description"] if upload_meta else item.description) or "").strip()
    item.updated_at = datetime.utcnow()
    if upload_meta:
        item.zip_filename = upload_meta["zip_filename"]
        item.zip_size_bytes = int(upload_meta["zip_size_bytes"] or 0)
        item.entry_names_json = json.dumps(upload_meta["entry_names"], ensure_ascii=False)
    session.add(item)
    session.commit()
    session.refresh(item)
    if upload_meta and old_filename and old_filename != upload_meta["zip_filename"]:
        # Drop the superseded zip only after the new state is committed.
        old_path = os.path.join(self._skill_market_root(), old_filename)
        if os.path.exists(old_path):
            os.remove(old_path)
    installs = session.exec(select(BotSkillInstall).where(BotSkillInstall.skill_market_item_id == skill_id)).all()
    install_count = sum(1 for row in installs if row.status == "INSTALLED")
    return self.serialize_skill_market_item(item, install_count=install_count)
def delete_market_item(self, *, session: Session, skill_id: int) -> Dict[str, Any]:
    """Delete a market item, its install records, and its zip file on disk."""
    item = session.get(SkillMarketItem, skill_id)
    if not item:
        raise HTTPException(status_code=404, detail="Skill market item not found")
    stored_zip = str(item.zip_filename or "").strip()
    install_rows = session.exec(
        select(BotSkillInstall).where(BotSkillInstall.skill_market_item_id == skill_id)
    ).all()
    for install_row in install_rows:
        session.delete(install_row)
    session.delete(item)
    session.commit()
    # Remove the package from disk only after the DB delete is committed.
    if stored_zip:
        package_path = os.path.join(self._skill_market_root(), stored_zip)
        if os.path.exists(package_path):
            os.remove(package_path)
    return {"status": "deleted", "id": skill_id}
def list_bot_market_items(
    self,
    *,
    bot_id: str,
    session: Session,
    resolve_edge_state_context: EdgeStateContextResolver,
    logger: logging.Logger,
) -> List[Dict[str, Any]]:
    """Serialize every market item with this bot's install state attached.

    A skill is reported as workspace-installed only when its install row is
    INSTALLED and every recorded entry is present in the bot workspace.
    """
    items = session.exec(
        select(SkillMarketItem).order_by(SkillMarketItem.display_name, SkillMarketItem.id)
    ).all()
    bot_installs = session.exec(select(BotSkillInstall).where(BotSkillInstall.bot_id == bot_id)).all()
    install_lookup = {int(row.skill_market_item_id): row for row in bot_installs}
    # Global INSTALLED counts across all bots, keyed by market item id.
    counts: Dict[int, int] = {}
    for row in session.exec(select(BotSkillInstall)).all():
        sid = int(row.skill_market_item_id or 0)
        if sid > 0 and row.status == "INSTALLED":
            counts[sid] = counts.get(sid, 0) + 1
    workspace_skill_names = {
        str(entry.get("name") or "").strip()
        for entry in self.list_workspace_skills(
            bot_id=bot_id,
            resolve_edge_state_context=resolve_edge_state_context,
            logger=logger,
        )
    }
    payloads: List[Dict[str, Any]] = []
    for item in items:
        item_id = int(item.id or 0)
        install_row = install_lookup.get(item_id)
        if install_row is None:
            entries = None
            installed = None
        else:
            entries = self._parse_json_string_list(install_row.installed_entries_json)
            installed = install_row.status == "INSTALLED" and all(
                name in workspace_skill_names for name in entries
            )
        payloads.append(
            self.serialize_skill_market_item(
                item,
                install_count=counts.get(item_id, 0),
                install_row=install_row,
                workspace_installed=installed,
                installed_entries=entries,
            )
        )
    return payloads
def list_bot_market_items_for_bot(
    self,
    *,
    session: Session,
    bot_id: str,
    resolve_edge_state_context: EdgeStateContextResolver,
    logger: logging.Logger,
) -> List[Dict[str, Any]]:
    """Validate that the bot exists, then delegate to ``list_bot_market_items``."""
    self._require_bot(session=session, bot_id=bot_id)
    return self.list_bot_market_items(
        session=session,
        bot_id=bot_id,
        resolve_edge_state_context=resolve_edge_state_context,
        logger=logger,
    )
def _install_skill_zip_into_workspace(
    self,
    *,
    bot_id: str,
    zip_path: str,
    resolve_edge_state_context: EdgeStateContextResolver,
    logger: logging.Logger,
) -> Dict[str, Any]:
    """Extract a skill zip and install its top-level entries into the workspace.

    Local bots get the entries moved under the skills root on disk; edge
    bots get the files pushed through the edge client's upload API. Raises
    400 for invalid zips, unsafe member paths, invalid entry names, or
    conflicts with entries already present in the workspace.
    """
    try:
        archive = zipfile.ZipFile(zip_path)
    except Exception as exc:
        raise HTTPException(status_code=400, detail="Invalid zip file") from exc
    edge_context = resolve_edge_state_context(bot_id)
    skills_root = self._skills_root(bot_id)
    installed: List[str] = []
    with archive:
        members = archive.infolist()
        file_members = [member for member in members if not member.is_dir()]
        if not file_members:
            raise HTTPException(status_code=400, detail="Zip package has no files")
        # Collect the distinct top-level entry names, skipping ignorable ones.
        top_names: List[str] = []
        for member in file_members:
            raw_name = str(member.filename or "").replace("\\", "/").lstrip("/")
            if not raw_name:
                continue
            first = raw_name.split("/", 1)[0].strip()
            if self._is_ignored_skill_zip_top_level(first):
                continue
            if not self._is_valid_top_level_skill_name(first):
                raise HTTPException(status_code=400, detail=f"Invalid skill entry name in zip: {first}")
            if first not in top_names:
                top_names.append(first)
        if not top_names:
            raise HTTPException(status_code=400, detail="Zip package has no valid skill entries")
        # Refuse to overwrite entries that already exist in the workspace.
        if edge_context is not None:
            existing_names = {
                str(item.get("name") or "").strip()
                for item in self.list_workspace_skills(
                    bot_id=bot_id,
                    resolve_edge_state_context=resolve_edge_state_context,
                    logger=logger,
                )
                if isinstance(item, dict)
            }
            conflicts = [name for name in top_names if name in existing_names]
        else:
            os.makedirs(skills_root, exist_ok=True)
            conflicts = [name for name in top_names if os.path.exists(os.path.join(skills_root, name))]
        if conflicts:
            raise HTTPException(status_code=400, detail=f"Skill already exists: {', '.join(conflicts)}")
        # Local installs extract inside the skills root; edge installs use the
        # system temp dir (no local destination to keep the temp dir near).
        temp_dir_root = skills_root if edge_context is None else None
        with tempfile.TemporaryDirectory(prefix=".skill_upload_", dir=temp_dir_root) as tmp_dir:
            tmp_root = os.path.abspath(tmp_dir)
            for member in members:
                raw_name = str(member.filename or "").replace("\\", "/").lstrip("/")
                if not raw_name:
                    continue
                target = os.path.abspath(os.path.join(tmp_root, raw_name))
                # Zip-slip guard: the resolved path must stay inside tmp_root.
                if os.path.commonpath([tmp_root, target]) != tmp_root:
                    raise HTTPException(status_code=400, detail=f"Unsafe zip entry path: {raw_name}")
                if member.is_dir():
                    os.makedirs(target, exist_ok=True)
                    continue
                os.makedirs(os.path.dirname(target), exist_ok=True)
                with archive.open(member, "r") as source, open(target, "wb") as dest:
                    shutil.copyfileobj(source, dest)
            if edge_context is not None:
                client, workspace_root, _node_id = edge_context
                # Group files by remote target dir so each dir is one upload call.
                upload_groups: Dict[str, List[str]] = {}
                for name in top_names:
                    src = os.path.join(tmp_root, name)
                    if not os.path.exists(src):
                        continue
                    if os.path.isfile(src):
                        upload_groups.setdefault("skills", []).append(src)
                        installed.append(name)
                        continue
                    for walk_root, _dirs, files in os.walk(src):
                        for filename in files:
                            local_path = os.path.join(walk_root, filename)
                            relative_path = os.path.relpath(local_path, tmp_root).replace("\\", "/")
                            relative_dir = os.path.dirname(relative_path).strip("/")
                            target_dir = f"skills/{relative_dir}" if relative_dir else "skills"
                            upload_groups.setdefault(target_dir, []).append(local_path)
                    installed.append(name)
                for target_dir, local_paths in upload_groups.items():
                    client.upload_local_files(
                        bot_id=bot_id,
                        local_paths=local_paths,
                        path=target_dir,
                        workspace_root=workspace_root,
                    )
            else:
                for name in top_names:
                    src = os.path.join(tmp_root, name)
                    dst = os.path.join(skills_root, name)
                    if not os.path.exists(src):
                        continue
                    shutil.move(src, dst)
                    installed.append(name)
    if not installed:
        raise HTTPException(status_code=400, detail="No skill entries installed from zip")
    return {
        "installed": installed,
        "skills": self.list_workspace_skills(
            bot_id=bot_id,
            resolve_edge_state_context=resolve_edge_state_context,
            logger=logger,
        ),
    }
def _record_skill_install_failure(
    self,
    *,
    session: Session,
    bot_id: str,
    skill_id: int,
    item: SkillMarketItem,
    install_row: Optional[BotSkillInstall],
    message: str,
) -> None:
    """Persist a FAILED BotSkillInstall record for this bot/skill pair.

    Creates the row if it does not exist yet; otherwise updates it in place.
    """
    now = datetime.utcnow()
    if not install_row:
        install_row = BotSkillInstall(
            bot_id=bot_id,
            skill_market_item_id=skill_id,
            installed_at=now,
        )
    install_row.source_zip_filename = str(item.zip_filename or "")
    install_row.status = "FAILED"
    # Bound the stored message so oversized details cannot bloat the column
    # (previously only the generic-exception branch truncated).
    install_row.last_error = message[:1000]
    install_row.updated_at = now
    session.add(install_row)
    session.commit()

def install_market_item_for_bot(
    self,
    *,
    bot_id: str,
    skill_id: int,
    session: Session,
    resolve_edge_state_context: EdgeStateContextResolver,
    logger: logging.Logger,
) -> Dict[str, Any]:
    """Install a market item's zip into the bot workspace and record state.

    On success the BotSkillInstall row is marked INSTALLED with the entries
    that were installed; on failure it is marked FAILED with the error
    message. HTTPExceptions propagate as-is; any other error becomes a 500.
    """
    item = session.get(SkillMarketItem, skill_id)
    if not item:
        raise HTTPException(status_code=404, detail="Skill market item not found")
    zip_path = os.path.join(self._skill_market_root(), str(item.zip_filename or ""))
    if not os.path.isfile(zip_path):
        raise HTTPException(status_code=404, detail="Skill zip package not found")
    install_row = session.exec(
        select(BotSkillInstall).where(
            BotSkillInstall.bot_id == bot_id,
            BotSkillInstall.skill_market_item_id == skill_id,
        )
    ).first()
    try:
        install_result = self._install_skill_zip_into_workspace(
            bot_id=bot_id,
            zip_path=zip_path,
            resolve_edge_state_context=resolve_edge_state_context,
            logger=logger,
        )
        now = datetime.utcnow()
        if not install_row:
            install_row = BotSkillInstall(
                bot_id=bot_id,
                skill_market_item_id=skill_id,
            )
        install_row.installed_entries_json = json.dumps(install_result["installed"], ensure_ascii=False)
        install_row.source_zip_filename = str(item.zip_filename or "")
        install_row.status = "INSTALLED"
        install_row.last_error = None
        install_row.installed_at = now
        install_row.updated_at = now
        session.add(install_row)
        session.commit()
        session.refresh(install_row)
        return {
            "status": "installed",
            "bot_id": bot_id,
            "skill_market_item_id": skill_id,
            "installed": install_result["installed"],
            "skills": install_result["skills"],
            "market_item": self.serialize_skill_market_item(item, install_count=0, install_row=install_row),
        }
    except HTTPException as exc:
        self._record_skill_install_failure(
            session=session,
            bot_id=bot_id,
            skill_id=skill_id,
            item=item,
            install_row=install_row,
            message=str(exc.detail or "Install failed"),
        )
        raise
    except Exception as exc:
        self._record_skill_install_failure(
            session=session,
            bot_id=bot_id,
            skill_id=skill_id,
            item=item,
            install_row=install_row,
            message=str(exc or "Install failed"),
        )
        raise HTTPException(status_code=500, detail="Skill install failed unexpectedly") from exc
def install_market_item_for_bot_checked(
    self,
    *,
    session: Session,
    bot_id: str,
    skill_id: int,
    resolve_edge_state_context: EdgeStateContextResolver,
    logger: logging.Logger,
) -> Dict[str, Any]:
    """Verify the bot exists, then perform the market-item install."""
    self._require_bot(session=session, bot_id=bot_id)
    return self.install_market_item_for_bot(
        session=session,
        bot_id=bot_id,
        skill_id=skill_id,
        resolve_edge_state_context=resolve_edge_state_context,
        logger=logger,
    )
async def upload_bot_skill_zip(
    self,
    *,
    bot_id: str,
    file: UploadFile,
    resolve_edge_state_context: EdgeStateContextResolver,
    logger: logging.Logger,
) -> Dict[str, Any]:
    """Stream an uploaded .zip to a temp file and install it into the workspace.

    Raises 400 for a non-zip filename or an empty payload, and 413 when the
    stream exceeds the configured upload limit. The temporary file is removed
    on every path, success or failure.
    """
    filename = str(file.filename or "").strip()
    # Validate the extension before touching the filesystem.
    if not filename.lower().endswith(".zip"):
        raise HTTPException(status_code=400, detail="Only .zip skill package is supported")
    max_bytes = get_platform_settings_snapshot().upload_max_mb * 1024 * 1024
    tmp_zip_path: Optional[str] = None
    try:
        try:
            with tempfile.NamedTemporaryFile(prefix=".skill_upload_", suffix=".zip", delete=False) as tmp_zip:
                tmp_zip_path = tmp_zip.name
                total_size = 0
                # Copy the upload in 1 MiB chunks to bound memory usage.
                while True:
                    chunk = await file.read(1024 * 1024)
                    if not chunk:
                        break
                    total_size += len(chunk)
                    if total_size > max_bytes:
                        raise HTTPException(
                            status_code=413,
                            detail=f"Zip package too large (max {max_bytes // (1024 * 1024)}MB)",
                        )
                    tmp_zip.write(chunk)
            if total_size == 0:
                raise HTTPException(status_code=400, detail="Zip package is empty")
        finally:
            await file.close()
        install_result = self._install_skill_zip_into_workspace(
            bot_id=bot_id,
            zip_path=tmp_zip_path,
            resolve_edge_state_context=resolve_edge_state_context,
            logger=logger,
        )
    finally:
        # Bug fix: previously the temp file leaked when validation, the size
        # limit, or the empty check raised before the install step.
        if tmp_zip_path and os.path.exists(tmp_zip_path):
            os.remove(tmp_zip_path)
    return {
        "status": "installed",
        "bot_id": bot_id,
        "installed": install_result["installed"],
        "skills": install_result["skills"],
    }
async def upload_bot_skill_zip_for_bot(
    self,
    *,
    session: Session,
    bot_id: str,
    file: UploadFile,
    resolve_edge_state_context: EdgeStateContextResolver,
    logger: logging.Logger,
) -> Dict[str, Any]:
    """Verify the bot exists, then install the uploaded skill zip."""
    self._require_bot(session=session, bot_id=bot_id)
    result = await self.upload_bot_skill_zip(
        bot_id=bot_id,
        file=file,
        resolve_edge_state_context=resolve_edge_state_context,
        logger=logger,
    )
    return result
def delete_workspace_skill(
    self,
    *,
    bot_id: str,
    skill_name: str,
    resolve_edge_state_context: EdgeStateContextResolver,
) -> Dict[str, Any]:
    """Remove a top-level skill entry from a local (non-edge) bot workspace."""
    if resolve_edge_state_context(bot_id) is not None:
        raise HTTPException(
            status_code=400,
            detail="Edge bot skill delete is disabled here. Use edge workspace file management.",
        )
    name = str(skill_name or "").strip()
    if not self._is_valid_top_level_skill_name(name):
        raise HTTPException(status_code=400, detail="Invalid skill name")
    root = os.path.abspath(self._skills_root(bot_id))
    target = os.path.abspath(os.path.join(root, name))
    # Containment guard: the resolved path must stay inside the skills root.
    if os.path.commonpath([root, target]) != root:
        raise HTTPException(status_code=400, detail="Invalid skill path")
    if not os.path.exists(target):
        raise HTTPException(status_code=404, detail="Skill not found in workspace")
    if os.path.isdir(target):
        shutil.rmtree(target, ignore_errors=False)
    else:
        os.remove(target)
    return {"status": "deleted", "bot_id": bot_id, "skill": name}
def delete_workspace_skill_for_bot(
    self,
    *,
    session: Session,
    bot_id: str,
    skill_name: str,
    resolve_edge_state_context: EdgeStateContextResolver,
) -> Dict[str, Any]:
    """Verify the bot exists, then delete the workspace skill entry."""
    self._require_bot(session=session, bot_id=bot_id)
    result = self.delete_workspace_skill(
        bot_id=bot_id,
        skill_name=skill_name,
        resolve_edge_state_context=resolve_edge_state_context,
    )
    return result

View File

@ -0,0 +1,126 @@
import asyncio
import os
import tempfile
from typing import Any, Callable, Dict, Optional
from fastapi import HTTPException, UploadFile
from sqlmodel import Session
from core.speech_service import SpeechDisabledError, SpeechDurationError, SpeechServiceError
from models.bot import BotInstance
class SpeechTranscriptionService:
    """Transcribe uploaded audio for a bot via the configured speech backend."""

    def __init__(
        self,
        *,
        data_root: str,
        speech_service: Any,
        get_speech_runtime_settings: Callable[[], Dict[str, Any]],
        logger: Any,
    ) -> None:
        """Capture the temp-file root, speech backend, settings provider, and logger."""
        # Directory used for temporary audio files during transcription.
        self._data_root = data_root
        # Backend exposing transcribe_file(path, language) -> dict.
        self._speech_service = speech_service
        # Callable returning the current speech runtime settings snapshot.
        self._get_speech_runtime_settings = get_speech_runtime_settings
        self._logger = logger

    def _require_bot(self, *, session: Session, bot_id: str) -> BotInstance:
        """Return the bot row or raise 404 when it does not exist."""
        bot = session.get(BotInstance, bot_id)
        if not bot:
            raise HTTPException(status_code=404, detail="Bot not found")
        return bot

    async def transcribe(
        self,
        *,
        session: Session,
        bot_id: str,
        file: UploadFile,
        language: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Stream the uploaded audio to a temp file and return its transcription.

        Raises:
            HTTPException 404: unknown bot.
            HTTPException 400: speech disabled, missing/empty audio, no speech
                detected, or a speech backend service error.
            HTTPException 413: audio longer than the configured maximum.
            HTTPException 500: any other unexpected failure.
        """
        self._require_bot(session=session, bot_id=bot_id)
        speech_settings = self._get_speech_runtime_settings()
        if not speech_settings["enabled"]:
            raise HTTPException(status_code=400, detail="Speech recognition is disabled")
        if not file:
            raise HTTPException(status_code=400, detail="no audio file uploaded")
        original_name = str(file.filename or "audio.webm").strip() or "audio.webm"
        # Neutralize path separators so the name is safe to log and reuse.
        safe_name = os.path.basename(original_name).replace("\\", "_").replace("/", "_")
        ext = os.path.splitext(safe_name)[1].strip().lower() or ".webm"
        if len(ext) > 12:
            # Implausibly long extension: fall back to the default container.
            ext = ".webm"
        tmp_path = ""
        try:
            with tempfile.NamedTemporaryFile(delete=False, suffix=ext, prefix=".speech_", dir=self._data_root) as tmp:
                tmp_path = tmp.name
                # Copy the upload in 1 MiB chunks to bound memory usage.
                while True:
                    chunk = await file.read(1024 * 1024)
                    if not chunk:
                        break
                    tmp.write(chunk)
            if not tmp_path or not os.path.exists(tmp_path) or os.path.getsize(tmp_path) <= 0:
                raise HTTPException(status_code=400, detail="audio payload is empty")
            resolved_language = str(language or "").strip() or speech_settings["default_language"]
            # Run the blocking backend call off the event loop.
            result = await asyncio.to_thread(self._speech_service.transcribe_file, tmp_path, resolved_language)
            text = str(result.get("text") or "").strip()
            if not text:
                raise HTTPException(status_code=400, detail="No speech detected")
            return {
                "bot_id": bot_id,
                "text": text,
                "duration_seconds": result.get("duration_seconds"),
                "max_audio_seconds": speech_settings["max_audio_seconds"],
                "model": speech_settings["model"],
                "device": speech_settings["device"],
                "language": result.get("language") or resolved_language,
            }
        except SpeechDisabledError as exc:
            self._logger.warning(
                "speech transcribe disabled bot_id=%s file=%s language=%s detail=%s",
                bot_id,
                safe_name,
                language,
                exc,
            )
            raise HTTPException(status_code=400, detail=str(exc))
        except SpeechDurationError:
            self._logger.warning(
                "speech transcribe too long bot_id=%s file=%s language=%s max_seconds=%s",
                bot_id,
                safe_name,
                language,
                speech_settings["max_audio_seconds"],
            )
            raise HTTPException(status_code=413, detail=f"Audio duration exceeds {speech_settings['max_audio_seconds']} seconds")
        except SpeechServiceError as exc:
            self._logger.exception(
                "speech transcribe failed bot_id=%s file=%s language=%s",
                bot_id,
                safe_name,
                language,
            )
            raise HTTPException(status_code=400, detail=str(exc))
        except HTTPException:
            # Let our own 4xx/5xx responses pass through unchanged.
            raise
        except Exception as exc:
            self._logger.exception(
                "speech transcribe unexpected error bot_id=%s file=%s language=%s",
                bot_id,
                safe_name,
                language,
            )
            raise HTTPException(status_code=500, detail=f"speech transcription failed: {exc}")
        finally:
            # Best-effort cleanup: close the upload and drop the temp file.
            try:
                await file.close()
            except Exception:
                pass
            if tmp_path and os.path.exists(tmp_path):
                try:
                    os.remove(tmp_path)
                except Exception:
                    pass

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,155 @@
import json
import os
from typing import Any, Callable, Dict
from fastapi import HTTPException
from sqlmodel import Session, select
from models.bot import BotInstance
class SystemService:
    """Expose system-level defaults, editable templates, and health probes."""

    def __init__(
        self,
        *,
        engine: Any,
        cache: Any,
        database_engine: str,
        redis_enabled: bool,
        redis_url: str,
        redis_prefix: str,
        agent_md_templates_file: str,
        topic_presets_templates_file: str,
        default_soul_md: str,
        default_agents_md: str,
        default_user_md: str,
        default_tools_md: str,
        default_identity_md: str,
        topic_preset_templates: Any,
        get_default_system_timezone: Callable[[], str],
        load_agent_md_templates: Callable[[], Dict[str, Any]],
        load_topic_presets_template: Callable[[], Dict[str, Any]],
        get_platform_settings_snapshot: Callable[[], Any],
        get_speech_runtime_settings: Callable[[], Dict[str, Any]],
    ) -> None:
        """Capture DB/cache handles, template file paths, built-in defaults, and loaders."""
        self._engine = engine
        self._cache = cache
        self._database_engine = database_engine
        self._redis_enabled = redis_enabled
        self._redis_url = redis_url
        self._redis_prefix = redis_prefix
        # Paths of the on-disk JSON template files this service edits.
        self._agent_md_templates_file = agent_md_templates_file
        self._topic_presets_templates_file = topic_presets_templates_file
        # Built-in fallbacks used when a stored template value is empty.
        self._default_soul_md = default_soul_md
        self._default_agents_md = default_agents_md
        self._default_user_md = default_user_md
        self._default_tools_md = default_tools_md
        self._default_identity_md = default_identity_md
        self._topic_preset_templates = topic_preset_templates
        self._get_default_system_timezone = get_default_system_timezone
        self._load_agent_md_templates = load_agent_md_templates
        self._load_topic_presets_template = load_topic_presets_template
        self._get_platform_settings_snapshot = get_platform_settings_snapshot
        self._get_speech_runtime_settings = get_speech_runtime_settings

    @staticmethod
    def _write_json_atomic(path: str, payload: Dict[str, Any]) -> None:
        """Write JSON to ``path`` atomically via a sibling temp file + os.replace."""
        os.makedirs(os.path.dirname(path), exist_ok=True)
        tmp = f"{path}.tmp"
        with open(tmp, "w", encoding="utf-8") as file:
            json.dump(payload, file, ensure_ascii=False, indent=2)
        os.replace(tmp, path)

    def get_system_defaults(self) -> Dict[str, Any]:
        """Assemble the defaults payload: templates, limits, workspace, chat, speech."""
        md_templates = self._load_agent_md_templates()
        topic_presets = self._load_topic_presets_template()
        platform_settings = self._get_platform_settings_snapshot()
        speech_settings = self._get_speech_runtime_settings()
        return {
            # Stored template values win; built-in defaults fill any gaps.
            "templates": {
                "soul_md": md_templates.get("soul_md") or self._default_soul_md,
                "agents_md": md_templates.get("agents_md") or self._default_agents_md,
                "user_md": md_templates.get("user_md") or self._default_user_md,
                "tools_md": md_templates.get("tools_md") or self._default_tools_md,
                "identity_md": md_templates.get("identity_md") or self._default_identity_md,
            },
            "limits": {
                "upload_max_mb": platform_settings.upload_max_mb,
            },
            "workspace": {
                "download_extensions": list(platform_settings.workspace_download_extensions),
                "allowed_attachment_extensions": list(platform_settings.allowed_attachment_extensions),
            },
            "bot": {
                "system_timezone": self._get_default_system_timezone(),
            },
            "loading_page": platform_settings.loading_page.model_dump(),
            "chat": {
                "pull_page_size": platform_settings.chat_pull_page_size,
                "page_size": platform_settings.page_size,
                "command_auto_unlock_seconds": platform_settings.command_auto_unlock_seconds,
            },
            "topic_presets": topic_presets.get("presets") or self._topic_preset_templates,
            "speech": {
                "enabled": speech_settings["enabled"],
                "model": speech_settings["model"],
                "device": speech_settings["device"],
                "max_audio_seconds": speech_settings["max_audio_seconds"],
                "default_language": speech_settings["default_language"],
            },
        }

    def get_system_templates(self) -> Dict[str, Any]:
        """Return the current agent-MD and topic-preset template contents."""
        return {
            "agent_md_templates": self._load_agent_md_templates(),
            "topic_presets": self._load_topic_presets_template(),
        }

    def update_system_templates(self, *, payload: Any) -> Dict[str, Any]:
        """Persist any supplied template sections and echo back the stored state.

        Raises 400 when ``topic_presets.presets`` is present but not an array.
        """
        if payload.agent_md_templates is not None:
            # Normalize line endings and keep only the known template keys.
            sanitized_agent: Dict[str, str] = {}
            for key in ("agents_md", "soul_md", "user_md", "tools_md", "identity_md"):
                sanitized_agent[key] = str(payload.agent_md_templates.get(key, "") or "").replace("\r\n", "\n")
            self._write_json_atomic(str(self._agent_md_templates_file), sanitized_agent)
        if payload.topic_presets is not None:
            presets = payload.topic_presets.get("presets") if isinstance(payload.topic_presets, dict) else None
            if presets is None:
                normalized_topic: Dict[str, Any] = {"presets": []}
            elif isinstance(presets, list):
                # Keep only dict entries; drop anything malformed.
                normalized_topic = {"presets": [dict(row) for row in presets if isinstance(row, dict)]}
            else:
                raise HTTPException(status_code=400, detail="topic_presets.presets must be an array")
            self._write_json_atomic(str(self._topic_presets_templates_file), normalized_topic)
        return {
            "status": "ok",
            "agent_md_templates": self._load_agent_md_templates(),
            "topic_presets": self._load_topic_presets_template(),
        }

    def get_health(self) -> Dict[str, Any]:
        """Probe the database with a trivial query; raise 503 when unreachable."""
        try:
            with Session(self._engine) as session:
                session.exec(select(BotInstance).limit(1)).first()
            return {"status": "ok", "database": self._database_engine}
        except Exception as exc:
            raise HTTPException(status_code=503, detail=f"database check failed: {exc}")

    def get_cache_health(self) -> Dict[str, Any]:
        """Report cache configuration and reachability.

        Status is "degraded" when Redis is configured but cannot be reached;
        otherwise "ok".
        """
        redis_url = str(self._redis_url or "").strip()
        configured = bool(self._redis_enabled and redis_url)
        client_enabled = bool(getattr(self._cache, "enabled", False))
        reachable = bool(self._cache.ping()) if client_enabled else False
        status = "ok"
        if configured and not reachable:
            status = "degraded"
        return {
            "status": status,
            "cache": {
                "configured": configured,
                "enabled": client_enabled,
                "reachable": reachable,
                "prefix": self._redis_prefix,
            },
        }

View File

@ -0,0 +1,146 @@
from typing import Any, Dict, List, Optional
from fastapi import HTTPException, Request, UploadFile
from sqlmodel import Session
from models.bot import BotInstance
from providers.selector import get_workspace_provider
class WorkspaceService:
def _require_bot(self, *, session: Session, bot_id: str) -> BotInstance:
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return bot
def list_tree(
self,
*,
app_state: Any,
bot: BotInstance,
path: Optional[str] = None,
recursive: bool = False,
) -> Dict[str, Any]:
return get_workspace_provider(app_state, bot).list_tree(bot_id=bot.id, path=path, recursive=recursive)
def read_file(
self,
*,
app_state: Any,
bot: BotInstance,
path: str,
max_bytes: int = 200000,
) -> Dict[str, Any]:
return get_workspace_provider(app_state, bot).read_file(bot_id=bot.id, path=path, max_bytes=max_bytes)
def write_markdown(
self,
*,
app_state: Any,
bot: BotInstance,
path: str,
content: str,
) -> Dict[str, Any]:
return get_workspace_provider(app_state, bot).write_markdown(bot_id=bot.id, path=path, content=content)
async def upload_files(
self,
*,
app_state: Any,
bot: BotInstance,
files: List[UploadFile],
path: Optional[str] = None,
) -> Dict[str, Any]:
return await get_workspace_provider(app_state, bot).upload_files(bot_id=bot.id, files=files, path=path)
def serve_file(
self,
*,
app_state: Any,
bot: BotInstance,
path: str,
download: bool,
request: Request,
public: bool = False,
redirect_html_to_raw: bool = False,
):
return get_workspace_provider(app_state, bot).serve_file(
bot_id=bot.id,
path=path,
download=download,
request=request,
public=public,
redirect_html_to_raw=redirect_html_to_raw,
)
def list_tree_for_bot(
self,
*,
app_state: Any,
session: Session,
bot_id: str,
path: Optional[str] = None,
recursive: bool = False,
) -> Dict[str, Any]:
bot = self._require_bot(session=session, bot_id=bot_id)
return self.list_tree(app_state=app_state, bot=bot, path=path, recursive=recursive)
def read_file_for_bot(
self,
*,
app_state: Any,
session: Session,
bot_id: str,
path: str,
max_bytes: int = 200000,
) -> Dict[str, Any]:
bot = self._require_bot(session=session, bot_id=bot_id)
return self.read_file(app_state=app_state, bot=bot, path=path, max_bytes=max_bytes)
def write_markdown_for_bot(
self,
*,
app_state: Any,
session: Session,
bot_id: str,
path: str,
content: str,
) -> Dict[str, Any]:
bot = self._require_bot(session=session, bot_id=bot_id)
return self.write_markdown(app_state=app_state, bot=bot, path=path, content=content)
def serve_file_for_bot(
self,
*,
app_state: Any,
session: Session,
bot_id: str,
path: str,
download: bool,
request: Request,
public: bool = False,
redirect_html_to_raw: bool = False,
):
bot = self._require_bot(session=session, bot_id=bot_id)
return self.serve_file(
app_state=app_state,
bot=bot,
path=path,
download=download,
request=request,
public=public,
redirect_html_to_raw=redirect_html_to_raw,
)
async def upload_files_for_bot(
    self,
    *,
    app_state: Any,
    session: Session,
    bot_id: str,
    files: List[UploadFile],
    path: Optional[str] = None,
) -> Dict[str, Any]:
    """Resolve *bot_id*, then upload the given files into its workspace."""
    instance = self._require_bot(session=session, bot_id=bot_id)
    return await self.upload_files(
        app_state=app_state,
        bot=instance,
        files=files,
        path=path,
    )

View File

@ -0,0 +1,12 @@
# Runtime image for the dashboard-edge service.
FROM python:3.12-slim
WORKDIR /app
# Install dependencies first so the layer is cached across source-only changes.
COPY requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir -r /app/requirements.txt
# Copy the application source last.
COPY . /app
# Default edge API port (matches the EDGE_PORT default in app/core/settings.py).
EXPOSE 8010
CMD ["python", "main.py"]

View File

@ -0,0 +1,59 @@
# dashboard-edge
`dashboard-edge` is the execution-plane service for Dashboard Nanobot.
It is designed to run on every managed node and bridge Dashboard control requests to local Docker or native Bot runtimes.
## Local development
```bash
cd dashboard-edge
python -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
python main.py
```
If your configured pip mirror is unavailable, install with the official index:
```bash
pip install -r requirements.txt -i https://pypi.org/simple
```
Default server:
- Host: `0.0.0.0`
- Port: `8010`
- Dev reload: enabled by default in `./scripts/dev-edge.sh`
Native local development:
```bash
cd /path/to/dashboard-nanobot
bash scripts/dev-edge-native.sh
```
By default, the native launcher will auto-detect:
- `engines/nanobot-v0.1.4-post5/.venv/bin/python -m nanobot.cli.commands gateway`
when that virtualenv exists. You can still override it with `EDGE_NATIVE_COMMAND`.
Environment variables:
- `EDGE_HOST`
- `EDGE_PORT`
- `EDGE_RELOAD`
- `EDGE_AUTH_TOKEN`
- `EDGE_NODE_ID`
- `EDGE_NODE_NAME`
- `EDGE_BOTS_WORKSPACE_ROOT`
- `EDGE_BASE_IMAGE`
- `EDGE_RUNTIME_KIND`
- `EDGE_NATIVE_COMMAND`
- `EDGE_NATIVE_DASHBOARD_URL`
- `EDGE_NATIVE_DASHBOARD_HOST`
- `EDGE_NATIVE_DASHBOARD_PORT`
- `EDGE_NATIVE_WORKDIR`
- `EDGE_UPLOAD_MAX_MB`
- `EDGE_ALLOWED_ATTACHMENT_EXTENSIONS`

View File

@ -0,0 +1 @@
# dashboard-edge application package.

View File

@ -0,0 +1 @@
# API package for dashboard-edge.

View File

@ -0,0 +1,250 @@
from typing import List, Optional
from fastapi import APIRouter, Depends, File, HTTPException, Query, Request, UploadFile
from app.dependencies.auth import require_edge_auth
from app.schemas.edge import (
EdgeCommandRequest,
EdgeLogsResponse,
EdgeNativePreflightRequest,
EdgeNativePreflightResponse,
EdgeNodeHeartbeatResponse,
EdgeMonitorPacketsResponse,
EdgeMarkdownWriteRequest,
EdgeMonitorEnsureResponse,
EdgeNodeResourcesResponse,
EdgeNodeSelfResponse,
EdgeStateResponse,
EdgeStateWriteRequest,
EdgeStatusResponse,
EdgeWorkspaceSyncRequest,
)
from app.schemas.runtime import EdgeStartBotRequest
from app.services import provision_service as provision_service_module
from app.services import state_store_service as state_store_service_module
from app.services.runtime_service import edge_runtime_service
from app.services import workspace_service as workspace_service_module
# Every edge endpoint requires the shared edge token (a no-op when no token
# is configured — see require_edge_auth).
router = APIRouter(dependencies=[Depends(require_edge_auth)])


@router.get("/api/edge/node/self", response_model=EdgeNodeSelfResponse)
def get_edge_node_self():
    """Return this node's identity as reported by the runtime service."""
    return edge_runtime_service.get_node_identity()


@router.get("/api/edge/node/resources", response_model=EdgeNodeResourcesResponse)
def get_edge_node_resources():
    """Return a host-level resource summary from the runtime service."""
    return edge_runtime_service.get_node_resource_summary()


@router.post("/api/edge/node/heartbeat", response_model=EdgeNodeHeartbeatResponse)
def heartbeat_edge_node():
    """Record a heartbeat via the runtime service."""
    return edge_runtime_service.heartbeat()
@router.post("/api/edge/runtime/native/preflight", response_model=EdgeNativePreflightResponse)
def native_preflight(payload: EdgeNativePreflightRequest):
    """Delegate a native-runtime preflight check to the runtime service.

    Blank command/workdir strings are normalized to None before delegation.
    """
    command = str(payload.native_command or "").strip()
    workdir = str(payload.native_workdir or "").strip()
    return edge_runtime_service.native_preflight(
        native_command=command or None,
        native_workdir=workdir or None,
    )
@router.post("/api/edge/bots/{bot_id}/start", response_model=EdgeStatusResponse)
async def start_bot(bot_id: str, payload: EdgeStartBotRequest):
    """Start the runtime for *bot_id* with the supplied launch options."""
    return await edge_runtime_service.start_bot(bot_id=bot_id, payload=payload)


@router.post("/api/edge/bots/{bot_id}/stop", response_model=EdgeStatusResponse)
def stop_bot(bot_id: str):
    """Stop the runtime for *bot_id*."""
    return edge_runtime_service.stop_bot(bot_id=bot_id)


@router.post("/api/edge/bots/{bot_id}/command", response_model=EdgeStatusResponse)
def send_command(bot_id: str, payload: EdgeCommandRequest):
    """Deliver a command payload to the bot's runtime."""
    return edge_runtime_service.send_command(bot_id=bot_id, payload=payload)


@router.post("/api/edge/bots/{bot_id}/monitor/ensure", response_model=EdgeMonitorEnsureResponse)
def ensure_monitor(bot_id: str):
    """Ensure a log monitor is attached to the bot's runtime."""
    return edge_runtime_service.ensure_monitor(bot_id=bot_id)


@router.get("/api/edge/bots/{bot_id}/monitor/packets", response_model=EdgeMonitorPacketsResponse)
def get_monitor_packets(bot_id: str, after_seq: int = 0, limit: int = 200):
    """Return monitor packets newer than *after_seq*, up to *limit* entries."""
    return edge_runtime_service.get_monitor_packets(bot_id=bot_id, after_seq=after_seq, limit=limit)


@router.get("/api/edge/bots/{bot_id}/logs", response_model=EdgeLogsResponse)
def get_logs(bot_id: str, tail: int = Query(300, ge=1, le=2000)):
    """Return up to *tail* recent runtime log lines (clamped to 1-2000)."""
    return edge_runtime_service.get_recent_logs(bot_id=bot_id, tail=tail)


@router.get("/api/edge/bots/{bot_id}/runtime/status", response_model=EdgeStatusResponse)
def get_runtime_status(bot_id: str):
    """Return the current runtime status for *bot_id*."""
    return edge_runtime_service.get_runtime_status(bot_id=bot_id)


@router.get("/api/edge/bots/{bot_id}/resources")
def get_resource_snapshot(bot_id: str):
    """Return a resource snapshot for the bot's runtime."""
    return edge_runtime_service.get_resource_snapshot(bot_id=bot_id)
@router.post("/api/edge/bots/{bot_id}/workspace/sync", response_model=EdgeStatusResponse)
def sync_workspace(bot_id: str, payload: EdgeWorkspaceSyncRequest):
    """Sync the bot's workspace through the provision service."""
    return provision_service_module.edge_provision_service.sync_bot_workspace(bot_id=bot_id, payload=payload)


@router.get("/api/edge/bots/{bot_id}/state/{state_key}", response_model=EdgeStateResponse)
def read_bot_state(bot_id: str, state_key: str, workspace_root: str | None = None):
    """Read one persisted state document for the bot, keyed by *state_key*."""
    return state_store_service_module.edge_state_store_service.read_state(
        bot_id=bot_id,
        state_key=state_key,
        workspace_root=workspace_root,
    )


@router.put("/api/edge/bots/{bot_id}/state/{state_key}", response_model=EdgeStateResponse)
def write_bot_state(bot_id: str, state_key: str, payload: EdgeStateWriteRequest):
    """Write one persisted state document for the bot.

    Blank workspace_root strings in the payload are normalized to None.
    """
    return state_store_service_module.edge_state_store_service.write_state(
        bot_id=bot_id,
        state_key=state_key,
        data=dict(payload.data or {}),
        workspace_root=str(payload.workspace_root or "").strip() or None,
    )
@router.get("/api/edge/bots/{bot_id}/workspace/tree")
def list_workspace_tree(
    bot_id: str,
    path: str | None = None,
    recursive: bool = False,
    workspace_root: str | None = None,
):
    """List the bot's workspace tree, optionally rooted at *path*."""
    return workspace_service_module.edge_workspace_service.list_tree(
        bot_id=bot_id,
        path=path,
        recursive=recursive,
        workspace_root=workspace_root,
    )


@router.get("/api/edge/bots/{bot_id}/workspace/file")
def read_workspace_file(
    bot_id: str,
    path: str = Query(...),
    max_bytes: int = Query(200000, ge=4096, le=1000000),
    workspace_root: str | None = None,
):
    """Read one workspace file, returning at most *max_bytes* bytes."""
    return workspace_service_module.edge_workspace_service.read_file(
        bot_id=bot_id,
        path=path,
        max_bytes=max_bytes,
        workspace_root=workspace_root,
    )
@router.put("/api/edge/bots/{bot_id}/workspace/file/markdown")
def write_workspace_markdown(
    bot_id: str,
    path: str = Query(...),
    # Fix: the parameter defaults to None, so the annotation must admit None
    # (the original claimed the body was always present).
    payload: EdgeMarkdownWriteRequest | None = None,
    workspace_root: str | None = None,
):
    """Write markdown content to *path* inside the bot's workspace.

    A missing request body yields an explicit 400 (kept from the original
    behavior) rather than FastAPI's default 422.
    """
    if payload is None:
        raise HTTPException(status_code=400, detail="markdown payload is required")
    return workspace_service_module.edge_workspace_service.write_markdown(
        bot_id=bot_id,
        path=path,
        content=payload.content,
        workspace_root=workspace_root,
    )
@router.put("/api/edge/bots/{bot_id}/workspace/file/text")
def write_workspace_text(
    bot_id: str,
    path: str = Query(...),
    # Fix: the parameter defaults to None, so the annotation must admit None.
    payload: EdgeMarkdownWriteRequest | None = None,
    workspace_root: str | None = None,
):
    """Write plain-text content to *path* inside the bot's workspace.

    A missing request body yields an explicit 400 (kept from the original
    behavior) rather than FastAPI's default 422.
    """
    if payload is None:
        raise HTTPException(status_code=400, detail="text payload is required")
    return workspace_service_module.edge_workspace_service.write_text_file(
        bot_id=bot_id,
        path=path,
        content=payload.content,
        workspace_root=workspace_root,
    )
@router.post("/api/edge/bots/{bot_id}/workspace/upload")
async def upload_workspace_files(
    bot_id: str,
    files: List[UploadFile] = File(...),
    path: Optional[str] = None,
    workspace_root: str | None = None,
):
    """Upload one or more files into the bot's workspace, optionally under *path*."""
    return await workspace_service_module.edge_workspace_service.upload_files(
        bot_id=bot_id,
        files=files,
        path=path,
        workspace_root=workspace_root,
    )


@router.delete("/api/edge/bots/{bot_id}/workspace/file")
def delete_workspace_path(
    bot_id: str,
    path: str = Query(...),
    workspace_root: str | None = None,
):
    """Delete the entry at *path* inside the bot's workspace."""
    return workspace_service_module.edge_workspace_service.delete_path(
        bot_id=bot_id,
        path=path,
        workspace_root=workspace_root,
    )
@router.get("/api/edge/bots/{bot_id}/workspace/download")
def download_workspace_file(
    bot_id: str,
    # Fix: Request is always injected by FastAPI, so it must not default to
    # None (the original `request: Request = None` lied about the type).
    # Moved before the defaulted parameters; the HTTP interface is unchanged.
    request: Request,
    path: str = Query(...),
    download: bool = False,
    workspace_root: str | None = None,
):
    """Serve one workspace file, optionally as an attachment (*download*)."""
    return workspace_service_module.edge_workspace_service.serve_file(
        bot_id=bot_id,
        path=path,
        download=download,
        request=request,
        workspace_root=workspace_root,
    )
@router.get("/api/edge/bots/{bot_id}/workspace/raw/{path:path}")
def raw_workspace_file(
    bot_id: str,
    path: str,
    # Fix: Request is always injected by FastAPI, so it must not default to
    # None. Moved before the defaulted parameters; HTTP interface unchanged.
    request: Request,
    download: bool = False,
    workspace_root: str | None = None,
):
    """Serve one workspace file addressed by a raw path segment."""
    return workspace_service_module.edge_workspace_service.serve_file(
        bot_id=bot_id,
        path=path,
        download=download,
        request=request,
        workspace_root=workspace_root,
    )
@router.post("/api/edge/bots/{bot_id}/workspace/purge", response_model=EdgeStatusResponse)
def purge_workspace(bot_id: str, workspace_root: str | None = None):
    """Remove the bot's workspace and report whether anything was deleted."""
    outcome = workspace_service_module.edge_workspace_service.purge_bot_workspace(
        bot_id=bot_id,
        workspace_root=workspace_root,
    )
    status = "deleted" if bool(outcome.get("deleted")) else "not_found"
    return EdgeStatusResponse(status=status)

View File

@ -0,0 +1 @@
# Core package for dashboard-edge.

View File

@ -0,0 +1,86 @@
import os
import re
from pathlib import Path
from dotenv import load_dotenv
# Load variables from a local .env file before any settings are read.
load_dotenv()

# dashboard-edge/ directory (two levels above app/core/settings.py).
EDGE_ROOT = Path(__file__).resolve().parents[2]
# Parent checkout that contains dashboard-edge/.
PROJECT_ROOT = EDGE_ROOT.parent

# Bind address for the edge API server.
EDGE_HOST = str(os.getenv("EDGE_HOST", "0.0.0.0") or "0.0.0.0").strip() or "0.0.0.0"
try:
    EDGE_PORT = int(os.getenv("EDGE_PORT", "8010"))
except Exception:
    EDGE_PORT = 8010
# Clamp to the valid TCP port range.
EDGE_PORT = max(1, min(EDGE_PORT, 65535))
# Dev auto-reload toggle.
EDGE_RELOAD = str(os.getenv("EDGE_RELOAD", "true")).strip().lower() in {"1", "true", "yes", "on"}
# Shared secret for the edge API; empty disables auth (see require_edge_auth).
EDGE_AUTH_TOKEN = str(os.getenv("EDGE_AUTH_TOKEN", "") or "").strip()
# Node identity values (also surfaced by the health endpoint).
EDGE_NODE_ID = str(os.getenv("EDGE_NODE_ID", "local") or "local").strip().lower() or "local"
EDGE_NODE_NAME = str(os.getenv("EDGE_NODE_NAME", "Local Node") or "Local Node").strip() or "Local Node"
# Default Docker image used for bot containers.
EDGE_BASE_IMAGE = str(os.getenv("EDGE_BASE_IMAGE", "nanobot-base:v0.1.4") or "nanobot-base:v0.1.4").strip()
# Log level / access-log flags — presumably passed to the server runner; confirm in main.
EDGE_LOG_LEVEL = str(os.getenv("EDGE_LOG_LEVEL", "warning") or "warning").strip().lower() or "warning"
EDGE_ACCESS_LOG = str(os.getenv("EDGE_ACCESS_LOG", "false")).strip().lower() in {"1", "true", "yes", "on"}
def _default_native_command() -> str:
    """Pick the native launch command: env override, repo venv, or PATH fallback."""
    override = str(os.getenv("EDGE_NATIVE_COMMAND", "") or "").strip()
    if override:
        return override
    # Prefer the engine virtualenv bundled inside the repository when present.
    candidate = PROJECT_ROOT / "engines" / "nanobot-v0.1.4-post5" / ".venv" / "bin" / "python"
    if candidate.is_file() and os.access(candidate, os.X_OK):
        return f"{candidate} -m nanobot.cli.commands gateway"
    return "nanobot gateway"
# Command used to launch a bot natively (env override > repo venv > PATH).
EDGE_NATIVE_COMMAND = _default_native_command()
# Dashboard /chat endpoint for the native runtime — confirm usage in the runtime service.
EDGE_NATIVE_DASHBOARD_URL = str(
    os.getenv("EDGE_NATIVE_DASHBOARD_URL", "http://127.0.0.1:9000/chat") or "http://127.0.0.1:9000/chat"
).strip() or "http://127.0.0.1:9000/chat"
# Optional working directory for the native process; empty means unset.
EDGE_NATIVE_WORKDIR = str(os.getenv("EDGE_NATIVE_WORKDIR", "") or "").strip()
# Root directory holding per-bot workspaces, resolved to an absolute path.
EDGE_BOTS_WORKSPACE_ROOT = str(
    Path(os.getenv("EDGE_BOTS_WORKSPACE_ROOT", str(PROJECT_ROOT / "workspace" / "bots"))).expanduser().resolve()
)
def _env_int(name: str, default: int, min_value: int, max_value: int) -> int:
raw = os.getenv(name)
if raw is None:
return default
try:
value = int(str(raw).strip())
except Exception:
value = default
return max(min_value, min(max_value, value))
def _normalize_extension(raw: str) -> str:
text = str(raw or "").strip().lower()
if not text:
return ""
if text.startswith("*."):
text = text[1:]
if not text.startswith("."):
text = f".{text}"
if not re.fullmatch(r"\.[a-z0-9][a-z0-9._+-]{0,31}", text):
return ""
return text
def _env_extensions(name: str) -> tuple[str, ...]:
raw = os.getenv(name)
if raw is None:
return ()
rows: list[str] = []
for item in re.split(r"[,;\s]+", str(raw)):
ext = _normalize_extension(item)
if ext and ext not in rows:
rows.append(ext)
return tuple(rows)
# Maximum upload size in MB, clamped to [1, 2048].
EDGE_UPLOAD_MAX_MB = _env_int("EDGE_UPLOAD_MAX_MB", 100, 1, 2048)
# Normalized allow-list of attachment extensions (empty tuple when unset).
EDGE_ALLOWED_ATTACHMENT_EXTENSIONS = _env_extensions("EDGE_ALLOWED_ATTACHMENT_EXTENSIONS")

View File

@ -0,0 +1 @@
# Dependency package for dashboard-edge.

View File

@ -0,0 +1,14 @@
from fastapi import Header, HTTPException
from app.core.settings import EDGE_AUTH_TOKEN
EDGE_AUTH_HEADER = "x-dashboard-edge-token"
def require_edge_auth(x_dashboard_edge_token: str | None = Header(default=None)) -> None:
    """FastAPI dependency: reject requests whose edge token does not match.

    When EDGE_AUTH_TOKEN is empty, authentication is disabled and every
    request is allowed.

    Raises:
        HTTPException: 401 when a token is configured and the supplied
            header value does not match.
    """
    import hmac  # stdlib; local import keeps the module's import block untouched

    configured = str(EDGE_AUTH_TOKEN or "").strip()
    if not configured:
        return
    supplied = str(x_dashboard_edge_token or "").strip()
    # Fix: use a constant-time comparison so the token cannot be guessed
    # byte-by-byte via response-timing differences.
    if not hmac.compare_digest(supplied.encode("utf-8"), configured.encode("utf-8")):
        raise HTTPException(status_code=401, detail="Invalid dashboard-edge token")

View File

@ -0,0 +1,30 @@
from fastapi import FastAPI
from app.api.router import router as edge_router
from app.core.settings import EDGE_BOTS_WORKSPACE_ROOT, EDGE_NODE_ID, EDGE_NODE_NAME
from app.services.provision_service import EdgeProvisionService
from app.services.runtime_service import edge_runtime_service
from app.services.state_store_service import EdgeStateStoreService
from app.services.workspace_service import EdgeWorkspaceService
# FastAPI application exposing the edge control API.
app = FastAPI(title="Dashboard Edge API")
app.include_router(edge_router)
# Expose the runtime service on app.state so handlers/tests can reach it.
app.state.edge_runtime_service = edge_runtime_service
# NOTE(review): these imports are deliberately placed mid-module so the
# service singletons below can be rebound after the app is constructed —
# confirm no caller imports the singletons before this module runs.
from app.services import provision_service as provision_service_module
from app.services import state_store_service as state_store_service_module
from app.services import workspace_service as workspace_service_module
# Rebind the module-level singletons to instances rooted at this node's
# configured bot-workspace directory.
provision_service_module.edge_provision_service = EdgeProvisionService(host_data_root=EDGE_BOTS_WORKSPACE_ROOT)
state_store_service_module.edge_state_store_service = EdgeStateStoreService(host_data_root=EDGE_BOTS_WORKSPACE_ROOT)
workspace_service_module.edge_workspace_service = EdgeWorkspaceService(host_data_root=EDGE_BOTS_WORKSPACE_ROOT)
@app.get("/api/edge/health")
def healthcheck():
    """Liveness probe: report service name and this node's identity."""
    payload = {
        "status": "ok",
        "service": "dashboard-edge",
        "node_id": EDGE_NODE_ID,
        "node_name": EDGE_NODE_NAME,
    }
    return payload

View File

@ -0,0 +1 @@
# Runtime package for dashboard-edge.

View File

@ -0,0 +1,62 @@
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional
class EdgeRuntimeBackend(ABC):
    """Abstract interface for a bot runtime backend (Docker or native).

    Concrete backends manage bot process lifecycle, log monitoring, command
    delivery and resource reporting for a single edge node.
    """

    # Identifier of the backend flavor; concrete subclasses may override.
    runtime_kind: str = "docker"

    @abstractmethod
    def capabilities(self) -> Dict[str, Any]:
        """Describe the features this backend supports."""
        raise NotImplementedError

    @abstractmethod
    def has_image(self, tag: str) -> bool:
        """Return True when the image *tag* is available to this backend."""
        raise NotImplementedError

    @abstractmethod
    def start_bot(
        self,
        bot_id: str,
        image_tag: Optional[str] = None,
        env_vars: Optional[Dict[str, str]] = None,
        workspace_root: Optional[str] = None,
        native_command: Optional[str] = None,
        native_workdir: Optional[str] = None,
        cpu_cores: Optional[float] = None,
        memory_mb: Optional[int] = None,
        storage_gb: Optional[int] = None,
        on_state_change: Optional[Callable[[str, dict], None]] = None,
    ) -> bool:
        """Start the bot's runtime; return True on success.

        *on_state_change*, when given, receives (bot_id, packet) callbacks
        from the backend's log monitor.
        """
        raise NotImplementedError

    @abstractmethod
    def ensure_monitor(self, bot_id: str, on_state_change: Callable[[str, dict], None]) -> bool:
        """Ensure a log monitor is attached; return True when one is live."""
        raise NotImplementedError

    @abstractmethod
    def stop_bot(self, bot_id: str) -> bool:
        """Stop the bot's runtime; return True on success."""
        raise NotImplementedError

    @abstractmethod
    def get_bot_status(self, bot_id: str) -> str:
        """Return the runtime status string for the bot."""
        raise NotImplementedError

    @abstractmethod
    def get_bot_resource_snapshot(self, bot_id: str) -> Dict[str, Any]:
        """Return resource limits/usage for the bot's runtime."""
        raise NotImplementedError

    @abstractmethod
    def get_recent_logs(self, bot_id: str, tail: int = 300) -> List[str]:
        """Return up to *tail* recent log lines for the bot."""
        raise NotImplementedError

    @abstractmethod
    def send_command(self, bot_id: str, command: str, media: Optional[List[str]] = None) -> bool:
        """Deliver *command* (and optional media paths) to the bot."""
        raise NotImplementedError

    @abstractmethod
    def get_last_delivery_error(self, bot_id: str) -> str:
        """Return the last command-delivery error for the bot ("" when none)."""
        raise NotImplementedError

    @abstractmethod
    def parse_monitor_packet(self, line: str) -> Optional[Dict[str, Any]]:
        """Parse one log line into a monitor packet, or None if not one."""
        raise NotImplementedError

View File

@ -0,0 +1,716 @@
import base64
import codecs
import json
import os
import re
import threading
import time
from typing import Any, Callable, Dict, List, Optional, Tuple
import docker
import httpx
from app.runtime.base import EdgeRuntimeBackend
class EdgeDockerManager(EdgeRuntimeBackend):
runtime_kind = "docker"
def __init__(self, host_data_root: str, base_image: str = "nanobot-base:v0.1.4") -> None:
    """Connect to the local Docker engine and initialize per-bot bookkeeping.

    A failed Docker probe leaves self.client as None; all operations then
    degrade to no-ops/False rather than raising.
    """
    try:
        # Short timeout so startup is not blocked by an unreachable daemon.
        self.client = docker.from_env(timeout=6)
        self.client.version()
        print("✅ Edge Docker engine connected")
    except Exception as exc:
        self.client = None
        print(f"⚠️ Edge Docker engine unavailable: {exc}")
    # Root directory for per-bot state/workspace directories on the host.
    self.host_data_root = host_data_root
    # Image used when start_bot is not given an explicit tag.
    self.base_image = base_image
    # bot_id -> log-monitor thread currently following that bot's container.
    self.active_monitors: Dict[str, threading.Thread] = {}
    # bot_id -> human-readable reason for the last failed command delivery.
    self._last_delivery_error: Dict[str, str] = {}
def capabilities(self) -> Dict[str, Any]:
    """Describe what this Docker backend supports for the control plane."""
    docker_available = self.client is not None
    workspace_features = {
        "tree": True,
        "read_file": True,
        "write_markdown": True,
        "upload_files": True,
        "serve_file": True,
    }
    return {
        "protocol": {"version": "1"},
        "runtime": {"docker": bool(docker_available), "native": False},
        "workspace": workspace_features,
        "monitor": {"logs": True, "ensure": True},
    }
@staticmethod
def _normalize_resource_limits(
    cpu_cores: Optional[float],
    memory_mb: Optional[int],
    storage_gb: Optional[int],
) -> Tuple[float, int, int]:
    """Coerce requested limits to safe values.

    None, unparseable, or negative inputs fall back to the defaults
    (1 core, 1024 MB, 10 GB). Zero means "unlimited" and is passed through.
    Positive values are clamped to [0.1, 16] cores, [256, 65536] MB and
    [1, 1024] GB.
    """
    def _coerce(value, cast, fallback):
        # Invalid or negative requests collapse to the default.
        try:
            result = cast(value) if value is not None else fallback
        except Exception:
            return fallback
        return fallback if result < 0 else result

    cpu = _coerce(cpu_cores, float, 1.0)
    memory = _coerce(memory_mb, int, 1024)
    storage = _coerce(storage_gb, int, 10)
    cpu = 0.0 if cpu == 0 else min(16.0, max(0.1, cpu))
    memory = 0 if memory == 0 else min(65536, max(256, memory))
    storage = 0 if storage == 0 else min(1024, max(1, storage))
    return cpu, memory, storage
def has_image(self, tag: str) -> bool:
    """Return True when *tag* resolves to a locally available Docker image."""
    if self.client is None:
        return False
    try:
        self.client.images.get(tag)
    except Exception:
        return False
    return True
def start_bot(
    self,
    bot_id: str,
    image_tag: Optional[str] = None,
    env_vars: Optional[Dict[str, str]] = None,
    workspace_root: Optional[str] = None,
    native_command: Optional[str] = None,
    native_workdir: Optional[str] = None,
    cpu_cores: Optional[float] = None,
    memory_mb: Optional[int] = None,
    storage_gb: Optional[int] = None,
    on_state_change: Optional[Callable[[str, dict], None]] = None,
) -> bool:
    """Start (or reuse) the bot's worker container; return True on success.

    native_command/native_workdir are accepted for interface parity with the
    native backend but unused here. Returns False when Docker is unavailable,
    the image is missing, or container creation fails.
    """
    if not self.client:
        return False
    image = image_tag or self.base_image
    # Refuse to start rather than implicitly pulling an image.
    if not self.has_image(image):
        return False
    state_nanobot_dir = self._state_nanobot_dir(bot_id=bot_id, workspace_root=workspace_root)
    workspace_dir = self._workspace_dir(bot_id=bot_id, workspace_root=workspace_root)
    default_workspace_dir = os.path.join(state_nanobot_dir, "workspace")
    container_name = f"worker_{bot_id}"
    os.makedirs(state_nanobot_dir, exist_ok=True)
    os.makedirs(workspace_dir, exist_ok=True)
    cpu, memory, storage = self._normalize_resource_limits(cpu_cores, memory_mb, storage_gb)
    # Mount the bot's state dir at /root/.nanobot; only add a second mount
    # for the workspace when it is not already inside the state dir.
    volumes = {
        state_nanobot_dir: {"bind": "/root/.nanobot", "mode": "rw"},
    }
    if os.path.abspath(workspace_dir) != os.path.abspath(default_workspace_dir):
        volumes[workspace_dir] = {"bind": "/root/.nanobot/workspace", "mode": "rw"}
    base_kwargs = {
        "image": image,
        "name": container_name,
        "detach": True,
        "stdin_open": True,
        "tty": True,
        "environment": env_vars or {},
        "volumes": volumes,
        "network_mode": "bridge",
    }
    # Zero limits mean "unlimited" (see _normalize_resource_limits).
    if memory > 0:
        base_kwargs["mem_limit"] = f"{memory}m"
    if cpu > 0:
        base_kwargs["nano_cpus"] = int(cpu * 1_000_000_000)
    try:
        # Reuse a running container; remove a stale (stopped) one first.
        try:
            container = self.client.containers.get(container_name)
            container.reload()
            if container.status == "running":
                if on_state_change:
                    self.ensure_monitor(bot_id, on_state_change)
                return True
            container.remove(force=True)
        except docker.errors.NotFound:
            pass
        if storage > 0:
            # storage_opt is not supported on all storage drivers; retry
            # without it when the daemon rejects the option.
            try:
                container = self.client.containers.run(
                    storage_opt={"size": f"{storage}G"},
                    **base_kwargs,
                )
            except Exception:
                container = self.client.containers.run(**base_kwargs)
        else:
            container = self.client.containers.run(**base_kwargs)
        if on_state_change:
            # Daemon thread that follows container logs and forwards packets.
            monitor_thread = threading.Thread(
                target=self._monitor_container_logs,
                args=(bot_id, container, on_state_change),
                daemon=True,
            )
            monitor_thread.start()
            self.active_monitors[bot_id] = monitor_thread
        return True
    except Exception:
        return False
def ensure_monitor(self, bot_id: str, on_state_change: Callable[[str, dict], None]) -> bool:
    """Attach a log-monitor thread to the bot's container if none is alive.

    Returns True when a live monitor exists (pre-existing or newly started);
    False when Docker is unavailable, the container is missing or not
    running, or attaching fails.
    """
    if not self.client:
        return False
    # Reuse an already-running monitor thread for this bot.
    existing = self.active_monitors.get(bot_id)
    if existing and existing.is_alive():
        return True
    try:
        container = self.client.containers.get(f"worker_{bot_id}")
        container.reload()
        if container.status != "running":
            return False
        # Daemon thread that follows container logs and forwards packets.
        monitor_thread = threading.Thread(
            target=self._monitor_container_logs,
            args=(bot_id, container, on_state_change),
            daemon=True,
        )
        monitor_thread.start()
        self.active_monitors[bot_id] = monitor_thread
        return True
    except Exception:
        return False
def stop_bot(self, bot_id: str) -> bool:
    """Stop and remove the bot's container; True only when one was removed."""
    if not self.client:
        return False
    try:
        worker = self.client.containers.get(f"worker_{bot_id}")
        worker.stop(timeout=5)
        worker.remove()
    except docker.errors.NotFound:
        # Already gone: drop any stale monitor handle.
        self.active_monitors.pop(bot_id, None)
        return False
    except Exception:
        return False
    self.active_monitors.pop(bot_id, None)
    return True
def get_bot_status(self, bot_id: str) -> str:
    """Map the container's Docker state to 'RUNNING' or 'STOPPED'."""
    if not self.client:
        return "STOPPED"
    try:
        worker = self.client.containers.get(f"worker_{bot_id}")
        worker.reload()
        state = str(worker.status or "").strip().lower()
        # "restarting" still counts as RUNNING from the dashboard's view.
        return "RUNNING" if state in {"running", "restarting"} else "STOPPED"
    except Exception:
        return "STOPPED"
@staticmethod
def _parse_size_to_bytes(raw: Any) -> Optional[int]:
    """Parse a human-readable size (e.g. "10G", "512m", "1.5GiB") into bytes.

    Returns None for empty or unparseable input. Plain numbers are taken as
    bytes; unit suffixes are interpreted as binary multiples (1K = 1024).
    """
    if raw is None:
        return None
    text = str(raw).strip()
    if not text:
        return None
    # Fast path: a bare (possibly float-formatted) number is already bytes.
    try:
        return int(float(text))
    except Exception:
        pass
    # Fix: also accept a bare "i" suffix such as "10Gi" — the original
    # grammar `(i?b)?` only matched "G", "GB" and "GiB" forms. The new
    # grammar is a strict superset, so previously-valid inputs parse the same.
    match = re.fullmatch(r"([0-9]+(?:\.[0-9]+)?)\s*([kmgtp]?)(i?b|i)?", text.lower())
    if not match:
        return None
    number = float(match.group(1))
    unit = (match.group(2) or "").lower()
    scale = {
        "": 1,
        "k": 1024,
        "m": 1024 ** 2,
        "g": 1024 ** 3,
        "t": 1024 ** 4,
        "p": 1024 ** 5,
    }.get(unit, 1)
    return int(number * scale)
@staticmethod
def _calc_cpu_percent(stats: Dict[str, Any]) -> float:
    """Compute CPU usage percent from a Docker stats payload.

    Uses the delta between cpu_stats and precpu_stats scaled by the number
    of online CPUs; returns 0.0 when deltas are non-positive or the payload
    is malformed.
    """
    try:
        cpu_stats = stats.get("cpu_stats") or {}
        precpu_stats = stats.get("precpu_stats") or {}
        cpu_total = float((cpu_stats.get("cpu_usage") or {}).get("total_usage") or 0)
        prev_cpu_total = float((precpu_stats.get("cpu_usage") or {}).get("total_usage") or 0)
        cpu_delta = cpu_total - prev_cpu_total
        system_total = float(cpu_stats.get("system_cpu_usage") or 0)
        prev_system_total = float(precpu_stats.get("system_cpu_usage") or 0)
        system_delta = system_total - prev_system_total
        # Fall back to the per-CPU usage list length, then 1, when the
        # daemon does not report online_cpus.
        online_cpus = int(
            cpu_stats.get("online_cpus")
            or len((cpu_stats.get("cpu_usage") or {}).get("percpu_usage") or [])
            or 1
        )
        if cpu_delta <= 0 or system_delta <= 0:
            return 0.0
        return max(0.0, (cpu_delta / system_delta) * online_cpus * 100.0)
    except Exception:
        return 0.0
def get_bot_resource_snapshot(self, bot_id: str) -> Dict[str, Any]:
    """Collect resource limits and live usage for the bot's container.

    Always returns the full snapshot shape; on any failure (no Docker,
    missing container, stats errors) the defaults below are returned
    with whatever fields were filled in before the failure.
    """
    snapshot: Dict[str, Any] = {
        "docker_status": "STOPPED",
        "limits": {
            "cpu_cores": None,
            "memory_bytes": None,
            "storage_bytes": None,
            "nano_cpus": 0,
            "storage_opt_raw": "",
        },
        "usage": {
            "cpu_percent": 0.0,
            "memory_bytes": 0,
            "memory_limit_bytes": 0,
            "memory_percent": 0.0,
            "network_rx_bytes": 0,
            "network_tx_bytes": 0,
            "blk_read_bytes": 0,
            "blk_write_bytes": 0,
            "pids": 0,
            "container_rw_bytes": 0,
        },
    }
    if not self.client:
        return snapshot
    try:
        container = self.client.containers.get(f"worker_{bot_id}")
        container.reload()
        status_raw = str(container.status or "").strip().lower()
        snapshot["docker_status"] = "RUNNING" if status_raw in {"running", "restarting"} else "STOPPED"
        # inspect with size=True adds SizeRw; older SDKs/daemons reject the
        # keyword, so fall back to a plain inspect in that case.
        inspect: Dict[str, Any]
        try:
            inspect = self.client.api.inspect_container(container.id, size=True)
        except TypeError:
            inspect = self.client.api.inspect_container(container.id)
        except Exception as e:
            if "unexpected keyword argument 'size'" in str(e):
                inspect = self.client.api.inspect_container(container.id)
            else:
                raise
        # --- configured limits -------------------------------------------
        host_cfg = inspect.get("HostConfig") or {}
        nano_cpus = int(host_cfg.get("NanoCpus") or 0)
        cpu_quota = int(host_cfg.get("CpuQuota") or 0)
        cpu_period = int(host_cfg.get("CpuPeriod") or 0)
        memory_bytes = int(host_cfg.get("Memory") or 0)
        storage_opt = host_cfg.get("StorageOpt") or {}
        storage_raw = storage_opt.get("size")
        storage_bytes = self._parse_size_to_bytes(storage_raw)
        # Prefer NanoCpus; derive cores from quota/period otherwise.
        if nano_cpus > 0:
            cpu_cores = nano_cpus / 1_000_000_000
        elif cpu_quota > 0 and cpu_period > 0:
            cpu_cores = cpu_quota / cpu_period
        else:
            cpu_cores = None
        snapshot["limits"] = {
            "cpu_cores": cpu_cores,
            "memory_bytes": memory_bytes if memory_bytes > 0 else None,
            "storage_bytes": storage_bytes,
            "nano_cpus": nano_cpus,
            "storage_opt_raw": str(storage_raw or ""),
        }
        snapshot["usage"]["container_rw_bytes"] = int(inspect.get("SizeRw") or 0)
        # --- live usage (only meaningful while running) ------------------
        if snapshot["docker_status"] == "RUNNING":
            stats = container.stats(stream=False) or {}
            memory_stats = stats.get("memory_stats") or {}
            memory_usage = int(memory_stats.get("usage") or 0)
            memory_limit = int(memory_stats.get("limit") or 0)
            if memory_usage > 0:
                # Subtract reclaimable page cache (inactive_file) so the
                # figure reflects working-set memory.
                cache = int((memory_stats.get("stats") or {}).get("inactive_file") or 0)
                memory_usage = max(0, memory_usage - cache)
            # Sum traffic across all attached networks.
            networks = stats.get("networks") or {}
            rx_total = 0
            tx_total = 0
            for _, row in networks.items():
                if isinstance(row, dict):
                    rx_total += int(row.get("rx_bytes") or 0)
                    tx_total += int(row.get("tx_bytes") or 0)
            # Sum block I/O by operation type.
            blk_stats = stats.get("blkio_stats") or {}
            io_rows = blk_stats.get("io_service_bytes_recursive") or []
            blk_read = 0
            blk_write = 0
            for row in io_rows:
                if not isinstance(row, dict):
                    continue
                op = str(row.get("op") or "").upper()
                value = int(row.get("value") or 0)
                if op == "READ":
                    blk_read += value
                elif op == "WRITE":
                    blk_write += value
            pids_current = int((stats.get("pids_stats") or {}).get("current") or 0)
            cpu_percent = self._calc_cpu_percent(stats)
            memory_percent = 0.0
            if memory_limit > 0:
                memory_percent = (memory_usage / memory_limit) * 100.0
            # Fallback for the writable-layer size when inspect lacked SizeRw.
            if snapshot["usage"]["container_rw_bytes"] <= 0:
                storage_stats = stats.get("storage_stats") or {}
                rw_size = int(storage_stats.get("size_rw") or storage_stats.get("rw_size") or 0)
                snapshot["usage"]["container_rw_bytes"] = max(0, rw_size)
            snapshot["usage"].update(
                {
                    "cpu_percent": cpu_percent,
                    "memory_bytes": memory_usage,
                    "memory_limit_bytes": memory_limit,
                    "memory_percent": max(0.0, memory_percent),
                    "network_rx_bytes": rx_total,
                    "network_tx_bytes": tx_total,
                    "blk_read_bytes": blk_read,
                    "blk_write_bytes": blk_write,
                    "pids": pids_current,
                }
            )
    except docker.errors.NotFound:
        return snapshot
    except Exception:
        return snapshot
    return snapshot
def send_command(self, bot_id: str, command: str, media: Optional[List[str]] = None) -> bool:
    """Deliver *command* to the bot: exec with retries, then host HTTP fallback.

    Records a human-readable reason in _last_delivery_error on failure and
    clears it on success.
    """
    if not self.client:
        self._last_delivery_error[bot_id] = "Docker client is not available"
        return False
    normalized_media = [
        str(item).strip().replace("\\", "/")
        for item in (media or [])
        if str(item).strip()
    ]
    self._last_delivery_error.pop(bot_id, None)
    for attempt in range(3):
        if self._send_command_via_exec(bot_id, command, normalized_media):
            self._last_delivery_error.pop(bot_id, None)
            return True
        # Back off a little more on each retry.
        time.sleep(0.25 * (attempt + 1))
    if self._send_command_via_host_http(bot_id, command, normalized_media):
        self._last_delivery_error.pop(bot_id, None)
        return True
    self._last_delivery_error.setdefault(bot_id, "Unknown delivery failure")
    return False
def get_last_delivery_error(self, bot_id: str) -> str:
    """Return the most recent delivery error for *bot_id* ("" when none)."""
    message = self._last_delivery_error.get(bot_id, "")
    return str(message or "").strip()
def get_recent_logs(self, bot_id: str, tail: int = 300) -> List[str]:
    """Return up to *tail* recent, non-blank log lines from the container."""
    if not self.client:
        return []
    try:
        worker = self.client.containers.get(f"worker_{bot_id}")
        raw_logs = worker.logs(tail=max(1, int(tail)))
        decoded = raw_logs.decode("utf-8", errors="ignore")
        return [row for row in decoded.splitlines() if row.strip()]
    except Exception:
        return []
def parse_monitor_packet(self, line: str) -> Optional[Dict[str, Any]]:
    """Public wrapper around the internal log-line parser."""
    normalized = str(line or "").strip()
    return self._parse_log_line(normalized)
def _workspace_dir(self, *, bot_id: str, workspace_root: Optional[str]) -> str:
    """Absolute path of the bot's workspace directory under its state dir."""
    state_dir = self._state_nanobot_dir(bot_id=bot_id, workspace_root=workspace_root)
    return os.path.abspath(os.path.join(state_dir, "workspace"))
def _state_nanobot_dir(self, *, bot_id: str, workspace_root: Optional[str]) -> str:
    """Absolute path of the bot's .nanobot state directory.

    Uses the backend-wide host_data_root unless a non-blank workspace_root
    override is supplied.
    """
    override = str(workspace_root or "").strip()
    if override:
        base = os.path.abspath(os.path.expanduser(override))
    else:
        base = self.host_data_root
    return os.path.abspath(os.path.join(base, bot_id, ".nanobot"))
def _monitor_container_logs(self, bot_id: str, container: Any, callback: Callable[[str, dict], None]) -> None:
    """Follow the container's log stream and forward lines to *callback*.

    Each complete line is emitted twice: once as a parsed monitor packet
    (when it parses) and once as a RAW_LOG packet. Runs until the stream
    ends or errors; intended to be executed in a daemon thread.
    """
    try:
        buffer = ""
        # Incremental decoder so multi-byte UTF-8 sequences split across
        # chunks decode correctly.
        decoder = codecs.getincrementaldecoder("utf-8")("replace")
        # Only follow logs from now on; skip historical output.
        since_ts = int(time.time())
        for chunk in container.logs(stream=True, follow=True, since=since_ts):
            text = decoder.decode(chunk) if isinstance(chunk, bytes) else str(chunk)
            if not text:
                continue
            buffer += text
            # Emit every complete line; keep the partial tail in the buffer.
            while "\n" in buffer:
                line, buffer = buffer.split("\n", 1)
                normalized = line.strip("\r").strip()
                if normalized:
                    state_packet = self._parse_log_line(normalized)
                    if state_packet:
                        callback(bot_id, state_packet)
                    callback(bot_id, {"type": "RAW_LOG", "text": normalized})
        # Flush any bytes still held by the incremental decoder.
        rest = decoder.decode(b"", final=True)
        if rest:
            buffer += rest
        # Emit the final unterminated line, if any.
        tail = buffer.strip()
        if tail:
            state_packet = self._parse_log_line(tail)
            if state_packet:
                callback(bot_id, state_packet)
            callback(bot_id, {"type": "RAW_LOG", "text": tail})
    except Exception:
        return
def _parse_monitor_packet_json(self, line: str) -> Optional[Dict[str, Any]]:
    """Extract a dashboard packet from a __DASHBOARD_DATA_START__ log line.

    Returns one of three packet shapes — AGENT_STATE, ASSISTANT_MESSAGE or
    BUS_EVENT — or None when the line has no markers, the JSON is invalid,
    or the decoded event carries no content.
    """
    if "__DASHBOARD_DATA_START__" not in line or "__DASHBOARD_DATA_END__" not in line:
        return None
    try:
        # JSON payload sits between the START/END sentinel markers.
        raw_json = line.split("__DASHBOARD_DATA_START__", 1)[1].split("__DASHBOARD_DATA_END__", 1)[0].strip()
        data = json.loads(raw_json)
        event_type = str(data.get("type", "")).upper()
        # "content" is preferred; "text" is accepted as a fallback key.
        content = str(data.get("content") or data.get("text") or "").strip()
        media = [str(v).strip().replace("\\", "/") for v in (data.get("media") or []) if str(v).strip()]
        is_progress = bool(data.get("is_progress", False))
        is_tool = bool(data.get("is_tool", False))
        usage = data.get("usage") if isinstance(data.get("usage"), dict) else None
        request_id = str(data.get("request_id") or "").strip() or None
        provider = str(data.get("provider") or "").strip() or None
        model = str(data.get("model") or "").strip() or None
        if event_type == "AGENT_STATE":
            payload = data.get("payload") or {}
            # State may live in payload or at top level; default by is_tool.
            state = str(payload.get("state") or data.get("state") or ("TOOL_CALL" if is_tool else "THINKING"))
            action_msg = str(payload.get("action_msg") or payload.get("msg") or content)
            return {
                "type": "AGENT_STATE",
                "channel": "dashboard",
                "payload": {"state": state, "action_msg": action_msg},
                "request_id": request_id,
            }
        if event_type == "ASSISTANT_MESSAGE":
            # Drop assistant messages that carry neither text nor media.
            if content or media:
                return {
                    "type": "ASSISTANT_MESSAGE",
                    "channel": "dashboard",
                    "text": content,
                    "media": media,
                    "usage": usage,
                    "request_id": request_id,
                    "provider": provider,
                    "model": model,
                }
            return None
        # Progress updates are folded into BUS_EVENT regardless of type.
        if event_type == "BUS_EVENT" or is_progress:
            return {
                "type": "BUS_EVENT",
                "channel": "dashboard",
                "content": content,
                "media": media,
                "is_progress": is_progress,
                "is_tool": is_tool,
                "usage": usage,
                "request_id": request_id,
                "provider": provider,
                "model": model,
            }
        # Unknown event types with content default to ASSISTANT_MESSAGE.
        if content or media:
            return {
                "type": "ASSISTANT_MESSAGE",
                "channel": "dashboard",
                "text": content,
                "media": media,
                "usage": usage,
                "request_id": request_id,
                "provider": provider,
                "model": model,
            }
    except Exception:
        return None
    return None
def _parse_log_line(self, line: str) -> Optional[Dict[str, Any]]:
if "__DASHBOARD_DATA_START__" in line:
packet = self._parse_monitor_packet_json(line)
if packet:
return packet
process_match = re.search(r"Processing message from ([\w\-]+):[^:]+:\s*(.+)$", line)
if process_match:
channel = process_match.group(1).strip().lower()
action_msg = process_match.group(2).strip()
return {
"type": "AGENT_STATE",
"channel": channel,
"payload": {
"state": "THINKING",
"action_msg": action_msg[:4000],
},
}
response_match = re.search(r"Response to ([\w\-]+):[^:]+:\s*(.+)$", line)
if response_match:
channel = response_match.group(1).strip().lower()
action_msg = response_match.group(2).strip()
if channel == "dashboard":
return {
"type": "ASSISTANT_MESSAGE",
"channel": "dashboard",
"text": action_msg[:4000],
}
return {
"type": "AGENT_STATE",
"channel": channel,
"payload": {
"state": "SUCCESS",
"action_msg": action_msg[:4000],
},
}
lower = line.lower()
tool_call_match = re.search(r"tool call:\s*(.+)$", line, re.IGNORECASE)
if tool_call_match:
return {
"type": "AGENT_STATE",
"payload": {
"state": "TOOL_CALL",
"action_msg": tool_call_match.group(1).strip()[:4000],
},
}
if "error" in lower or "traceback" in lower:
return {
"type": "AGENT_STATE",
"payload": {"state": "ERROR", "action_msg": "执行异常,请检查日志"},
}
return None
    def _send_command_via_exec(self, bot_id: str, command: str, media: Optional[List[str]] = None) -> bool:
        """Deliver a chat command to the worker's dashboard via `docker exec`.

        Tries curl inside the container first; on failure falls back to an
        inline python urllib script (python3, then python). Returns True on the
        first successful delivery; failures record _last_delivery_error.
        """
        try:
            container = self.client.containers.get(f"worker_{bot_id}")
            container.reload()
            if container.status != "running":
                self._last_delivery_error[bot_id] = f"Container status is {container.status}"
                return False
            dashboard_port = self._resolve_dashboard_port(container=container, bot_id=bot_id)
            dashboard_url = f"http://127.0.0.1:{dashboard_port}/chat"
            payload_json = json.dumps({"message": command, "media": media or []}, ensure_ascii=False)
            result = container.exec_run(
                [
                    "curl",
                    "-sS",
                    "--fail",
                    "--max-time",
                    "6",
                    "-X",
                    "POST",
                    "-H",
                    "Content-Type: application/json",
                    "-d",
                    payload_json,
                    dashboard_url,
                ]
            )
            output = result.output.decode("utf-8", errors="ignore") if isinstance(result.output, (bytes, bytearray)) else str(result.output)
            if result.exit_code != 0:
                # curl missing or request failed: retry with a python one-liner.
                # The payload travels base64-encoded through the environment to
                # avoid shell quoting issues.
                payload_b64 = base64.b64encode(payload_json.encode("utf-8")).decode("ascii")
                py_script = (
                    "import base64,json,os,urllib.request\n"
                    "payload=json.loads(base64.b64decode(os.environ['DASHBOARD_PAYLOAD_B64']).decode('utf-8'))\n"
                    "req=urllib.request.Request(os.environ.get('DASHBOARD_CHAT_URL', 'http://127.0.0.1:9000/chat'),"
                    "data=json.dumps(payload,ensure_ascii=False).encode('utf-8'),"
                    "headers={'Content-Type':'application/json'})\n"
                    "with urllib.request.urlopen(req, timeout=8) as resp:\n"
                    " print(resp.read().decode('utf-8','ignore'))\n"
                )
                for py_bin in ["python3", "python"]:
                    py_result = container.exec_run(
                        [py_bin, "-c", py_script],
                        environment={
                            "DASHBOARD_PAYLOAD_B64": payload_b64,
                            "DASHBOARD_CHAT_URL": dashboard_url,
                        },
                    )
                    py_output = py_result.output.decode("utf-8", errors="ignore") if isinstance(py_result.output, (bytes, bytearray)) else str(py_result.output)
                    if py_result.exit_code == 0:
                        return True
                    self._last_delivery_error[bot_id] = f"exec fallback failed: {py_output[:300]}"
                # NOTE(review): this overwrites the fallback error recorded above, so
                # only the curl failure text survives — confirm that is intended.
                self._last_delivery_error[bot_id] = f"exec curl failed: {output[:300]}"
                return False
            return True
        except Exception as exc:
            self._last_delivery_error[bot_id] = f"exec curl exception: {exc}"
            return False
def _send_command_via_host_http(self, bot_id: str, command: str, media: Optional[List[str]] = None) -> bool:
try:
container = self.client.containers.get(f"worker_{bot_id}")
container.reload()
ip_address = self._resolve_container_ip(container)
if not ip_address:
self._last_delivery_error[bot_id] = "host HTTP failed: container has no reachable IP address"
return False
dashboard_port = self._resolve_dashboard_port(container=container, bot_id=bot_id)
target_url = f"http://{ip_address}:{dashboard_port}/chat"
with httpx.Client(timeout=4.0) as client:
resp = client.post(target_url, json={"message": command, "media": media or []})
if resp.status_code == 200:
return True
self._last_delivery_error[bot_id] = f"host HTTP failed: {resp.status_code} - {resp.text[:300]}"
return False
except Exception as exc:
self._last_delivery_error[bot_id] = f"host HTTP exception: {exc}"
return False
def _resolve_dashboard_port(self, *, container: Any, bot_id: str) -> int:
# Dashboard channel port may be per-bot dynamic; read from mounted config.json when available.
default_port = 9000
config_path = self._resolve_mounted_config_path(container=container, bot_id=bot_id)
if not config_path or not os.path.isfile(config_path):
return default_port
try:
with open(config_path, "r", encoding="utf-8") as fh:
payload = json.load(fh)
if not isinstance(payload, dict):
return default_port
channels = payload.get("channels")
if not isinstance(channels, dict):
return default_port
dashboard = channels.get("dashboard")
if not isinstance(dashboard, dict):
return default_port
raw_port = int(dashboard.get("port") or default_port)
if 1 <= raw_port <= 65535:
return raw_port
except Exception:
return default_port
return default_port
def _resolve_mounted_config_path(self, *, container: Any, bot_id: str) -> str:
mounts = list((container.attrs or {}).get("Mounts") or [])
for row in mounts:
if not isinstance(row, dict):
continue
destination = str(row.get("Destination") or "").strip()
if destination != "/root/.nanobot":
continue
source = str(row.get("Source") or "").strip()
if source:
return os.path.join(source, "config.json")
return os.path.join(self.host_data_root, bot_id, ".nanobot", "config.json")
@staticmethod
def _resolve_container_ip(container: Any) -> str:
attrs = dict(getattr(container, "attrs", {}) or {})
network = dict(attrs.get("NetworkSettings") or {})
primary = str(network.get("IPAddress") or "").strip()
if primary:
return primary
networks = dict(network.get("Networks") or {})
for _, row in networks.items():
if not isinstance(row, dict):
continue
ip = str(row.get("IPAddress") or "").strip()
if ip:
return ip
return ""

View File

@ -0,0 +1,31 @@
import os
from typing import Dict
from app.core.settings import EDGE_BOTS_WORKSPACE_ROOT, EDGE_BASE_IMAGE
from app.runtime.docker_manager import EdgeDockerManager
from app.runtime.native_manager import EdgeNativeRuntimeBackend
def edge_runtime_mode() -> str:
    """Return the edge runtime mode from EDGE_RUNTIME_KIND: "docker", "native", or "all"."""
    configured = str(os.getenv("EDGE_RUNTIME_KIND", "all") or "all").strip().lower()
    return configured if configured in {"docker", "native"} else "all"
def build_edge_runtime_backends() -> Dict[str, object]:
    """Instantiate the runtime backends selected by the configured edge mode.

    Mode "all" yields both backends; "docker" or "native" yields just one.
    """
    mode = edge_runtime_mode()
    backends: Dict[str, object] = {}
    if mode != "native":
        backends["docker"] = EdgeDockerManager(host_data_root=EDGE_BOTS_WORKSPACE_ROOT, base_image=EDGE_BASE_IMAGE)
    if mode != "docker":
        backends["native"] = EdgeNativeRuntimeBackend()
    return backends
def preferred_edge_runtime_kind(backends: Dict[str, object]) -> str:
    """Pick the runtime kind to prefer: docker first, then native, defaulting to docker."""
    for kind in ("docker", "native"):
        if kind in backends:
            return kind
    return "docker"

View File

@ -0,0 +1,782 @@
import codecs
import csv
import hashlib
import json
import signal
import socket
import os
import re
import shlex
import shutil
import subprocess
import threading
import time
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Callable, Dict, List, Optional
import httpx
import psutil
from app.core.settings import (
EDGE_BOTS_WORKSPACE_ROOT,
EDGE_NATIVE_COMMAND,
EDGE_NATIVE_DASHBOARD_URL,
EDGE_NATIVE_WORKDIR,
)
from app.runtime.base import EdgeRuntimeBackend
@dataclass
class _NativeProcessRecord:
    """Book-keeping for one natively launched bot gateway process."""

    # The spawned process (text-mode pipes, stdout captured).
    process: subprocess.Popen[str]
    # Exact argv used to launch; kept for diagnostics.
    command: List[str]
    # Working directory the process was started in.
    cwd: str
    # Append-mode log file mirroring the process stdout.
    log_path: str
    # Open handle for log_path (closed by _cleanup_record).
    log_handle: Any
    # Dashboard chat endpoint plus host/port used for readiness checks.
    dashboard_url: str
    dashboard_host: str
    dashboard_port: int
    # Requested resource limits (reported in snapshots; not enforced natively).
    cpu_cores: Optional[float]
    memory_mb: Optional[int]
    storage_gb: Optional[int]
    # Signals the stdout-drain thread to stop.
    stop_event: threading.Event = field(default_factory=threading.Event)
    # Thread draining the process stdout into the log file.
    stdout_thread: Optional[threading.Thread] = None
    # Most recent error message for this bot.
    last_error: str = ""
class EdgeNativeRuntimeBackend(EdgeRuntimeBackend):
    """Runtime backend that launches bots as native host processes (no Docker)."""

    # Identifier used to select this backend in the runtime registry.
    runtime_kind = "native"
    def __init__(self) -> None:
        # Default launch argv, parsed once from the EDGE_NATIVE_COMMAND setting.
        self._command = shlex.split(EDGE_NATIVE_COMMAND)
        # Native mode is usable only when the command's executable is on PATH.
        self._native_available = bool(self._command and shutil.which(self._command[0]))
        # Per-bot latest error strings and live process records.
        self._last_errors: Dict[str, str] = {}
        self._records: Dict[str, _NativeProcessRecord] = {}
        # Guards start/stop transitions over _records.
        self._lock = threading.RLock()
def capabilities(self) -> Dict[str, Any]:
available = bool(self._native_available)
return {
"protocol": {"version": "1"},
"runtime": {"docker": False, "native": available},
"workspace": {
"tree": True,
"read_file": True,
"write_markdown": True,
"upload_files": True,
"serve_file": True,
},
"monitor": {"logs": available, "ensure": available},
"process": {"command": list(self._command), "available": available},
}
def has_image(self, tag: str) -> bool:
return False
    def start_bot(
        self,
        bot_id: str,
        image_tag: Optional[str] = None,
        env_vars: Optional[Dict[str, str]] = None,
        workspace_root: Optional[str] = None,
        native_command: Optional[str] = None,
        native_workdir: Optional[str] = None,
        cpu_cores: Optional[float] = None,
        memory_mb: Optional[int] = None,
        storage_gb: Optional[int] = None,
        on_state_change: Optional[Callable[[str, dict], None]] = None,
    ) -> bool:
        """Launch (or re-attach to) the bot's native gateway process.

        Resolves the launch command, prepares dirs/env, kills orphaned
        gateways bound to the same config, spawns the process with stdout
        piped into a log file, and waits for the dashboard port to accept
        connections. Returns True on success; failures record an error via
        _set_last_error. image_tag is accepted for interface parity only.
        """
        bot_id = str(bot_id or "").strip()
        if not bot_id:
            return False
        effective_env = dict(env_vars or {})
        launch_command = self._resolve_launch_command(native_command=native_command, env_vars=effective_env)
        if not self._is_launch_command_available(launch_command):
            self._set_last_error(bot_id, f"native command not available: {self._render_command(launch_command) or 'nanobot gateway'}")
            return False
        with self._lock:
            existing = self._records.get(bot_id)
            if existing and existing.process.poll() is None:
                # Already running: just make sure a monitor thread is attached.
                if on_state_change:
                    self.ensure_monitor(bot_id, on_state_change)
                return True
            if existing:
                self._cleanup_record(bot_id, existing)
            state_root = self._bot_root(bot_id)
            workspace_dir = self._workspace_dir(bot_id=bot_id, workspace_root=workspace_root)
            config_path = self._config_path(bot_id, workspace_root=workspace_root)
            runtime_dir = os.path.join(os.path.dirname(config_path), "runtime")
            os.makedirs(runtime_dir, exist_ok=True)
            os.makedirs(workspace_dir, exist_ok=True)
            log_path = os.path.join(runtime_dir, "native.log")
            cwd = self._resolve_workdir(state_root, native_workdir=native_workdir, env_vars=effective_env)
            dashboard_host, dashboard_port, dashboard_url = self._resolve_dashboard_endpoint(bot_id, effective_env)
            # Caller env takes precedence; setdefault keeps any caller-provided values.
            env = os.environ.copy()
            env.update({str(k): str(v) for k, v in effective_env.items() if str(k).strip()})
            env.setdefault("PYTHONUNBUFFERED", "1")
            env.setdefault("EDGE_RUNTIME_KIND", "native")
            env.setdefault("EDGE_NODE_MODE", "native")
            env.setdefault("NANOBOT_BOT_ID", bot_id)
            env.setdefault("DASHBOARD_HOST", dashboard_host)
            env.setdefault("DASHBOARD_PORT", str(dashboard_port))
            env.setdefault("DASHBOARD_URL", dashboard_url)
            env.setdefault("NANOBOT_CONFIG", config_path)
            env.setdefault("NANOBOT_WORKSPACE", workspace_dir)
            if not os.path.isfile(config_path):
                self._set_last_error(bot_id, f"native config not found: {config_path}")
                return False
            # Kill leftover gateways from a previous edge process bound to this config.
            self._terminate_orphan_processes(bot_id=bot_id, config_path=config_path)
            log_handle = open(log_path, "a", encoding="utf-8")
            command = self._build_launch_command(base_command=launch_command, config_path=config_path, workspace_dir=workspace_dir)
            log_handle.write(
                f"[{self._now()}] native bootstrap command={shlex.join(command)} cwd={cwd} config={config_path} workspace={workspace_dir} dashboard={dashboard_url}\n"
            )
            log_handle.flush()
            try:
                # start_new_session detaches the child from our process group so
                # our signals don't propagate to it.
                process = subprocess.Popen(
                    command,
                    cwd=cwd,
                    env=env,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    text=True,
                    bufsize=1,
                    start_new_session=True,
                )
            except FileNotFoundError as exc:
                log_handle.write(f"[{self._now()}] native bootstrap failed: {exc}\n")
                log_handle.flush()
                log_handle.close()
                self._set_last_error(bot_id, f"native command not found: {exc}")
                return False
            except Exception as exc:
                log_handle.write(f"[{self._now()}] native bootstrap failed: {exc}\n")
                log_handle.flush()
                log_handle.close()
                self._set_last_error(bot_id, f"native start failed: {exc}")
                return False
            record = _NativeProcessRecord(
                process=process,
                command=command,
                cwd=cwd,
                log_path=log_path,
                log_handle=log_handle,
                dashboard_url=dashboard_url,
                dashboard_host=dashboard_host,
                dashboard_port=dashboard_port,
                cpu_cores=cpu_cores,
                memory_mb=memory_mb,
                storage_gb=storage_gb,
            )
            self._records[bot_id] = record
            record.stdout_thread = threading.Thread(
                target=self._drain_stdout,
                args=(bot_id, record, on_state_change),
                daemon=True,
            )
            record.stdout_thread.start()
            if not self._wait_for_dashboard_ready(record):
                # Dashboard never came up: tear the process down and report failure.
                self._set_last_error(bot_id, f"native dashboard did not become ready: {dashboard_url}")
                try:
                    if process.poll() is None:
                        process.terminate()
                        process.wait(timeout=5)
                except Exception:
                    pass
                self._cleanup_record(bot_id, record)
                self._records.pop(bot_id, None)
                return False
            self._set_last_error(bot_id, "")
            return True
def ensure_monitor(self, bot_id: str, on_state_change: Callable[[str, dict], None]) -> bool:
record = self._records.get(bot_id)
if record is None or record.process.poll() is not None:
return False
thread = record.stdout_thread
if thread is not None and thread.is_alive():
return True
record.stdout_thread = threading.Thread(
target=self._drain_stdout,
args=(bot_id, record, on_state_change),
daemon=True,
)
record.stdout_thread.start()
return True
    def stop_bot(self, bot_id: str) -> bool:
        """Terminate the bot's tracked process and any orphaned gateways.

        Escalates terminate -> kill on timeout. Returns True when either the
        tracked process or at least one orphan was stopped; returns False if
        stopping the tracked process itself raised.
        """
        bot_id = str(bot_id or "").strip()
        with self._lock:
            record = self._records.pop(bot_id, None)
            stopped = False
            if record is not None:
                try:
                    if record.process.poll() is None:
                        record.stop_event.set()
                        record.process.terminate()
                        try:
                            record.process.wait(timeout=8)
                        except Exception:
                            # Graceful shutdown timed out: force-kill.
                            record.process.kill()
                            record.process.wait(timeout=5)
                    self._cleanup_record(bot_id, record)
                    stopped = True
                except Exception as exc:
                    self._set_last_error(bot_id, f"native stop failed: {exc}")
                    self._cleanup_record(bot_id, record)
                    return False
            # Also reap gateways left behind by a previous edge process.
            orphan_stopped = self._terminate_orphan_processes(bot_id=bot_id, config_path=self._config_path(bot_id))
            return bool(stopped or orphan_stopped)
def get_bot_status(self, bot_id: str) -> str:
normalized_bot_id = str(bot_id or "").strip()
record = self._records.get(normalized_bot_id)
if record is None:
return "RUNNING" if self._has_orphan_process(normalized_bot_id) else "STOPPED"
try:
return "RUNNING" if record.process.poll() is None else "STOPPED"
except Exception:
return "STOPPED"
    def get_bot_resource_snapshot(self, bot_id: str) -> Dict[str, Any]:
        """Build a docker-shaped resource snapshot for a native process.

        Limits come from the record's requested values (advisory in native
        mode); usage is sampled via psutil. Network/block-IO counters are
        always zero — they are not tracked per-process here. The key is named
        "docker_status" to keep the payload shape shared with the docker
        backend.
        """
        bot_id = str(bot_id or "").strip()
        record = self._records.get(bot_id)
        snapshot: Dict[str, Any] = {
            "docker_status": self.get_bot_status(bot_id),
            "limits": {
                "cpu_cores": self._normalize_cpu_limit(record.cpu_cores if record else None),
                "memory_bytes": self._normalize_memory_limit(record.memory_mb if record else None),
                "storage_bytes": self._normalize_storage_limit(record.storage_gb if record else None),
                "nano_cpus": 0,
                "storage_opt_raw": "",
            },
            "usage": {
                "cpu_percent": 0.0,
                "memory_bytes": 0,
                "memory_limit_bytes": 0,
                "memory_percent": 0.0,
                "network_rx_bytes": 0,
                "network_tx_bytes": 0,
                "blk_read_bytes": 0,
                "blk_write_bytes": 0,
                "pids": 0,
                "container_rw_bytes": 0,
            },
        }
        if record is None or record.process.poll() is not None:
            return snapshot
        try:
            proc = psutil.Process(record.process.pid)
            # interval=None returns the delta since the previous sample (0.0 first call).
            cpu_percent = float(proc.cpu_percent(interval=None) or 0.0)
            memory_info = proc.memory_info()
            memory_bytes = int(getattr(memory_info, "rss", 0) or 0)
            memory_limit = int(psutil.virtual_memory().total or 0)
            memory_percent = float(proc.memory_percent() or 0.0)
            children = proc.children(recursive=True)
            workspace_used = self._calc_workspace_used_bytes(bot_id)
            snapshot["usage"].update(
                {
                    "cpu_percent": round(cpu_percent, 2),
                    "memory_bytes": memory_bytes,
                    "memory_limit_bytes": memory_limit,
                    "memory_percent": round(memory_percent, 2),
                    "network_rx_bytes": 0,
                    "network_tx_bytes": 0,
                    "blk_read_bytes": 0,
                    "blk_write_bytes": 0,
                    "pids": 1 + len(children),
                    "container_rw_bytes": workspace_used,
                }
            )
        except Exception:
            # Process vanished mid-sample: still report workspace disk usage.
            workspace_used = self._calc_workspace_used_bytes(bot_id)
            snapshot["usage"]["container_rw_bytes"] = workspace_used
        return snapshot
def get_recent_logs(self, bot_id: str, tail: int = 300) -> List[str]:
log_path = self._log_path(str(bot_id or "").strip())
if not os.path.isfile(log_path):
return []
try:
with open(log_path, "r", encoding="utf-8", errors="ignore") as fh:
rows = [line.rstrip("\n") for line in fh.readlines() if line.strip()]
if tail > 0:
return rows[-int(tail) :]
return rows
except Exception:
return []
def send_command(self, bot_id: str, command: str, media: Optional[List[str]] = None) -> bool:
bot_id = str(bot_id or "").strip()
record = self._records.get(bot_id)
if record is None or record.process.poll() is not None:
self._set_last_error(bot_id, "native process is not running")
return False
try:
payload = {"message": command, "media": list(media or [])}
with httpx.Client(timeout=5.0, trust_env=False) as client:
resp = client.post(record.dashboard_url, json=payload)
if resp.status_code == 200:
self._set_last_error(bot_id, "")
return True
self._set_last_error(bot_id, f"native dashboard returned {resp.status_code}: {resp.text[:300]}")
return False
except Exception as exc:
self._set_last_error(bot_id, f"native dashboard request failed: {exc}")
return False
def get_last_delivery_error(self, bot_id: str) -> str:
bot_id = str(bot_id or "").strip()
record = self._records.get(bot_id)
if record is None:
return str(self._last_errors.get(bot_id) or "").strip()
return str(record.last_error or self._last_errors.get(bot_id) or "").strip()
def parse_monitor_packet(self, line: str) -> Optional[Dict[str, Any]]:
return self._parse_log_line(str(line or "").strip())
    def _drain_stdout(
        self,
        bot_id: str,
        record: _NativeProcessRecord,
        callback: Optional[Callable[[str, dict], None]] = None,
    ) -> None:
        """Pump the child's stdout into the log file and the state callback.

        Runs in a daemon thread until EOF or until record.stop_event is set.
        Each non-blank line is appended to the log; when a callback is given,
        a parsed packet (if any) is emitted first, then a RAW_LOG event for
        every line.
        """
        stream = record.process.stdout
        if stream is None:
            return
        try:
            # readline returns "" at EOF, ending the iteration.
            for raw_line in iter(stream.readline, ""):
                if record.stop_event.is_set():
                    break
                line = str(raw_line or "").rstrip("\r\n")
                if not line:
                    continue
                try:
                    record.log_handle.write(f"{line}\n")
                    record.log_handle.flush()
                except Exception:
                    # Log mirroring is best-effort; never kill the drain loop.
                    pass
                if callback:
                    parsed = self._parse_log_line(line)
                    if parsed:
                        callback(bot_id, parsed)
                    callback(bot_id, {"type": "RAW_LOG", "text": line})
        finally:
            try:
                stream.close()
            except Exception:
                pass
            try:
                record.log_handle.flush()
            except Exception:
                pass
def _cleanup_record(self, bot_id: str, record: _NativeProcessRecord) -> None:
try:
record.stop_event.set()
except Exception:
pass
try:
if record.log_handle and not record.log_handle.closed:
record.log_handle.flush()
record.log_handle.close()
except Exception:
pass
def _set_last_error(self, bot_id: str, message: str) -> None:
normalized_bot_id = str(bot_id or "").strip()
self._last_errors[normalized_bot_id] = str(message or "").strip()
record = self._records.get(normalized_bot_id)
if record is None:
return
record.last_error = self._last_errors[normalized_bot_id]
def _resolve_workdir(
self,
bot_root: str,
*,
native_workdir: Optional[str] = None,
env_vars: Optional[Dict[str, str]] = None,
) -> str:
configured = str(native_workdir or (env_vars or {}).get("EDGE_NATIVE_WORKDIR") or EDGE_NATIVE_WORKDIR or "").strip()
if configured:
return os.path.abspath(configured)
return os.path.abspath(bot_root)
def _resolve_dashboard_endpoint(self, bot_id: str, env_vars: Dict[str, str]) -> tuple[str, int, str]:
host = str(env_vars.get("DASHBOARD_HOST") or os.getenv("EDGE_NATIVE_DASHBOARD_HOST") or "127.0.0.1").strip() or "127.0.0.1"
raw_port = str(env_vars.get("DASHBOARD_PORT") or os.getenv("EDGE_NATIVE_DASHBOARD_PORT") or "").strip()
try:
port = int(raw_port) if raw_port else self._default_dashboard_port(bot_id)
except Exception:
port = self._default_dashboard_port(bot_id)
port = max(1, min(port, 65535))
url = str(env_vars.get("DASHBOARD_URL") or os.getenv("EDGE_NATIVE_DASHBOARD_URL") or f"http://{host}:{port}/chat").strip()
if not url:
url = f"http://{host}:{port}/chat"
return host, port, url
def _build_launch_command(self, *, base_command: List[str], config_path: str, workspace_dir: str) -> List[str]:
command = list(base_command)
has_config_flag = any(part in {"--config", "-c"} for part in command)
has_workspace_flag = any(part in {"--workspace", "-w"} for part in command)
if not has_config_flag:
command.extend(["--config", config_path])
if not has_workspace_flag:
command.extend(["--workspace", workspace_dir])
return command
def _resolve_launch_command(self, *, native_command: Optional[str], env_vars: Dict[str, str]) -> List[str]:
explicit = str(native_command or "").strip()
if explicit:
return self._parse_launcher_command(explicit)
configured = str(env_vars.get("EDGE_NATIVE_COMMAND") or "").strip()
if configured:
rows = self._parse_launcher_command(configured)
if rows:
return rows
return list(self._command)
    @staticmethod
    def _parse_launcher_command(raw_command: str) -> List[str]:
        """Parse a launcher command string into an argv list.

        Accepts, in order of preference: a JSON array of strings, a quoted
        comma-separated list (CSV), and finally plain shell-style splitting.
        Returns [] when nothing parses.
        """
        text = str(raw_command or "").strip()
        if not text:
            return []
        if text.startswith("[") and text.endswith("]"):
            try:
                payload = json.loads(text)
                if isinstance(payload, list):
                    rows = [str(item or "").strip() for item in payload if str(item or "").strip()]
                    if rows:
                        return rows
            except Exception:
                pass
        # CSV form is only attempted when the text looks quoted, to avoid
        # splitting ordinary commands that merely contain commas.
        if "," in text and any(mark in text for mark in ['"', "'"]):
            try:
                rows = [str(item or "").strip() for item in next(csv.reader([text], skipinitialspace=True)) if str(item or "").strip()]
                if rows:
                    return rows
            except Exception:
                pass
        try:
            return [str(item or "").strip() for item in shlex.split(text) if str(item or "").strip()]
        except Exception:
            return []
    @staticmethod
    def _is_launch_command_available(command: List[str]) -> bool:
        """True when the command's executable can be found on PATH."""
        if not command:
            return False
        return bool(shutil.which(command[0]))
    @staticmethod
    def _render_command(command: List[str]) -> str:
        """Join non-empty argv parts into a single display string."""
        return " ".join(str(part or "").strip() for part in command if str(part or "").strip())
    def _log_path(self, bot_id: str) -> str:
        """Path of the bot's native log, next to its resolved config.json."""
        config_path = self._config_path(bot_id)
        return os.path.join(os.path.dirname(config_path), "runtime", "native.log")
    def _config_path(self, bot_id: str, workspace_root: Optional[str] = None) -> str:
        """Resolve the bot's config.json.

        Tries, in order: the caller-supplied workspace root, the root recorded
        in runtime-target.json, and finally the default bot root. The first
        two are used only when the file actually exists.
        """
        configured_root = str(workspace_root or "").strip()
        if configured_root:
            external_config = os.path.abspath(
                os.path.join(
                    os.path.abspath(os.path.expanduser(configured_root)),
                    bot_id,
                    ".nanobot",
                    "config.json",
                )
            )
            if os.path.isfile(external_config):
                return external_config
        inferred_root = self._workspace_root_from_runtime_target(bot_id)
        if inferred_root:
            inferred_config = os.path.abspath(os.path.join(inferred_root, bot_id, ".nanobot", "config.json"))
            if os.path.isfile(inferred_config):
                return inferred_config
        return os.path.join(self._bot_root(bot_id), ".nanobot", "config.json")
    def _bot_root(self, bot_id: str) -> str:
        """Default per-bot state directory under the edge workspace root."""
        return os.path.abspath(os.path.join(EDGE_BOTS_WORKSPACE_ROOT, bot_id))
    def _workspace_dir(self, *, bot_id: str, workspace_root: Optional[str] = None) -> str:
        """Resolve the bot's workspace dir: explicit root > config value > default."""
        configured_root = str(workspace_root or "").strip()
        if configured_root:
            normalized_root = os.path.abspath(os.path.expanduser(configured_root))
            return os.path.abspath(os.path.join(normalized_root, bot_id, ".nanobot", "workspace"))
        config_workspace = self._workspace_dir_from_config(bot_id)
        if config_workspace:
            return config_workspace
        return os.path.abspath(os.path.join(self._bot_root(bot_id), ".nanobot", "workspace"))
    def _workspace_dir_from_config(self, bot_id: str) -> Optional[str]:
        """Read agents.defaults.workspace from the bot's config.json, or None."""
        config_path = self._config_path(bot_id)
        if not os.path.isfile(config_path):
            return None
        try:
            with open(config_path, "r", encoding="utf-8") as fh:
                payload = json.load(fh)
            if not isinstance(payload, dict):
                return None
            agents = payload.get("agents") if isinstance(payload.get("agents"), dict) else {}
            defaults = agents.get("defaults") if isinstance(agents.get("defaults"), dict) else {}
            workspace = str(defaults.get("workspace") or "").strip()
            if not workspace:
                return None
            return os.path.abspath(os.path.expanduser(workspace))
        except Exception:
            return None
    def _workspace_root_from_runtime_target(self, bot_id: str) -> str:
        """Read workspace_root from the bot's runtime-target.json ("" when absent/invalid)."""
        path = os.path.join(self._bot_root(bot_id), ".nanobot", "runtime-target.json")
        if not os.path.isfile(path):
            return ""
        try:
            with open(path, "r", encoding="utf-8") as fh:
                payload = json.load(fh)
            if not isinstance(payload, dict):
                return ""
            raw_root = str(payload.get("workspace_root") or "").strip()
            if not raw_root:
                return ""
            return os.path.abspath(os.path.expanduser(raw_root))
        except Exception:
            return ""
    def _has_orphan_process(self, bot_id: str) -> bool:
        """True when an untracked gateway bound to this bot's config is alive."""
        return bool(self._find_orphan_processes(bot_id=bot_id, config_path=self._config_path(bot_id)))
    def _find_orphan_processes(self, *, bot_id: str, config_path: str) -> List[psutil.Process]:
        """Find running `nanobot.cli.commands ... gateway` processes for this config.

        Matching is by substring over the joined command line: the process
        must mention the nanobot gateway module and this bot's absolute
        config path. Races and permission errors are silently skipped.
        """
        matches: List[psutil.Process] = []
        normalized_config_path = os.path.abspath(config_path)
        for proc in psutil.process_iter(["pid", "cmdline"]):
            try:
                cmdline = [str(part or "") for part in (proc.info.get("cmdline") or [])]
                if not cmdline:
                    continue
                joined = " ".join(cmdline)
                if "nanobot.cli.commands" not in joined or " gateway" not in joined:
                    continue
                if normalized_config_path not in joined:
                    continue
                matches.append(proc)
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue
            except Exception:
                continue
        return matches
    def _terminate_orphan_processes(self, *, bot_id: str, config_path: str) -> int:
        """SIGTERM (then SIGKILL on timeout) orphaned gateways; return count stopped."""
        stopped = 0
        for proc in self._find_orphan_processes(bot_id=bot_id, config_path=config_path):
            try:
                os.kill(int(proc.pid), signal.SIGTERM)
                try:
                    proc.wait(timeout=5)
                except psutil.TimeoutExpired:
                    os.kill(int(proc.pid), signal.SIGKILL)
                    proc.wait(timeout=3)
                stopped += 1
            except (psutil.NoSuchProcess, ProcessLookupError):
                # Already gone — nothing to count.
                continue
            except Exception as exc:
                self._set_last_error(bot_id, f"failed to cleanup orphan native process: {exc}")
        return stopped
    @staticmethod
    def _wait_for_dashboard_ready(record: _NativeProcessRecord, timeout_seconds: float = 8.0) -> bool:
        """Poll the dashboard TCP port until it accepts a connection.

        Returns False immediately when the process exits, or when the port
        never opens within the (at least 1s) deadline.
        """
        deadline = time.monotonic() + max(1.0, float(timeout_seconds or 8.0))
        while time.monotonic() < deadline:
            if record.process.poll() is not None:
                return False
            try:
                with socket.create_connection((record.dashboard_host, record.dashboard_port), timeout=0.5):
                    return True
            except OSError:
                time.sleep(0.2)
                continue
        return False
    @staticmethod
    def _default_dashboard_port(bot_id: str) -> int:
        """Deterministic per-bot port in [19000, 20999], derived from a SHA-1 of the id."""
        digest = hashlib.sha1(str(bot_id or "").strip().encode("utf-8")).hexdigest()
        return 19000 + (int(digest[:6], 16) % 2000)
@staticmethod
def _normalize_cpu_limit(value: Optional[float]) -> Optional[float]:
if value is None:
return None
try:
return round(float(value), 2)
except Exception:
return None
@staticmethod
def _normalize_memory_limit(value: Optional[int]) -> Optional[int]:
if value is None:
return None
try:
return max(0, int(value)) * 1024 * 1024
except Exception:
return None
@staticmethod
def _normalize_storage_limit(value: Optional[int]) -> Optional[int]:
if value is None:
return None
try:
return max(0, int(value)) * 1024 * 1024 * 1024
except Exception:
return None
def _calc_workspace_used_bytes(self, bot_id: str) -> int:
total = 0
root = self._workspace_dir(bot_id=bot_id)
for current_root, _, files in os.walk(root):
for filename in files:
path = os.path.join(current_root, filename)
try:
total += int(os.path.getsize(path))
except Exception:
continue
return total
@staticmethod
def _now() -> str:
return datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
    @staticmethod
    def _parse_monitor_packet_json(line: str) -> Optional[Dict[str, Any]]:
        """Extract a dashboard monitor packet from one delimited log line.

        Only lines containing both ``__DASHBOARD_DATA_START__`` and
        ``__DASHBOARD_DATA_END__`` carry a JSON payload; anything else (or any
        malformed payload) yields None — log scraping is best-effort by design.
        """
        if "__DASHBOARD_DATA_START__" not in line or "__DASHBOARD_DATA_END__" not in line:
            return None
        try:
            raw_json = line.split("__DASHBOARD_DATA_START__", 1)[1].split("__DASHBOARD_DATA_END__", 1)[0].strip()
            data = json.loads(raw_json)
            event_type = str(data.get("type", "")).upper()
            # "content" wins over "text"; media paths are normalized to forward slashes.
            content = str(data.get("content") or data.get("text") or "").strip()
            media = [str(v).strip().replace("\\", "/") for v in (data.get("media") or []) if str(v).strip()]
            is_progress = bool(data.get("is_progress", False))
            is_tool = bool(data.get("is_tool", False))
            usage = data.get("usage") if isinstance(data.get("usage"), dict) else None
            request_id = str(data.get("request_id") or "").strip() or None
            provider = str(data.get("provider") or "").strip() or None
            model = str(data.get("model") or "").strip() or None
            if event_type == "AGENT_STATE":
                payload = data.get("payload") or {}
                # State falls back to a top-level "state", then is inferred from is_tool.
                state = str(payload.get("state") or data.get("state") or ("TOOL_CALL" if is_tool else "THINKING"))
                action_msg = str(payload.get("action_msg") or payload.get("msg") or content)
                return {
                    "type": "AGENT_STATE",
                    "channel": "dashboard",
                    "payload": {"state": state, "action_msg": action_msg},
                    "request_id": request_id,
                }
            if event_type == "ASSISTANT_MESSAGE":
                # An assistant message with neither text nor media is dropped.
                if content or media:
                    return {
                        "type": "ASSISTANT_MESSAGE",
                        "channel": "dashboard",
                        "text": content,
                        "media": media,
                        "usage": usage,
                        "request_id": request_id,
                        "provider": provider,
                        "model": model,
                    }
                return None
            if event_type == "BUS_EVENT" or is_progress:
                return {
                    "type": "BUS_EVENT",
                    "channel": "dashboard",
                    "content": content,
                    "media": media,
                    "is_progress": is_progress,
                    "is_tool": is_tool,
                    "usage": usage,
                    "request_id": request_id,
                    "provider": provider,
                    "model": model,
                }
            # Unknown event types degrade to an assistant message when they carry content.
            if content or media:
                return {
                    "type": "ASSISTANT_MESSAGE",
                    "channel": "dashboard",
                    "text": content,
                    "media": media,
                    "usage": usage,
                    "request_id": request_id,
                    "provider": provider,
                    "model": model,
                }
        except Exception:
            return None
        return None
    @classmethod
    def _parse_log_line(cls, line: str) -> Optional[Dict[str, Any]]:
        """Translate one gateway log line into a UI event dict, or None.

        Match order: embedded dashboard JSON packet, "Processing message from"
        (THINKING), "Response to" (ASSISTANT_MESSAGE for the dashboard channel,
        SUCCESS otherwise), "tool call:" (TOOL_CALL), then a generic ERROR for
        lines mentioning error/traceback.
        """
        if "__DASHBOARD_DATA_START__" in line:
            packet = cls._parse_monitor_packet_json(line)
            if packet:
                return packet
        process_match = re.search(r"Processing message from ([\w\-]+):[^:]+:\s*(.+)$", line)
        if process_match:
            channel = process_match.group(1).strip().lower()
            action_msg = process_match.group(2).strip()
            return {
                "type": "AGENT_STATE",
                "channel": channel,
                "payload": {
                    "state": "THINKING",
                    "action_msg": action_msg[:4000],
                },
            }
        response_match = re.search(r"Response to ([\w\-]+):[^:]+:\s*(.+)$", line)
        if response_match:
            channel = response_match.group(1).strip().lower()
            action_msg = response_match.group(2).strip()
            if channel == "dashboard":
                return {
                    "type": "ASSISTANT_MESSAGE",
                    "channel": "dashboard",
                    "text": action_msg[:4000],
                }
            return {
                "type": "AGENT_STATE",
                "channel": channel,
                "payload": {
                    "state": "SUCCESS",
                    "action_msg": action_msg[:4000],
                },
            }
        tool_call_match = re.search(r"tool call:\s*(.+)$", line, re.IGNORECASE)
        if tool_call_match:
            return {
                "type": "AGENT_STATE",
                "payload": {
                    "state": "TOOL_CALL",
                    "action_msg": tool_call_match.group(1).strip()[:4000],
                },
            }
        lower = line.lower()
        if "error" in lower or "traceback" in lower:
            return {
                "type": "AGENT_STATE",
                "payload": {"state": "ERROR", "action_msg": "执行异常,请检查日志"},
            }
        return None

View File

@ -0,0 +1 @@
# Schema package for dashboard-edge.

View File

@ -0,0 +1,116 @@
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
# Version of the edge-node wire protocol embedded in every payload.
NODE_PROTOCOL_VERSION = "1"
class EdgeNodeIdentityBase(BaseModel):
    """Identity fields shared by edge-node responses."""
    protocol_version: str = NODE_PROTOCOL_VERSION
    node_id: str
    display_name: str
    service: str = "dashboard-edge"
    transport_kind: str = "edge"
    runtime_kind: str = "docker"
    core_adapter: str = "nanobot"
class EdgeStatusResponse(BaseModel):
    """Minimal status payload."""
    status: str
class EdgeStateWriteRequest(BaseModel):
    """Body for writing a bot state document; workspace_root optionally overrides the default location."""
    data: Dict[str, Any] = Field(default_factory=dict)
    workspace_root: Optional[str] = None
class EdgeStateResponse(BaseModel):
    """A bot state document addressed by (bot_id, state_key)."""
    bot_id: str
    state_key: str
    data: Dict[str, Any] = Field(default_factory=dict)
class EdgeNativePreflightRequest(BaseModel):
native_command: Optional[str] = None
native_workdir: Optional[str] = None
class EdgeNativePreflightResponse(BaseModel):
ok: bool = False
command: List[str] = Field(default_factory=list)
workdir: str = ""
command_available: bool = False
workdir_exists: bool = False
detail: str = ""
class EdgeCommandRequest(BaseModel):
command: str
media: List[str] = Field(default_factory=list)
class EdgeLogsResponse(BaseModel):
bot_id: str
logs: List[str] = Field(default_factory=list)
class EdgeMonitorEnsureResponse(BaseModel):
ensured: bool = False
class EdgeMonitorPacket(BaseModel):
protocol_version: str = NODE_PROTOCOL_VERSION
node_id: str = ""
bot_id: str = ""
seq: int = 0
captured_at: str = ""
packet: Dict[str, Any] = Field(default_factory=dict)
class EdgeMonitorPacketsResponse(BaseModel):
    """A page of monitor packets for one bot plus the newest seq seen."""
    protocol_version: str = NODE_PROTOCOL_VERSION
    node_id: str = ""
    bot_id: str
    # Highest sequence number available; lets callers poll incrementally.
    latest_seq: int = 0
    packets: List[EdgeMonitorPacket] = Field(default_factory=list)
class EdgeWorkspaceSyncRequest(BaseModel):
    """Overrides applied when (re)provisioning a bot workspace.

    All three sections are optional; ``None`` means "no override".
    """
    channels_override: Optional[List[Dict[str, Any]]] = None
    global_delivery_override: Optional[Dict[str, Any]] = None
    runtime_overrides: Optional[Dict[str, Any]] = None
class EdgeMarkdownWriteRequest(BaseModel):
    """Raw markdown content to write to a workspace document."""
    content: str = ""
class EdgeNodeSelfResponse(EdgeNodeIdentityBase):
    """Full self-description of an edge node.

    Identity fields come from :class:`EdgeNodeIdentityBase` (same fields,
    same defaults and order as the previous duplicated declarations),
    mirroring how ``EdgeNodeHeartbeatResponse`` is defined, plus the node's
    advertised capabilities, a resource snapshot and the report timestamp.
    """
    capabilities: Dict[str, Any] = Field(default_factory=dict)
    resources: Dict[str, Any] = Field(default_factory=dict)
    reported_at: str = ""
class EdgeNodeResourcesResponse(BaseModel):
    """Resource snapshot for a node.

    Carries the identity fields inline; note ``display_name`` defaults to
    ``""`` here (unlike EdgeNodeIdentityBase where it is required), so this
    model cannot simply inherit the base.
    """
    protocol_version: str = NODE_PROTOCOL_VERSION
    node_id: str
    display_name: str = ""
    service: str = "dashboard-edge"
    transport_kind: str = "edge"
    runtime_kind: str = "docker"
    core_adapter: str = "nanobot"
    resources: Dict[str, Any] = Field(default_factory=dict)
    reported_at: str = ""
class EdgeNodeHeartbeatResponse(EdgeNodeIdentityBase):
    """Heartbeat payload: node identity plus capabilities and resources."""
    capabilities: Dict[str, Any] = Field(default_factory=dict)
    resources: Dict[str, Any] = Field(default_factory=dict)
    reported_at: str = ""

View File

@ -0,0 +1,15 @@
from typing import Dict, Optional
from pydantic import BaseModel, Field
class EdgeStartBotRequest(BaseModel):
    """Parameters for launching a bot on an edge node."""
    # Container image to run when runtime_kind is "docker".
    image_tag: str
    runtime_kind: str = "docker"
    env_vars: Dict[str, str] = Field(default_factory=dict)
    workspace_root: Optional[str] = None
    # Used only for native (non-docker) launches.
    native_command: Optional[str] = None
    native_workdir: Optional[str] = None
    # Resource limits for the launched bot.
    cpu_cores: float = 1.0
    memory_mb: int = 1024
    storage_gb: int = 10

View File

@ -0,0 +1 @@
# Service package for dashboard-edge.

View File

@ -0,0 +1,278 @@
import json
import os
import hashlib
from typing import Any, Dict, List, Optional
DEFAULT_SOUL_MD = "# Soul\n"
DEFAULT_AGENTS_MD = "# Agent Instructions\n"
DEFAULT_USER_MD = "# User Preferences\n"
DEFAULT_TOOLS_MD = "# Tools\n"
DEFAULT_IDENTITY_MD = "# Identity\n"
class EdgeProvisionService:
def __init__(self, *, host_data_root: str) -> None:
self._host_data_root = host_data_root
def sync_bot_workspace(
self,
*,
bot_id: str,
payload: Any,
) -> Dict[str, Any]:
runtime = dict(getattr(payload, "runtime_overrides", None) or {})
workspace_root_override = self._workspace_root_override(runtime)
workspace_bot_dir = self._bot_workspace_dir(bot_id, workspace_root_override)
state_nanobot_dir = os.path.join(workspace_bot_dir, ".nanobot")
workspace_dir = os.path.join(workspace_bot_dir, ".nanobot", "workspace")
memory_dir = os.path.join(workspace_dir, "memory")
skills_dir = os.path.join(workspace_dir, "skills")
for path in [state_nanobot_dir, workspace_dir, memory_dir, skills_dir]:
os.makedirs(path, exist_ok=True)
channels_override = list(getattr(payload, "channels_override", None) or [])
global_delivery_override = dict(getattr(payload, "global_delivery_override", None) or {})
raw_provider_name = str(runtime.get("llm_provider") or "openrouter").strip().lower()
provider_name = {
"aliyun": "dashscope",
"qwen": "dashscope",
"aliyun-qwen": "dashscope",
"moonshot": "kimi",
"xunfei": "openai",
"iflytek": "openai",
"xfyun": "openai",
}.get(raw_provider_name, raw_provider_name)
model_name = str(runtime.get("llm_model") or "openai/gpt-4o-mini").strip()
if provider_name == "openai" and raw_provider_name in {"xunfei", "iflytek", "xfyun"} and model_name and "/" not in model_name:
model_name = f"openai/{model_name}"
provider_cfg: Dict[str, Any] = {"apiKey": str(runtime.get("api_key") or "").strip()}
api_base = str(runtime.get("api_base") or "").strip()
if api_base:
provider_cfg["apiBase"] = api_base
channels_cfg: Dict[str, Any] = {
"sendProgress": bool(global_delivery_override.get("sendProgress", runtime.get("send_progress", False))),
"sendToolHints": bool(global_delivery_override.get("sendToolHints", runtime.get("send_tool_hints", False))),
}
existing_config: Dict[str, Any] = {}
config_path = os.path.join(state_nanobot_dir, "config.json")
if os.path.isfile(config_path):
try:
with open(config_path, "r", encoding="utf-8") as fh:
loaded = json.load(fh)
if isinstance(loaded, dict):
existing_config = loaded
except Exception:
existing_config = {}
existing_tools = existing_config.get("tools")
tools_cfg: Dict[str, Any] = dict(existing_tools) if isinstance(existing_tools, dict) else {}
native_sandbox_mode = self._normalize_native_sandbox_mode(runtime.get("native_sandbox_mode"))
if native_sandbox_mode == "workspace":
tools_cfg["restrictToWorkspace"] = True
elif native_sandbox_mode == "full_access":
tools_cfg["restrictToWorkspace"] = False
existing_channels = existing_config.get("channels")
existing_dashboard_cfg = (
existing_channels.get("dashboard")
if isinstance(existing_channels, dict) and isinstance(existing_channels.get("dashboard"), dict)
else {}
)
dashboard_cfg: Dict[str, Any] = {
"enabled": True,
"host": "0.0.0.0",
"port": self._dashboard_port_for_bot(bot_id),
"allowFrom": ["*"],
}
for key in ("host", "port", "allowFrom"):
if key in existing_dashboard_cfg:
dashboard_cfg[key] = existing_dashboard_cfg[key]
dashboard_cfg["port"] = self._dashboard_port_for_bot(bot_id)
channels_cfg["dashboard"] = dashboard_cfg
for channel in channels_override:
channel_type = str(channel.get("channel_type") or "").strip().lower()
if not channel_type or channel_type == "dashboard":
continue
extra = channel.get("extra_config") if isinstance(channel.get("extra_config"), dict) else {}
enabled = bool(channel.get("is_active", True))
external = str(channel.get("external_app_id") or "")
secret = str(channel.get("app_secret") or "")
if channel_type == "telegram":
channels_cfg["telegram"] = {
"enabled": enabled,
"token": secret,
"proxy": extra.get("proxy", ""),
"replyToMessage": bool(extra.get("replyToMessage", False)),
"allowFrom": self._normalize_allow_from(extra.get("allowFrom", [])),
}
continue
if channel_type == "feishu":
channels_cfg["feishu"] = {
"enabled": enabled,
"appId": external,
"appSecret": secret,
"encryptKey": extra.get("encryptKey", ""),
"verificationToken": extra.get("verificationToken", ""),
"allowFrom": self._normalize_allow_from(extra.get("allowFrom", [])),
}
continue
if channel_type == "dingtalk":
channels_cfg["dingtalk"] = {
"enabled": enabled,
"clientId": external,
"clientSecret": secret,
"allowFrom": self._normalize_allow_from(extra.get("allowFrom", [])),
}
continue
if channel_type == "slack":
channels_cfg["slack"] = {
"enabled": enabled,
"mode": extra.get("mode", "socket"),
"botToken": external,
"appToken": secret,
"replyInThread": bool(extra.get("replyInThread", True)),
"groupPolicy": extra.get("groupPolicy", "mention"),
"groupAllowFrom": extra.get("groupAllowFrom", []),
"reactEmoji": extra.get("reactEmoji", "eyes"),
}
continue
if channel_type == "qq":
channels_cfg["qq"] = {
"enabled": enabled,
"appId": external,
"secret": secret,
"allowFrom": self._normalize_allow_from(extra.get("allowFrom", [])),
}
continue
if channel_type == "email":
channels_cfg["email"] = {
"enabled": enabled,
"consentGranted": bool(extra.get("consentGranted", False)),
"imapHost": extra.get("imapHost", ""),
"imapPort": max(1, min(int(extra.get("imapPort", 993) or 993), 65535)),
"imapUsername": extra.get("imapUsername", ""),
"imapPassword": extra.get("imapPassword", ""),
"imapMailbox": extra.get("imapMailbox", "INBOX"),
"imapUseSsl": bool(extra.get("imapUseSsl", True)),
"smtpHost": extra.get("smtpHost", ""),
"smtpPort": max(1, min(int(extra.get("smtpPort", 587) or 587), 65535)),
"smtpUsername": extra.get("smtpUsername", ""),
"smtpPassword": extra.get("smtpPassword", ""),
"smtpUseTls": bool(extra.get("smtpUseTls", True)),
"smtpUseSsl": bool(extra.get("smtpUseSsl", False)),
"fromAddress": extra.get("fromAddress", ""),
"autoReplyEnabled": bool(extra.get("autoReplyEnabled", True)),
"pollIntervalSeconds": max(5, int(extra.get("pollIntervalSeconds", 30) or 30)),
"markSeen": bool(extra.get("markSeen", True)),
"maxBodyChars": max(1, int(extra.get("maxBodyChars", 12000) or 12000)),
"subjectPrefix": extra.get("subjectPrefix", "Re: "),
"allowFrom": self._normalize_allow_from(extra.get("allowFrom", [])),
}
continue
channels_cfg[channel_type] = {
"enabled": enabled,
"appId": external,
"appSecret": secret,
**extra,
}
config_data: Dict[str, Any] = {
"agents": {
"defaults": {
"workspace": workspace_dir,
"model": model_name,
"temperature": float(runtime.get("temperature") or 0.2),
"topP": float(runtime.get("top_p") or 1.0),
"maxTokens": int(runtime.get("max_tokens") or 8192),
}
},
"providers": {provider_name: provider_cfg},
"channels": channels_cfg,
}
if tools_cfg:
config_data["tools"] = tools_cfg
self._write_json(config_path, config_data)
runtime_target = {
"runtime_kind": str(runtime.get("runtime_kind") or "").strip().lower(),
"transport_kind": str(runtime.get("transport_kind") or "").strip().lower(),
"core_adapter": str(runtime.get("core_adapter") or "").strip().lower(),
}
if native_sandbox_mode != "inherit":
runtime_target["native_sandbox_mode"] = native_sandbox_mode
if workspace_root_override:
runtime_target["workspace_root"] = workspace_root_override
if any(runtime_target.values()):
runtime_target_path = os.path.join(state_nanobot_dir, "runtime-target.json")
self._write_json(runtime_target_path, runtime_target)
bootstrap_files = {
"AGENTS.md": str(runtime.get("agents_md") or DEFAULT_AGENTS_MD).strip() + "\n",
"SOUL.md": str(runtime.get("soul_md") or runtime.get("system_prompt") or DEFAULT_SOUL_MD).strip() + "\n",
"USER.md": str(runtime.get("user_md") or DEFAULT_USER_MD).strip() + "\n",
"TOOLS.md": str(runtime.get("tools_md") or DEFAULT_TOOLS_MD).strip() + "\n",
"IDENTITY.md": str(runtime.get("identity_md") or DEFAULT_IDENTITY_MD).strip() + "\n",
}
for filename, content in bootstrap_files.items():
file_path = os.path.join(workspace_dir, filename)
with open(file_path, "w", encoding="utf-8") as fh:
fh.write(content)
return {"status": "ok"}
@staticmethod
def _normalize_allow_from(raw: Any) -> List[str]:
rows: List[str] = []
if isinstance(raw, list):
for item in raw:
text = str(item or "").strip()
if text and text not in rows:
rows.append(text)
if not rows:
return ["*"]
return rows
@staticmethod
def _dashboard_port_for_bot(bot_id: str) -> int:
digest = hashlib.sha1(str(bot_id or "").strip().encode("utf-8")).hexdigest()
return 19000 + (int(digest[:6], 16) % 2000)
@staticmethod
def _workspace_root_override(runtime_overrides: Dict[str, Any]) -> str:
raw = str(runtime_overrides.get("workspace_root") or "").strip()
if not raw:
return ""
return os.path.abspath(os.path.expanduser(raw))
@staticmethod
def _normalize_native_sandbox_mode(raw_value: Any) -> str:
text = str(raw_value or "").strip().lower()
if text in {"workspace", "sandbox", "strict"}:
return "workspace"
if text in {"full_access", "full-access", "danger-full-access", "escape"}:
return "full_access"
return "inherit"
def _bot_workspace_dir(self, bot_id: str, workspace_root_override: str) -> str:
if not workspace_root_override:
return os.path.abspath(os.path.join(self._host_data_root, str(bot_id or "").strip()))
return os.path.abspath(os.path.join(workspace_root_override, str(bot_id or "").strip()))
@staticmethod
def _write_json(path: str, payload: Dict[str, Any]) -> None:
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "w", encoding="utf-8") as fh:
json.dump(payload, fh, ensure_ascii=False, indent=2)
edge_provision_service: EdgeProvisionService | None = None

Some files were not shown because too many files have changed in this diff Show More