diff --git a/backend/.env.example b/backend/.env.example index f5a95b5..192806e 100644 --- a/backend/.env.example +++ b/backend/.env.example @@ -50,10 +50,3 @@ STT_DEVICE=cpu APP_HOST=0.0.0.0 APP_PORT=8000 APP_RELOAD=true - -# Optional overrides (fallback only; usually keep empty when using template files) -DEFAULT_AGENTS_MD= -DEFAULT_SOUL_MD= -DEFAULT_USER_MD= -DEFAULT_TOOLS_MD= -DEFAULT_IDENTITY_MD= diff --git a/backend/api/bot_config_router.py b/backend/api/bot_config_router.py new file mode 100644 index 0000000..d1be1f8 --- /dev/null +++ b/backend/api/bot_config_router.py @@ -0,0 +1,102 @@ +from fastapi import APIRouter, Depends, HTTPException +from sqlmodel import Session + +from core.database import get_session +from models.bot import BotInstance +from schemas.bot import ( + BotEnvParamsUpdateRequest, + BotMcpConfigUpdateRequest, + BotToolsConfigUpdateRequest, + ChannelConfigRequest, + ChannelConfigUpdateRequest, +) +from services.bot_config_service import ( + create_bot_channel_config, + delete_bot_channel_config, + get_bot_env_params_state, + get_bot_mcp_config_state, + get_bot_resources_snapshot, + get_bot_tools_config_state, + list_bot_channels_config, + reject_bot_tools_config_update, + update_bot_channel_config, + update_bot_env_params_state, + update_bot_mcp_config_state, +) + +router = APIRouter() + +@router.get("/api/bots/{bot_id}/resources") +def get_bot_resources(bot_id: str, session: Session = Depends(get_session)): + return get_bot_resources_snapshot(session, bot_id=bot_id) + + +@router.get("/api/bots/{bot_id}/channels") +def list_bot_channels(bot_id: str, session: Session = Depends(get_session)): + return list_bot_channels_config(session, bot_id=bot_id) + + +@router.get("/api/bots/{bot_id}/tools-config") +def get_bot_tools_config(bot_id: str, session: Session = Depends(get_session)): + return get_bot_tools_config_state(session, bot_id=bot_id) + + +@router.put("/api/bots/{bot_id}/tools-config") +def update_bot_tools_config( + bot_id: str, + 
payload: BotToolsConfigUpdateRequest, + session: Session = Depends(get_session), +): + return reject_bot_tools_config_update(session, bot_id=bot_id, payload=payload) + + +@router.get("/api/bots/{bot_id}/mcp-config") +def get_bot_mcp_config(bot_id: str, session: Session = Depends(get_session)): + return get_bot_mcp_config_state(session, bot_id=bot_id) + + +@router.put("/api/bots/{bot_id}/mcp-config") +def update_bot_mcp_config( + bot_id: str, + payload: BotMcpConfigUpdateRequest, + session: Session = Depends(get_session), +): + return update_bot_mcp_config_state(session, bot_id=bot_id, payload=payload) + + +@router.get("/api/bots/{bot_id}/env-params") +def get_bot_env_params(bot_id: str, session: Session = Depends(get_session)): + return get_bot_env_params_state(session, bot_id=bot_id) + + +@router.put("/api/bots/{bot_id}/env-params") +def update_bot_env_params( + bot_id: str, + payload: BotEnvParamsUpdateRequest, + session: Session = Depends(get_session), +): + return update_bot_env_params_state(session, bot_id=bot_id, payload=payload) + + +@router.post("/api/bots/{bot_id}/channels") +def create_bot_channel( + bot_id: str, + payload: ChannelConfigRequest, + session: Session = Depends(get_session), +): + return create_bot_channel_config(session, bot_id=bot_id, payload=payload) + + +@router.put("/api/bots/{bot_id}/channels/{channel_id}") +def update_bot_channel( + bot_id: str, + channel_id: str, + payload: ChannelConfigUpdateRequest, + session: Session = Depends(get_session), +): + return update_bot_channel_config(session, bot_id=bot_id, channel_id=channel_id, payload=payload) + + +@router.delete("/api/bots/{bot_id}/channels/{channel_id}") +def delete_bot_channel(bot_id: str, channel_id: str, session: Session = Depends(get_session)): + return delete_bot_channel_config(session, bot_id=bot_id, channel_id=channel_id) diff --git a/backend/api/bot_management_router.py b/backend/api/bot_management_router.py new file mode 100644 index 0000000..16febf1 --- /dev/null +++ 
b/backend/api/bot_management_router.py @@ -0,0 +1,68 @@ +from fastapi import APIRouter, Depends, HTTPException +from sqlmodel import Session + +from core.database import get_session +from services.bot_lifecycle_service import ( + deactivate_bot_instance, + delete_bot_instance, + disable_bot_instance, + enable_bot_instance, + start_bot_instance, + stop_bot_instance, +) + +router = APIRouter() + + +@router.post("/api/bots/{bot_id}/start") +async def start_bot(bot_id: str, session: Session = Depends(get_session)): + try: + return await start_bot_instance(session, bot_id) + except ValueError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + except PermissionError as exc: + raise HTTPException(status_code=403, detail=str(exc)) from exc + except RuntimeError as exc: + raise HTTPException(status_code=500, detail=str(exc)) from exc + + +@router.post("/api/bots/{bot_id}/stop") +def stop_bot(bot_id: str, session: Session = Depends(get_session)): + try: + return stop_bot_instance(session, bot_id) + except ValueError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + except PermissionError as exc: + raise HTTPException(status_code=403, detail=str(exc)) from exc + + +@router.post("/api/bots/{bot_id}/enable") +def enable_bot(bot_id: str, session: Session = Depends(get_session)): + try: + return enable_bot_instance(session, bot_id) + except ValueError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + + +@router.post("/api/bots/{bot_id}/disable") +def disable_bot(bot_id: str, session: Session = Depends(get_session)): + try: + return disable_bot_instance(session, bot_id) + except ValueError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + + +@router.post("/api/bots/{bot_id}/deactivate") +def deactivate_bot(bot_id: str, session: Session = Depends(get_session)): + try: + return deactivate_bot_instance(session, bot_id) + except ValueError as exc: + raise HTTPException(status_code=404, 
detail=str(exc)) from exc + + +@router.delete("/api/bots/{bot_id}") +def delete_bot(bot_id: str, delete_workspace: bool = True, session: Session = Depends(get_session)): + try: + return delete_bot_instance(session, bot_id, delete_workspace=delete_workspace) + except ValueError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc diff --git a/backend/api/bot_router.py b/backend/api/bot_router.py new file mode 100644 index 0000000..1fbf1d5 --- /dev/null +++ b/backend/api/bot_router.py @@ -0,0 +1,45 @@ +from fastapi import APIRouter, Depends +from sqlmodel import Session + +from core.database import get_session +from schemas.bot import BotCreateRequest, BotPageAuthLoginRequest, BotUpdateRequest +from services.bot_management_service import ( + authenticate_bot_page_access, + create_bot_record, + get_bot_detail_cached, + list_bots_with_cache, + test_provider_connection, + update_bot_record, +) + +router = APIRouter() + + +@router.post("/api/providers/test") +async def test_provider(payload: dict): + return await test_provider_connection(payload) + + +@router.post("/api/bots") +def create_bot(payload: BotCreateRequest, session: Session = Depends(get_session)): + return create_bot_record(session, payload=payload) + + +@router.get("/api/bots") +def list_bots(session: Session = Depends(get_session)): + return list_bots_with_cache(session) + + +@router.get("/api/bots/{bot_id}") +def get_bot_detail(bot_id: str, session: Session = Depends(get_session)): + return get_bot_detail_cached(session, bot_id=bot_id) + + +@router.post("/api/bots/{bot_id}/auth/login") +def login_bot_page(bot_id: str, payload: BotPageAuthLoginRequest, session: Session = Depends(get_session)): + return authenticate_bot_page_access(session, bot_id=bot_id, password=payload.password) + + +@router.put("/api/bots/{bot_id}") +def update_bot(bot_id: str, payload: BotUpdateRequest, session: Session = Depends(get_session)): + return update_bot_record(session, bot_id=bot_id, payload=payload) diff 
--git a/backend/api/bot_runtime_router.py b/backend/api/bot_runtime_router.py new file mode 100644 index 0000000..fab47d8 --- /dev/null +++ b/backend/api/bot_runtime_router.py @@ -0,0 +1,185 @@ +import logging +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List, Optional + +from fastapi import APIRouter, Depends, HTTPException, WebSocket, WebSocketDisconnect +from sqlmodel import Session + +from core.database import engine, get_session +from core.docker_instance import docker_manager +from core.settings import BOTS_WORKSPACE_ROOT +from core.websocket_manager import manager +from models.bot import BotInstance +from services.bot_channel_service import _get_bot_channels_from_config +from services.bot_lifecycle_service import start_bot_instance, stop_bot_instance +from services.bot_storage_service import _read_bot_config, _write_bot_config +from services.bot_storage_service import _read_cron_store, _write_cron_store +from services.runtime_service import docker_callback + +router = APIRouter() +logger = logging.getLogger("dashboard.backend") + + +def _get_bot_or_404(session: Session, bot_id: str) -> BotInstance: + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return bot + + +def _weixin_state_file_path(bot_id: str) -> Path: + return Path(BOTS_WORKSPACE_ROOT) / bot_id / ".nanobot" / "weixin" / "account.json" + + +@router.get("/api/bots/{bot_id}/logs") +def get_bot_logs( + bot_id: str, + tail: Optional[int] = 300, + offset: int = 0, + limit: Optional[int] = None, + reverse: bool = False, + session: Session = Depends(get_session), +): + _get_bot_or_404(session, bot_id) + if limit is not None: + page = docker_manager.get_logs_page( + bot_id, + offset=max(0, int(offset)), + limit=max(1, int(limit)), + reverse=bool(reverse), + ) + return {"bot_id": bot_id, **page} + effective_tail = max(1, int(tail or 300)) + return {"bot_id": bot_id, "logs": 
docker_manager.get_recent_logs(bot_id, tail=effective_tail)} + + +@router.post("/api/bots/{bot_id}/weixin/relogin") +async def relogin_weixin(bot_id: str, session: Session = Depends(get_session)): + bot = _get_bot_or_404(session, bot_id) + weixin_channel = next( + ( + row + for row in _get_bot_channels_from_config(bot) + if str(row.get("channel_type") or "").strip().lower() == "weixin" + ), + None, + ) + if not weixin_channel: + raise HTTPException(status_code=404, detail="Weixin channel not found") + + state_file = _weixin_state_file_path(bot_id) + removed = False + try: + if state_file.is_file(): + state_file.unlink() + removed = True + except Exception as exc: + raise HTTPException(status_code=500, detail=f"Failed to remove weixin state: {exc}") from exc + + config_data = _read_bot_config(bot_id) + channels_cfg = config_data.get("channels") if isinstance(config_data, dict) else {} + weixin_cfg = channels_cfg.get("weixin") if isinstance(channels_cfg, dict) else None + if isinstance(weixin_cfg, dict) and "token" in weixin_cfg: + weixin_cfg.pop("token", None) + _write_bot_config(bot_id, config_data) + + restarted = False + if str(bot.docker_status or "").upper() == "RUNNING": + stop_bot_instance(session, bot_id) + await start_bot_instance(session, bot_id) + restarted = True + + return { + "status": "relogin_started", + "bot_id": bot_id, + "removed_state": removed, + "restarted": restarted, + } + + +@router.get("/api/bots/{bot_id}/cron/jobs") +def list_cron_jobs(bot_id: str, include_disabled: bool = True, session: Session = Depends(get_session)): + _get_bot_or_404(session, bot_id) + store = _read_cron_store(bot_id) + rows = [] + for row in store.get("jobs", []): + if not isinstance(row, dict): + continue + enabled = bool(row.get("enabled", True)) + if not include_disabled and not enabled: + continue + rows.append(row) + rows.sort(key=lambda value: int(((value.get("state") or {}).get("nextRunAtMs")) or 2**62)) + return {"bot_id": bot_id, "version": 
int(store.get("version", 1) or 1), "jobs": rows} + + +@router.post("/api/bots/{bot_id}/cron/jobs/{job_id}/stop") +def stop_cron_job(bot_id: str, job_id: str, session: Session = Depends(get_session)): + _get_bot_or_404(session, bot_id) + store = _read_cron_store(bot_id) + jobs = store.get("jobs", []) + if not isinstance(jobs, list): + jobs = [] + found = None + for row in jobs: + if isinstance(row, dict) and str(row.get("id")) == job_id: + found = row + break + if not found: + raise HTTPException(status_code=404, detail="Cron job not found") + found["enabled"] = False + found["updatedAtMs"] = int(datetime.utcnow().timestamp() * 1000) + _write_cron_store(bot_id, {"version": int(store.get("version", 1) or 1), "jobs": jobs}) + return {"status": "stopped", "job_id": job_id} + + +@router.delete("/api/bots/{bot_id}/cron/jobs/{job_id}") +def delete_cron_job(bot_id: str, job_id: str, session: Session = Depends(get_session)): + _get_bot_or_404(session, bot_id) + store = _read_cron_store(bot_id) + jobs = store.get("jobs", []) + if not isinstance(jobs, list): + jobs = [] + kept = [row for row in jobs if not (isinstance(row, dict) and str(row.get("id")) == job_id)] + if len(kept) == len(jobs): + raise HTTPException(status_code=404, detail="Cron job not found") + _write_cron_store(bot_id, {"version": int(store.get("version", 1) or 1), "jobs": kept}) + return {"status": "deleted", "job_id": job_id} + + +@router.websocket("/ws/monitor/{bot_id}") +async def websocket_endpoint(websocket: WebSocket, bot_id: str): + with Session(engine) as session: + bot = session.get(BotInstance, bot_id) + if not bot: + await websocket.close(code=4404, reason="Bot not found") + return + + connected = False + try: + await manager.connect(bot_id, websocket) + connected = True + except Exception as exc: + logger.warning("websocket connect failed bot_id=%s detail=%s", bot_id, exc) + try: + await websocket.close(code=1011, reason="WebSocket accept failed") + except Exception: + pass + return + + 
docker_manager.ensure_monitor(bot_id, docker_callback) + try: + while True: + await websocket.receive_text() + except WebSocketDisconnect: + pass + except RuntimeError as exc: + msg = str(exc or "").lower() + if "need to call \"accept\" first" not in msg and "not connected" not in msg: + logger.exception("websocket runtime error bot_id=%s", bot_id) + except Exception: + logger.exception("websocket unexpected error bot_id=%s", bot_id) + finally: + if connected: + manager.disconnect(bot_id, websocket) diff --git a/backend/api/bot_speech_router.py b/backend/api/bot_speech_router.py new file mode 100644 index 0000000..434dccc --- /dev/null +++ b/backend/api/bot_speech_router.py @@ -0,0 +1,33 @@ +import logging +from typing import Optional + +from fastapi import APIRouter, Depends, File, Form, HTTPException, Request, UploadFile +from sqlmodel import Session + +from core.database import get_session +from core.speech_service import WhisperSpeechService +from services.speech_transcribe_service import transcribe_bot_speech_upload + +router = APIRouter() +logger = logging.getLogger("dashboard.backend") + + +@router.post("/api/bots/{bot_id}/speech/transcribe") +async def transcribe_bot_speech( + bot_id: str, + request: Request, + file: UploadFile = File(...), + language: Optional[str] = Form(None), + session: Session = Depends(get_session), +): + speech_service = getattr(request.app.state, "speech_service", None) + if not isinstance(speech_service, WhisperSpeechService): + raise HTTPException(status_code=500, detail="Speech service is not initialized") + return await transcribe_bot_speech_upload( + session, + bot_id, + upload=file, + language=language, + speech_service=speech_service, + logger=logger, + ) diff --git a/backend/api/chat_history_router.py b/backend/api/chat_history_router.py new file mode 100644 index 0000000..1dbf7f7 --- /dev/null +++ b/backend/api/chat_history_router.py @@ -0,0 +1,69 @@ +from typing import Optional + +from fastapi import APIRouter, Depends 
+from sqlmodel import Session + +from core.database import get_session +from schemas.bot import MessageFeedbackRequest +from services.chat_history_service import ( + clear_bot_messages_payload, + clear_dashboard_direct_session_payload, + list_bot_messages_by_date_payload, + list_bot_messages_page_payload, + list_bot_messages_payload, + update_bot_message_feedback_payload, +) + +router = APIRouter() + + +@router.get("/api/bots/{bot_id}/messages") +def list_bot_messages(bot_id: str, limit: int = 200, session: Session = Depends(get_session)): + return list_bot_messages_payload(session, bot_id, limit=limit) + + +@router.get("/api/bots/{bot_id}/messages/page") +def list_bot_messages_page( + bot_id: str, + limit: Optional[int] = None, + before_id: Optional[int] = None, + session: Session = Depends(get_session), +): + return list_bot_messages_page_payload(session, bot_id, limit=limit, before_id=before_id) + + +@router.get("/api/bots/{bot_id}/messages/by-date") +def list_bot_messages_by_date( + bot_id: str, + date: str, + tz_offset_minutes: Optional[int] = None, + limit: Optional[int] = None, + session: Session = Depends(get_session), +): + return list_bot_messages_by_date_payload( + session, + bot_id, + date=date, + tz_offset_minutes=tz_offset_minutes, + limit=limit, + ) + + +@router.put("/api/bots/{bot_id}/messages/{message_id}/feedback") +def update_bot_message_feedback( + bot_id: str, + message_id: int, + payload: MessageFeedbackRequest, + session: Session = Depends(get_session), +): + return update_bot_message_feedback_payload(session, bot_id, message_id, payload.feedback) + + +@router.delete("/api/bots/{bot_id}/messages") +def clear_bot_messages(bot_id: str, session: Session = Depends(get_session)): + return clear_bot_messages_payload(session, bot_id) + + +@router.post("/api/bots/{bot_id}/sessions/dashboard-direct/clear") +def clear_bot_dashboard_direct_session(bot_id: str, session: Session = Depends(get_session)): + return 
clear_dashboard_direct_session_payload(session, bot_id) diff --git a/backend/api/chat_router.py b/backend/api/chat_router.py new file mode 100644 index 0000000..636afbf --- /dev/null +++ b/backend/api/chat_router.py @@ -0,0 +1,29 @@ +from typing import Any, Dict, Tuple + +from fastapi import APIRouter, Body, Depends +from sqlmodel import Session + +from core.database import get_session +from services.chat_command_service import send_bot_command + +router = APIRouter() + + +def _parse_command_payload(payload: Dict[str, Any] | None) -> Tuple[str, Any]: + body = payload if isinstance(payload, dict) else {} + return str(body.get("command") or ""), body.get("attachments") + + +@router.post("/api/bots/{bot_id}/command") +def send_command( + bot_id: str, + payload: Dict[str, Any] | None = Body(default=None), + session: Session = Depends(get_session), +): + command, attachments = _parse_command_payload(payload) + return send_bot_command( + session, + bot_id, + command=command, + attachments=attachments, + ) diff --git a/backend/api/image_router.py b/backend/api/image_router.py new file mode 100644 index 0000000..7083a0a --- /dev/null +++ b/backend/api/image_router.py @@ -0,0 +1,126 @@ +from typing import Any, Dict, List + +from fastapi import APIRouter, Depends, HTTPException +from sqlmodel import Session, select + +from core.cache import cache +from core.database import get_session +from core.docker_instance import docker_manager +from models.bot import BotInstance, NanobotImage +from services.cache_service import _cache_key_images, _invalidate_images_cache + +router = APIRouter() + + +def _serialize_image(row: NanobotImage) -> Dict[str, Any]: + created_at = row.created_at.isoformat() + "Z" if row.created_at else None + return { + "tag": row.tag, + "image_id": row.image_id, + "version": row.version, + "status": row.status, + "source_dir": row.source_dir, + "created_at": created_at, + } + + +def _reconcile_registered_images(session: Session) -> None: + rows = 
session.exec(select(NanobotImage)).all() + dirty = False + for row in rows: + docker_exists = docker_manager.has_image(row.tag) + next_status = "READY" if docker_exists else "ERROR" + next_image_id = row.image_id + if docker_exists and docker_manager.client: + try: + next_image_id = docker_manager.client.images.get(row.tag).id + except Exception: + next_image_id = row.image_id + if row.status != next_status or row.image_id != next_image_id: + row.status = next_status + row.image_id = next_image_id + session.add(row) + dirty = True + if dirty: + session.commit() + + +def reconcile_image_registry(session: Session) -> None: + """Backward-compatible alias for older callers after router refactor.""" + _reconcile_registered_images(session) + + +@router.get("/api/images") +def list_images(session: Session = Depends(get_session)): + cached = cache.get_json(_cache_key_images()) + if isinstance(cached, list) and all(isinstance(row, dict) for row in cached): + return cached + if isinstance(cached, list): + _invalidate_images_cache() + try: + _reconcile_registered_images(session) + except Exception as exc: + # Docker status probing should not break image management in dev mode. 
+ print(f"[image_router] reconcile images skipped: {exc}") + rows = session.exec(select(NanobotImage).order_by(NanobotImage.created_at.desc())).all() + payload = [_serialize_image(row) for row in rows] + cache.set_json(_cache_key_images(), payload, ttl=60) + return payload + +@router.delete("/api/images/{tag:path}") +def delete_image(tag: str, session: Session = Depends(get_session)): + image = session.get(NanobotImage, tag) + if not image: + raise HTTPException(status_code=404, detail="Image not found") + + # 检查是否有机器人正在使用此镜像 + bots_using = session.exec(select(BotInstance).where(BotInstance.image_tag == tag)).all() + if bots_using: + raise HTTPException(status_code=400, detail=f"Cannot delete image: {len(bots_using)} bots are using it.") + + session.delete(image) + session.commit() + _invalidate_images_cache() + return {"status": "deleted"} + +@router.get("/api/docker-images") +def list_docker_images(repository: str = "nanobot-base"): + rows = docker_manager.list_images_by_repo(repository) + return rows + +@router.post("/api/images/register") +def register_image(payload: dict, session: Session = Depends(get_session)): + tag = (payload.get("tag") or "").strip() + source_dir = (payload.get("source_dir") or "manual").strip() or "manual" + if not tag: + raise HTTPException(status_code=400, detail="tag is required") + + if not docker_manager.has_image(tag): + raise HTTPException(status_code=404, detail=f"Docker image not found: {tag}") + + version = tag.split(":")[-1].removeprefix("v") if ":" in tag else tag + try: + docker_img = docker_manager.client.images.get(tag) if docker_manager.client else None + image_id = docker_img.id if docker_img else None + except Exception: + image_id = None + + row = session.get(NanobotImage, tag) + if not row: + row = NanobotImage( + tag=tag, + version=version, + status="READY", + source_dir=source_dir, + image_id=image_id, + ) + else: + row.version = version + row.status = "READY" + row.source_dir = source_dir + row.image_id = image_id 
+ session.add(row) + session.commit() + session.refresh(row) + _invalidate_images_cache() + return _serialize_image(row) diff --git a/backend/api/skill_router.py b/backend/api/skill_router.py new file mode 100644 index 0000000..a50a16c --- /dev/null +++ b/backend/api/skill_router.py @@ -0,0 +1,100 @@ +from typing import Optional + +from fastapi import APIRouter, Depends, File, Form, HTTPException, UploadFile +from sqlmodel import Session + +from core.database import get_session +from models.bot import BotInstance +from services.skill_market_service import ( + create_skill_market_item_record, + delete_skill_market_item_record, + install_skill_market_item_for_bot, + list_bot_skill_market_items, + list_skill_market_items, + update_skill_market_item_record, +) +from services.skill_service import ( + delete_workspace_skill_entry, + list_bot_skills as list_workspace_bot_skills, + upload_bot_skill_zip_to_workspace, +) + +router = APIRouter() + + +@router.get("/api/platform/skills") +def list_skill_market(session: Session = Depends(get_session)): + return list_skill_market_items(session) + +@router.post("/api/platform/skills") +async def create_skill_market_item( + skill_key: str = Form(""), + display_name: str = Form(""), + description: str = Form(""), + file: UploadFile = File(...), + session: Session = Depends(get_session), +): + return await create_skill_market_item_record( + session, + skill_key=skill_key, + display_name=display_name, + description=description, + upload=file, + ) + +@router.put("/api/platform/skills/{skill_id}") +async def update_skill_market_item( + skill_id: int, + skill_key: str = Form(""), + display_name: str = Form(""), + description: str = Form(""), + file: Optional[UploadFile] = File(None), + session: Session = Depends(get_session), +): + return await update_skill_market_item_record( + session, + skill_id=skill_id, + skill_key=skill_key, + display_name=display_name, + description=description, + upload=file, + ) + 
+@router.delete("/api/platform/skills/{skill_id}") +def delete_skill_market_item(skill_id: int, session: Session = Depends(get_session)): + return delete_skill_market_item_record(session, skill_id=skill_id) + +@router.get("/api/bots/{bot_id}/skills") +def list_bot_skills(bot_id: str, session: Session = Depends(get_session)): + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return list_workspace_bot_skills(bot_id) + +@router.get("/api/bots/{bot_id}/skill-market") +def list_bot_skill_market(bot_id: str, session: Session = Depends(get_session)): + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return list_bot_skill_market_items(session, bot_id=bot_id) + +@router.post("/api/bots/{bot_id}/skill-market/{skill_id}/install") +def install_bot_skill_from_market(bot_id: str, skill_id: int, session: Session = Depends(get_session)): + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return install_skill_market_item_for_bot(session, bot_id=bot_id, skill_id=skill_id) + +@router.post("/api/bots/{bot_id}/skills/upload") +async def upload_bot_skill_zip(bot_id: str, file: UploadFile = File(...), session: Session = Depends(get_session)): + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return await upload_bot_skill_zip_to_workspace(bot_id, upload=file) + +@router.delete("/api/bots/{bot_id}/skills/{skill_name}") +def delete_bot_skill(bot_id: str, skill_name: str, session: Session = Depends(get_session)): + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return delete_workspace_skill_entry(bot_id, skill_name=skill_name) diff --git a/backend/api/system_router.py b/backend/api/system_router.py new file mode 100644 index 0000000..c2a8a0b --- 
/dev/null +++ b/backend/api/system_router.py @@ -0,0 +1,121 @@ +from typing import Any, Dict + +from fastapi import APIRouter, HTTPException +from sqlmodel import Session, select + +from core.database import engine, get_session +from core.settings import DATABASE_ENGINE, PANEL_ACCESS_PASSWORD, REDIS_ENABLED, REDIS_PREFIX, REDIS_URL +from core.utils import _get_default_system_timezone +from models.bot import BotInstance +from schemas.system import PanelLoginRequest, SystemTemplatesUpdateRequest +from core.cache import cache +from services.platform_service import get_platform_settings_snapshot, get_speech_runtime_settings +from services.template_service import ( + get_agent_md_templates, + get_topic_presets, + update_agent_md_templates, + update_topic_presets, +) + +router = APIRouter() + + +@router.get("/api/panel/auth/status") +def get_panel_auth_status(): + configured = str(PANEL_ACCESS_PASSWORD or "").strip() + return {"enabled": bool(configured)} + +@router.post("/api/panel/auth/login") +def panel_login(payload: PanelLoginRequest): + configured = str(PANEL_ACCESS_PASSWORD or "").strip() + if not configured: + return {"success": True, "enabled": False} + supplied = str(payload.password or "").strip() + if supplied != configured: + raise HTTPException(status_code=401, detail="Invalid panel access password") + return {"success": True, "enabled": True} + +@router.get("/api/system/defaults") +def get_system_defaults(): + md_templates = get_agent_md_templates() + platform_settings = get_platform_settings_snapshot() + speech_settings = get_speech_runtime_settings() + return { + "templates": md_templates, + "limits": { + "upload_max_mb": platform_settings.upload_max_mb, + }, + "workspace": { + "download_extensions": list(platform_settings.workspace_download_extensions), + "allowed_attachment_extensions": list(platform_settings.allowed_attachment_extensions), + }, + "bot": { + "system_timezone": _get_default_system_timezone(), + }, + "loading_page": 
platform_settings.loading_page.model_dump(), + "chat": { + "pull_page_size": platform_settings.chat_pull_page_size, + "page_size": platform_settings.page_size, + "command_auto_unlock_seconds": platform_settings.command_auto_unlock_seconds, + }, + "topic_presets": get_topic_presets().get("presets", []), + "speech": { + "enabled": speech_settings["enabled"], + "model": speech_settings["model"], + "device": speech_settings["device"], + "max_audio_seconds": speech_settings["max_audio_seconds"], + "default_language": speech_settings["default_language"], + }, + } + +@router.get("/api/system/templates") +def get_system_templates(): + return { + "agent_md_templates": get_agent_md_templates(), + "topic_presets": get_topic_presets(), + } + +@router.put("/api/system/templates") +def update_system_templates(payload: SystemTemplatesUpdateRequest): + if payload.agent_md_templates is not None: + update_agent_md_templates(payload.agent_md_templates.model_dump()) + + if payload.topic_presets is not None: + try: + update_topic_presets(payload.topic_presets) + except ValueError as exc: + raise HTTPException(status_code=400, detail=str(exc)) from exc + + return { + "status": "ok", + "agent_md_templates": get_agent_md_templates(), + "topic_presets": get_topic_presets(), + } + +@router.get("/api/health") +def get_health(): + try: + with Session(engine) as session: + session.exec(select(BotInstance).limit(1)).first() + return {"status": "ok", "database": DATABASE_ENGINE} + except Exception as e: + raise HTTPException(status_code=503, detail=f"database check failed: {e}") + +@router.get("/api/health/cache") +def get_cache_health(): + redis_url = str(REDIS_URL or "").strip() + configured = bool(REDIS_ENABLED and redis_url) + client_enabled = bool(getattr(cache, "enabled", False)) + reachable = bool(cache.ping()) if client_enabled else False + status = "ok" + if configured and not reachable: + status = "degraded" + return { + "status": status, + "cache": { + "configured": configured, + 
"enabled": client_enabled, + "reachable": reachable, + "prefix": REDIS_PREFIX, + }, + } diff --git a/backend/api/workspace_router.py b/backend/api/workspace_router.py new file mode 100644 index 0000000..7624f71 --- /dev/null +++ b/backend/api/workspace_router.py @@ -0,0 +1,146 @@ +from typing import List, Optional + +from fastapi import APIRouter, Depends, File, HTTPException, Request, UploadFile +from sqlmodel import Session + +from core.database import get_session +from models.bot import BotInstance +from schemas.system import WorkspaceFileUpdateRequest +from services.workspace_service import ( + get_workspace_tree_data, + read_workspace_text_file, + serve_workspace_file, + update_workspace_markdown_file, + upload_workspace_files_to_workspace, +) + +router = APIRouter() + + +@router.get("/api/bots/{bot_id}/workspace/tree") +def get_workspace_tree( + bot_id: str, + path: Optional[str] = None, + recursive: bool = False, + session: Session = Depends(get_session), +): + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return get_workspace_tree_data(bot_id, path=path, recursive=recursive) + +@router.get("/api/bots/{bot_id}/workspace/file") +def read_workspace_file( + bot_id: str, + path: str, + max_bytes: int = 200000, + session: Session = Depends(get_session), +): + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return read_workspace_text_file(bot_id, path=path, max_bytes=max_bytes) + +@router.put("/api/bots/{bot_id}/workspace/file") +def update_workspace_file( + bot_id: str, + path: str, + payload: WorkspaceFileUpdateRequest, + session: Session = Depends(get_session), +): + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return update_workspace_markdown_file(bot_id, path=path, content=payload.content) + +@router.get("/api/bots/{bot_id}/workspace/download") 
+def download_workspace_file( + bot_id: str, + path: str, + download: bool = False, + request: Request = None, + session: Session = Depends(get_session), +): + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return serve_workspace_file( + bot_id=bot_id, + path=path, + download=download, + request=request, + public=False, + redirect_html_to_raw=True, + ) + +@router.get("/public/bots/{bot_id}/workspace/download") +def public_download_workspace_file( + bot_id: str, + path: str, + download: bool = False, + request: Request = None, + session: Session = Depends(get_session), +): + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return serve_workspace_file( + bot_id=bot_id, + path=path, + download=download, + request=request, + public=True, + redirect_html_to_raw=True, + ) + +@router.get("/api/bots/{bot_id}/workspace/raw/{path:path}") +def raw_workspace_file( + bot_id: str, + path: str, + download: bool = False, + request: Request = None, + session: Session = Depends(get_session), +): + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return serve_workspace_file( + bot_id=bot_id, + path=path, + download=download, + request=request, + public=False, + redirect_html_to_raw=False, + ) + +@router.get("/public/bots/{bot_id}/workspace/raw/{path:path}") +def public_raw_workspace_file( + bot_id: str, + path: str, + download: bool = False, + request: Request = None, + session: Session = Depends(get_session), +): + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return serve_workspace_file( + bot_id=bot_id, + path=path, + download=download, + request=request, + public=True, + redirect_html_to_raw=False, + ) + +@router.post("/api/bots/{bot_id}/workspace/upload") +async def upload_workspace_files( + bot_id: 
str, + files: List[UploadFile] = File(...), + path: Optional[str] = None, + session: Session = Depends(get_session), +): + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return await upload_workspace_files_to_workspace(bot_id, files=files, path=path) diff --git a/backend/app_factory.py b/backend/app_factory.py new file mode 100644 index 0000000..43a0377 --- /dev/null +++ b/backend/app_factory.py @@ -0,0 +1,59 @@ +import os + +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware + +from api.bot_config_router import router as bot_config_router +from api.bot_management_router import router as bot_management_router +from api.bot_router import router as bot_router +from api.bot_runtime_router import router as bot_runtime_router +from api.bot_speech_router import router as bot_speech_router +from api.chat_history_router import router as chat_history_router +from api.chat_router import router as chat_router +from api.image_router import router as image_router +from api.platform_router import router as platform_router +from api.skill_router import router as skill_router +from api.system_router import router as system_router +from api.topic_router import router as topic_router +from api.workspace_router import router as workspace_router +from bootstrap.app_runtime import register_app_runtime +from core.auth_middleware import PasswordProtectionMiddleware +from core.docker_instance import docker_manager +from core.settings import BOTS_WORKSPACE_ROOT, DATA_ROOT +from core.speech_service import WhisperSpeechService + + +def create_app() -> FastAPI: + app = FastAPI(title="Dashboard Nanobot API") + + speech_service = WhisperSpeechService() + app.state.docker_manager = docker_manager + app.state.speech_service = speech_service + + app.add_middleware(PasswordProtectionMiddleware) + app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_methods=["*"], + 
allow_headers=["*"], + ) + + app.include_router(platform_router) + app.include_router(topic_router) + app.include_router(system_router) + app.include_router(image_router) + app.include_router(skill_router) + app.include_router(chat_router) + app.include_router(chat_history_router) + app.include_router(bot_speech_router) + app.include_router(workspace_router) + app.include_router(bot_config_router) + app.include_router(bot_runtime_router) + app.include_router(bot_management_router) + app.include_router(bot_router) + + os.makedirs(BOTS_WORKSPACE_ROOT, exist_ok=True) + os.makedirs(DATA_ROOT, exist_ok=True) + + register_app_runtime(app) + return app diff --git a/backend/bootstrap/app_runtime.py b/backend/bootstrap/app_runtime.py new file mode 100644 index 0000000..971711f --- /dev/null +++ b/backend/bootstrap/app_runtime.py @@ -0,0 +1,31 @@ +import asyncio + +from fastapi import FastAPI +from sqlmodel import Session, select + +from core.database import engine, init_database +from core.docker_instance import docker_manager +from core.settings import DATABASE_URL_DISPLAY, REDIS_ENABLED +from models.bot import BotInstance +from services.bot_storage_service import _migrate_bot_resources_store +from services.platform_service import prune_expired_activity_events +from services.runtime_service import docker_callback, set_main_loop + + +def register_app_runtime(app: FastAPI) -> None: + @app.on_event("startup") + async def _on_startup() -> None: + print( + f"🚀 Dashboard Backend 启动中... 
(DB: {DATABASE_URL_DISPLAY}, REDIS: {'Enabled' if REDIS_ENABLED else 'Disabled'})" + ) + current_loop = asyncio.get_running_loop() + app.state.main_loop = current_loop + set_main_loop(current_loop) + init_database() + with Session(engine) as session: + prune_expired_activity_events(session, force=True) + bots = session.exec(select(BotInstance)).all() + for bot in bots: + _migrate_bot_resources_store(bot.id) + docker_manager.ensure_monitor(bot.id, docker_callback) + print("✅ 启动自检完成") diff --git a/backend/core/auth_middleware.py b/backend/core/auth_middleware.py new file mode 100644 index 0000000..48b9c30 --- /dev/null +++ b/backend/core/auth_middleware.py @@ -0,0 +1,125 @@ +from __future__ import annotations + +from typing import Optional + +from fastapi import Request +from fastapi.responses import JSONResponse +from starlette.middleware.base import BaseHTTPMiddleware + +from core.settings import PANEL_ACCESS_PASSWORD +from services.bot_storage_service import _read_bot_config + +PANEL_ACCESS_PASSWORD_HEADER = "x-panel-password" +BOT_ACCESS_PASSWORD_HEADER = "X-Bot-Access-Password" +BOT_PANEL_ONLY_SUFFIXES = {"/enable", "/disable", "/deactivate"} + + +def _extract_bot_id_from_api_path(path: str) -> Optional[str]: + parts = [p for p in path.split("/") if p.strip()] + if len(parts) >= 3 and parts[0] == "api" and parts[1] == "bots": + return parts[2] + return None + + +def _get_supplied_panel_password_http(request: Request) -> str: + header_value = str(request.headers.get(PANEL_ACCESS_PASSWORD_HEADER) or "").strip() + if header_value: + return header_value + query_value = str(request.query_params.get("panel_access_password") or "").strip() + return query_value + + +def _get_supplied_bot_access_password_http(request: Request) -> str: + header_value = str(request.headers.get(BOT_ACCESS_PASSWORD_HEADER) or "").strip() + if header_value: + return header_value + query_value = str(request.query_params.get("bot_access_password") or "").strip() + return query_value + + +def 
_validate_panel_access_password(supplied: str) -> Optional[str]: + configured = str(PANEL_ACCESS_PASSWORD or "").strip() + if not configured: + return None + candidate = str(supplied or "").strip() + if not candidate: + return "Panel access password required" + if candidate != configured: + return "Invalid panel access password" + return None + + +def _validate_bot_access_password(bot_id: str, supplied: str) -> Optional[str]: + config = _read_bot_config(bot_id) + configured = str(config.get("access_password") or "").strip() + if not configured: + return None + candidate = str(supplied or "").strip() + if not candidate: + return "Bot access password required" + if candidate != configured: + return "Invalid bot access password" + return None + + +def _is_bot_panel_management_api_path(path: str, method: str = "GET") -> bool: + raw = str(path or "").strip() + verb = str(method or "GET").strip().upper() + if not raw.startswith("/api/bots/"): + return False + bot_id = _extract_bot_id_from_api_path(raw) + if not bot_id: + return False + return ( + raw.endswith("/start") + or raw.endswith("/stop") + or raw.endswith("/enable") + or raw.endswith("/disable") + or raw.endswith("/deactivate") + or (verb in {"PUT", "DELETE"} and raw == f"/api/bots/{bot_id}") + ) + + +def _is_panel_protected_api_path(path: str, method: str = "GET") -> bool: + raw = str(path or "").strip() + verb = str(method or "GET").strip().upper() + if not raw.startswith("/api/"): + return False + if raw in { + "/api/panel/auth/status", + "/api/panel/auth/login", + "/api/health", + "/api/health/cache", + }: + return False + if _is_bot_panel_management_api_path(raw, verb): + return True + if _extract_bot_id_from_api_path(raw): + return False + return True + + +class PasswordProtectionMiddleware(BaseHTTPMiddleware): + async def dispatch(self, request: Request, call_next): + path = request.url.path + method = request.method.upper() + + if method == "OPTIONS": + return await call_next(request) + + bot_id = 
_extract_bot_id_from_api_path(path) + if not bot_id: + if _is_panel_protected_api_path(path, method): + panel_error = _validate_panel_access_password(_get_supplied_panel_password_http(request)) + if panel_error: + return JSONResponse(status_code=401, content={"detail": panel_error}) + return await call_next(request) + + if _is_bot_panel_management_api_path(path, method): + panel_error = _validate_panel_access_password(_get_supplied_panel_password_http(request)) + if panel_error: + bot_error = _validate_bot_access_password(bot_id, _get_supplied_bot_access_password_http(request)) + if bot_error: + return JSONResponse(status_code=401, content={"detail": bot_error}) + + return await call_next(request) diff --git a/backend/core/config_manager.py b/backend/core/config_manager.py index b42fd27..327f643 100644 --- a/backend/core/config_manager.py +++ b/backend/core/config_manager.py @@ -2,13 +2,26 @@ import json import os from typing import Any, Dict, List -from core.settings import ( - DEFAULT_AGENTS_MD, - DEFAULT_IDENTITY_MD, - DEFAULT_SOUL_MD, - DEFAULT_TOOLS_MD, - DEFAULT_USER_MD, -) +from services.template_service import get_agent_md_templates + + +def _provider_default_api_base(provider: str) -> str: + normalized = str(provider or "").strip().lower() + if normalized == "openai": + return "https://api.openai.com/v1" + if normalized == "openrouter": + return "https://openrouter.ai/api/v1" + if normalized in {"dashscope", "aliyun", "qwen", "aliyun-qwen"}: + return "https://dashscope.aliyuncs.com/compatible-mode/v1" + if normalized == "deepseek": + return "https://api.deepseek.com/v1" + if normalized in {"xunfei", "iflytek", "xfyun"}: + return "https://spark-api-open.xf-yun.com/v1" + if normalized in {"kimi", "moonshot"}: + return "https://api.moonshot.cn/v1" + if normalized == "minimax": + return "https://api.minimax.chat/v1" + return "" class BotConfigManager: @@ -26,11 +39,39 @@ class BotConfigManager: for d in [dot_nanobot_dir, workspace_dir, memory_dir, skills_dir]: 
os.makedirs(d, exist_ok=True) - raw_provider_name = (bot_data.get("llm_provider") or "openrouter").strip().lower() + template_defaults = get_agent_md_templates() + existing_config: Dict[str, Any] = {} + config_path = os.path.join(dot_nanobot_dir, "config.json") + if os.path.isfile(config_path): + try: + with open(config_path, "r", encoding="utf-8") as f: + loaded = json.load(f) + if isinstance(loaded, dict): + existing_config = loaded + except Exception: + existing_config = {} + + existing_provider_name = "" + existing_provider_cfg: Dict[str, Any] = {} + existing_model_name = "" + providers_cfg = existing_config.get("providers") + if isinstance(providers_cfg, dict): + for provider_name, provider_cfg in providers_cfg.items(): + existing_provider_name = str(provider_name or "").strip().lower() + if isinstance(provider_cfg, dict): + existing_provider_cfg = provider_cfg + break + agents_cfg = existing_config.get("agents") + if isinstance(agents_cfg, dict): + defaults_cfg = agents_cfg.get("defaults") + if isinstance(defaults_cfg, dict): + existing_model_name = str(defaults_cfg.get("model") or "").strip() + + raw_provider_name = (bot_data.get("llm_provider") or existing_provider_name).strip().lower() provider_name = raw_provider_name - model_name = (bot_data.get("llm_model") or "openai/gpt-4o-mini").strip() - api_key = (bot_data.get("api_key") or "").strip() - api_base = (bot_data.get("api_base") or "").strip() or None + model_name = (bot_data.get("llm_model") or existing_model_name).strip() + api_key = (bot_data.get("api_key") or existing_provider_cfg.get("apiKey") or "").strip() + api_base = (bot_data.get("api_base") or existing_provider_cfg.get("apiBase") or "").strip() or None provider_alias = { "aliyun": "dashscope", @@ -47,6 +88,8 @@ class BotConfigManager: if provider_name == "openai" and raw_provider_name in {"xunfei", "iflytek", "xfyun"}: if model_name and "/" not in model_name: model_name = f"openai/{model_name}" + if not api_base: + api_base = 
_provider_default_api_base(raw_provider_name) or _provider_default_api_base(provider_name) or None provider_cfg: Dict[str, Any] = { "apiKey": api_key, @@ -61,17 +104,6 @@ class BotConfigManager: "sendToolHints": bool(bot_data.get("send_tool_hints", False)), } - existing_config: Dict[str, Any] = {} - config_path = os.path.join(dot_nanobot_dir, "config.json") - if os.path.isfile(config_path): - try: - with open(config_path, "r", encoding="utf-8") as f: - loaded = json.load(f) - if isinstance(loaded, dict): - existing_config = loaded - except Exception: - existing_config = {} - existing_tools = existing_config.get("tools") tools_cfg: Dict[str, Any] = dict(existing_tools) if isinstance(existing_tools, dict) else {} if "mcp_servers" in bot_data: @@ -88,9 +120,7 @@ class BotConfigManager: "maxTokens": int(bot_data.get("max_tokens") or 8192), } }, - "providers": { - provider_name: provider_cfg, - }, + "providers": {provider_name: provider_cfg} if provider_name else {}, "channels": channels_cfg, } if tools_cfg: @@ -189,6 +219,32 @@ class BotConfigManager: } continue + if channel_type == "weixin": + weixin_cfg: Dict[str, Any] = { + "enabled": enabled, + "allowFrom": self._normalize_allow_from(extra.get("allowFrom", [])), + } + route_tag = str(extra.get("routeTag") or "").strip() + if route_tag: + weixin_cfg["routeTag"] = route_tag + state_dir = str(extra.get("stateDir") or "").strip() + if state_dir: + weixin_cfg["stateDir"] = state_dir + base_url = str(extra.get("baseUrl") or "").strip() + if base_url: + weixin_cfg["baseUrl"] = base_url + cdn_base_url = str(extra.get("cdnBaseUrl") or "").strip() + if cdn_base_url: + weixin_cfg["cdnBaseUrl"] = cdn_base_url + poll_timeout = extra.get("pollTimeout", extra.get("poll_timeout")) + if poll_timeout not in {None, ""}: + try: + weixin_cfg["pollTimeout"] = max(1, int(poll_timeout)) + except (TypeError, ValueError): + pass + channels_cfg["weixin"] = weixin_cfg + continue + if channel_type == "email": channels_cfg["email"] = { 
"enabled": enabled, @@ -227,11 +283,11 @@ class BotConfigManager: json.dump(config_data, f, indent=4, ensure_ascii=False) bootstrap_files = { - "AGENTS.md": bot_data.get("agents_md") or DEFAULT_AGENTS_MD, - "SOUL.md": bot_data.get("soul_md") or bot_data.get("system_prompt") or DEFAULT_SOUL_MD, - "USER.md": bot_data.get("user_md") or DEFAULT_USER_MD, - "TOOLS.md": bot_data.get("tools_md") or DEFAULT_TOOLS_MD, - "IDENTITY.md": bot_data.get("identity_md") or DEFAULT_IDENTITY_MD, + "AGENTS.md": bot_data.get("agents_md") or template_defaults.get("agents_md", ""), + "SOUL.md": bot_data.get("soul_md") or bot_data.get("system_prompt") or template_defaults.get("soul_md", ""), + "USER.md": bot_data.get("user_md") or template_defaults.get("user_md", ""), + "TOOLS.md": bot_data.get("tools_md") or template_defaults.get("tools_md", ""), + "IDENTITY.md": bot_data.get("identity_md") or template_defaults.get("identity_md", ""), } for filename, content in bootstrap_files.items(): diff --git a/backend/core/docker_instance.py b/backend/core/docker_instance.py new file mode 100644 index 0000000..5d817c4 --- /dev/null +++ b/backend/core/docker_instance.py @@ -0,0 +1,4 @@ +from core.docker_manager import BotDockerManager +from core.settings import BOTS_WORKSPACE_ROOT + +docker_manager = BotDockerManager(host_data_root=BOTS_WORKSPACE_ROOT) diff --git a/backend/core/docker_manager.py b/backend/core/docker_manager.py index 5e1f05b..2e15ac5 100644 --- a/backend/core/docker_manager.py +++ b/backend/core/docker_manager.py @@ -24,6 +24,8 @@ class BotDockerManager: self.base_image = base_image self.active_monitors = {} self._last_delivery_error: Dict[str, str] = {} + self._storage_limit_supported: Optional[bool] = None + self._storage_limit_warning_emitted = False @staticmethod def _normalize_resource_limits( @@ -88,6 +90,84 @@ class BotDockerManager: print(f"[DockerManager] list_images_by_repo failed: {e}") return rows + @staticmethod + def _docker_error_message(exc: Exception) -> str: + 
explanation = getattr(exc, "explanation", None) + if isinstance(explanation, bytes): + try: + explanation = explanation.decode("utf-8", errors="replace") + except Exception: + explanation = str(explanation) + if explanation: + return str(explanation) + response = getattr(exc, "response", None) + text = getattr(response, "text", None) + if text: + return str(text) + return str(exc) + + @classmethod + def _is_unsupported_storage_opt_error(cls, exc: Exception) -> bool: + message = cls._docker_error_message(exc).lower() + if "storage-opt" not in message and "storage opt" not in message: + return False + markers = ( + "overlay over xfs", + "overlay2 over xfs", + "pquota", + "project quota", + "storage driver does not support", + "xfs", + ) + return any(marker in message for marker in markers) + + def _cleanup_container_if_exists(self, container_name: str) -> None: + if not self.client: + return + try: + container = self.client.containers.get(container_name) + container.remove(force=True) + except docker.errors.NotFound: + pass + except Exception as e: + print(f"[DockerManager] failed to cleanup container {container_name}: {e}") + + def _run_container_with_storage_fallback( + self, + bot_id: str, + container_name: str, + storage_gb: int, + **base_kwargs: Any, + ): + if not self.client: + raise RuntimeError("Docker client is not available") + if storage_gb <= 0: + return self.client.containers.run(**base_kwargs) + if self._storage_limit_supported is False: + return self.client.containers.run(**base_kwargs) + + try: + container = self.client.containers.run( + storage_opt={"size": f"{storage_gb}G"}, + **base_kwargs, + ) + self._storage_limit_supported = True + return container + except Exception as exc: + if not self._is_unsupported_storage_opt_error(exc): + raise + self._storage_limit_supported = False + if not self._storage_limit_warning_emitted: + print( + "[DockerManager] storage limit not supported by current Docker storage driver; " + f"falling back to unlimited 
container filesystem size. Details: {self._docker_error_message(exc)}" + ) + self._storage_limit_warning_emitted = True + else: + print(f"[DockerManager] storage limit skipped for {bot_id}: unsupported by current Docker storage driver") + self._cleanup_container_if_exists(container_name) + return self.client.containers.run(**base_kwargs) + def start_bot( self, bot_id: str, @@ -141,18 +221,12 @@ class BotDockerManager: pass container = None - if storage > 0: - try: - container = self.client.containers.run( - storage_opt={"size": f"{storage}G"}, - **base_kwargs, - ) - except Exception as e: - # Some Docker engines (e.g. Desktop/overlay2) may not support size storage option. - print(f"[DockerManager] storage limit not applied for {bot_id}: {e}") - container = self.client.containers.run(**base_kwargs) - else: - container = self.client.containers.run(**base_kwargs) + container = self._run_container_with_storage_fallback( + bot_id, + container_name, + storage, + **base_kwargs, + ) if on_state_change: monitor_thread = threading.Thread( @@ -538,19 +612,60 @@ class BotDockerManager: self._last_delivery_error[bot_id] = reason return False - def get_recent_logs(self, bot_id: str, tail: int = 300) -> List[str]: + def _read_log_lines(self, bot_id: str, tail: Optional[int] = None) -> List[str]: if not self.client: return [] container_name = f"worker_{bot_id}" try: container = self.client.containers.get(container_name) - raw = container.logs(tail=max(1, int(tail))) + raw = container.logs(tail=max(1, int(tail))) if tail is not None else container.logs() text = raw.decode("utf-8", errors="ignore") return [line for line in text.splitlines() if line.strip()] except Exception as e: print(f"[DockerManager] Error reading logs for {bot_id}: {e}") return [] + def get_recent_logs(self, bot_id: str, tail: int = 300) -> List[str]: + return self._read_log_lines(bot_id, tail=max(1, int(tail))) + + def get_logs_page( + self, + bot_id: str, + offset: int = 0, + limit: int = 50, + reverse: bool = 
True, + ) -> Dict[str, Any]: + safe_offset = max(0, int(offset)) + safe_limit = max(1, int(limit)) + if reverse: + # Docker logs API supports tail but not arbitrary offsets. For reverse pagination + # we only read the minimal newest slice needed for the requested page. + tail_count = safe_offset + safe_limit + 1 + lines = self._read_log_lines(bot_id, tail=tail_count) + ordered = list(reversed(lines)) + page = ordered[safe_offset:safe_offset + safe_limit] + has_more = len(lines) > safe_offset + safe_limit + return { + "logs": page, + "total": None, + "offset": safe_offset, + "limit": safe_limit, + "has_more": has_more, + "reverse": reverse, + } + + lines = self._read_log_lines(bot_id, tail=None) + total = len(lines) + page = lines[safe_offset:safe_offset + safe_limit] + return { + "logs": page, + "total": total, + "offset": safe_offset, + "limit": safe_limit, + "has_more": safe_offset + safe_limit < total, + "reverse": reverse, + } + def _monitor_container_logs(self, bot_id: str, container, callback: Callable[[str, dict], None]): try: buffer = "" diff --git a/backend/core/settings.py b/backend/core/settings.py index 33c1295..8ba1eba 100644 --- a/backend/core/settings.py +++ b/backend/core/settings.py @@ -1,4 +1,3 @@ -import json import os import re from pathlib import Path @@ -30,13 +29,6 @@ for _k, _v in _prod_env_values.items(): os.environ[_k] = str(_v) -def _env_text(name: str, default: str) -> str: - raw = os.getenv(name) - if raw is None: - return default - return str(raw).replace("\\n", "\n") - - def _env_bool(name: str, default: bool) -> bool: raw = os.getenv(name) if raw is None: @@ -95,23 +87,6 @@ def _normalize_dir_path(path_value: str) -> str: return str((BACKEND_ROOT / p).resolve()) -def _load_json_object(path: Path) -> dict[str, object]: - try: - with open(path, "r", encoding="utf-8") as f: - data = json.load(f) - if isinstance(data, dict): - return data - except Exception: - pass - return {} - - -def _read_template_md(raw: object) -> str: - if raw is 
None: - return "" - return str(raw).replace("\r\n", "\n").strip() - - DATA_ROOT: Final[str] = _normalize_dir_path(os.getenv("DATA_ROOT", str(PROJECT_ROOT / "data"))) BOTS_WORKSPACE_ROOT: Final[str] = _normalize_dir_path( os.getenv("BOTS_WORKSPACE_ROOT", str(PROJECT_ROOT / "workspace" / "bots")) @@ -226,47 +201,3 @@ PANEL_ACCESS_PASSWORD: Final[str] = str(os.getenv("PANEL_ACCESS_PASSWORD") or "" TEMPLATE_ROOT: Final[Path] = (BACKEND_ROOT / "templates").resolve() AGENT_MD_TEMPLATES_FILE: Final[Path] = TEMPLATE_ROOT / "agent_md_templates.json" TOPIC_PRESETS_TEMPLATES_FILE: Final[Path] = TEMPLATE_ROOT / "topic_presets.json" - -_agent_md_templates_raw = _load_json_object(AGENT_MD_TEMPLATES_FILE) -DEFAULT_AGENTS_MD: Final[str] = _env_text( - "DEFAULT_AGENTS_MD", - _read_template_md(_agent_md_templates_raw.get("agents_md")), -).strip() -DEFAULT_SOUL_MD: Final[str] = _env_text( - "DEFAULT_SOUL_MD", - _read_template_md(_agent_md_templates_raw.get("soul_md")), -).strip() -DEFAULT_USER_MD: Final[str] = _env_text( - "DEFAULT_USER_MD", - _read_template_md(_agent_md_templates_raw.get("user_md")), -).strip() -DEFAULT_TOOLS_MD: Final[str] = _env_text( - "DEFAULT_TOOLS_MD", - _read_template_md(_agent_md_templates_raw.get("tools_md")), -).strip() -DEFAULT_IDENTITY_MD: Final[str] = _env_text( - "DEFAULT_IDENTITY_MD", - _read_template_md(_agent_md_templates_raw.get("identity_md")), -).strip() - -_topic_presets_raw = _load_json_object(TOPIC_PRESETS_TEMPLATES_FILE) -_topic_presets_list = _topic_presets_raw.get("presets") -TOPIC_PRESET_TEMPLATES: Final[list[dict[str, object]]] = [ - dict(row) for row in (_topic_presets_list if isinstance(_topic_presets_list, list) else []) if isinstance(row, dict) -] - - -def load_agent_md_templates() -> dict[str, str]: - raw = _load_json_object(AGENT_MD_TEMPLATES_FILE) - rows: dict[str, str] = {} - for key in ("agents_md", "soul_md", "user_md", "tools_md", "identity_md"): - rows[key] = _read_template_md(raw.get(key)) - return rows - - -def 
load_topic_presets_template() -> dict[str, object]: - raw = _load_json_object(TOPIC_PRESETS_TEMPLATES_FILE) - presets = raw.get("presets") - if not isinstance(presets, list): - return {"presets": []} - return {"presets": [dict(row) for row in presets if isinstance(row, dict)]} diff --git a/backend/core/utils.py b/backend/core/utils.py new file mode 100644 index 0000000..31ea88e --- /dev/null +++ b/backend/core/utils.py @@ -0,0 +1,160 @@ +import os +import re +import json +from datetime import datetime, timezone, timedelta +from typing import Any, Dict, List, Optional +from zoneinfo import ZoneInfo +from fastapi import HTTPException +from core.settings import DEFAULT_BOT_SYSTEM_TIMEZONE + +_ENV_KEY_RE = re.compile(r"^[A-Z_][A-Z0-9_]{0,127}$") + +__all__ = [ + "_calc_dir_size_bytes", + "_get_default_system_timezone", + "_is_ignored_skill_zip_top_level", + "_is_image_attachment_path", + "_is_valid_top_level_skill_name", + "_is_video_attachment_path", + "_is_visual_attachment_path", + "_normalize_env_params", + "_normalize_system_timezone", + "_parse_env_params", + "_parse_json_string_list", + "_read_description_from_text", + "_resolve_local_day_range", + "_safe_float", + "_safe_int", + "_sanitize_skill_market_key", + "_sanitize_zip_filename", + "_workspace_stat_ctime_iso", +] + +def _resolve_local_day_range(date_text: str, tz_offset_minutes: Optional[int]) -> tuple[datetime, datetime]: + try: + local_day = datetime.strptime(str(date_text or "").strip(), "%Y-%m-%d") + except ValueError as exc: + raise HTTPException(status_code=400, detail="Invalid date, expected YYYY-MM-DD") from exc + + offset = timedelta(minutes=tz_offset_minutes if tz_offset_minutes is not None else 0) + utc_start = (local_day).replace(tzinfo=timezone.utc) + offset + utc_end = utc_start + timedelta(days=1) + return utc_start, utc_end + +def _sanitize_zip_filename(name: str) -> str: + s = str(name or "").strip() + s = re.sub(r"[^a-zA-Z0-9._-]", "_", s) + return s if s else "upload.zip" + +def 
_normalize_env_params(raw: Any) -> Dict[str, str]: + if not isinstance(raw, dict): + return {} + res: Dict[str, str] = {} + for k, v in raw.items(): + ks = str(k).strip() + if _ENV_KEY_RE.match(ks): + res[ks] = str(v or "").strip() + return res + +def _get_default_system_timezone() -> str: + return str(DEFAULT_BOT_SYSTEM_TIMEZONE or "Asia/Shanghai").strip() + +def _normalize_system_timezone(raw: Any) -> str: + s = str(raw or "").strip() + if not s: + return _get_default_system_timezone() + try: + ZoneInfo(s) + return s + except Exception: + return _get_default_system_timezone() + +def _safe_float(raw: Any, default: float) -> float: + try: + return float(raw) + except (ValueError, TypeError): + return default + +def _safe_int(raw: Any, default: int) -> int: + try: + return int(raw) + except (ValueError, TypeError): + return default + +def _parse_env_params(raw: Any) -> Dict[str, str]: + if isinstance(raw, dict): + return _normalize_env_params(raw) + if isinstance(raw, str): + try: + parsed = json.loads(raw) + return _normalize_env_params(parsed) + except Exception: + pass + return {} + +def _is_valid_top_level_skill_name(name: str) -> bool: + return bool(re.match(r"^[a-zA-Z0-9_-]+$", name)) + +def _parse_json_string_list(raw: Any) -> List[str]: + if not raw: + return [] + if isinstance(raw, list): + return [str(v) for v in raw] + if isinstance(raw, str): + try: + parsed = json.loads(raw) + if isinstance(parsed, list): + return [str(v) for v in parsed] + except Exception: + pass + return [] + +def _is_ignored_skill_zip_top_level(name: str) -> bool: + return name.startswith(".") or name.startswith("__") or name in {"venv", "node_modules"} + +def _read_description_from_text(text: str) -> str: + if not text: + return "" + lines = text.strip().split("\n") + for line in lines: + s = line.strip() + if s and not s.startswith("#"): + return s[:200] + return "" + +def _sanitize_skill_market_key(key: str) -> str: + s = str(key or "").strip().lower() + s = 
re.sub(r"[^a-z0-9_-]", "_", s) + return s + +def _calc_dir_size_bytes(path: str) -> int: + total = 0 + try: + for root, dirs, files in os.walk(path): + for f in files: + fp = os.path.join(root, f) + if not os.path.islink(fp): + total += os.path.getsize(fp) + except Exception: + pass + return total + +def _is_image_attachment_path(path: str) -> bool: + ext = (os.path.splitext(path)[1] or "").lower() + return ext in {".png", ".jpg", ".jpeg", ".gif", ".webp", ".svg", ".bmp"} + +def _is_video_attachment_path(path: str) -> bool: + ext = (os.path.splitext(path)[1] or "").lower() + return ext in {".mp4", ".mov", ".avi", ".mkv", ".webm"} + +def _is_visual_attachment_path(path: str) -> bool: + return _is_image_attachment_path(path) or _is_video_attachment_path(path) + +def _workspace_stat_ctime_iso(stat: os.stat_result) -> str: + ts = getattr(stat, "st_birthtime", None) + if ts is None: + ts = getattr(stat, "st_ctime", None) + try: + return datetime.fromtimestamp(float(ts), tz=timezone.utc).isoformat().replace("+00:00", "Z") + except Exception: + return datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).isoformat().replace("+00:00", "Z") diff --git a/backend/core/websocket_manager.py b/backend/core/websocket_manager.py new file mode 100644 index 0000000..40ec2a9 --- /dev/null +++ b/backend/core/websocket_manager.py @@ -0,0 +1,27 @@ +from typing import Any, Dict, List +from fastapi import WebSocket + +class WSConnectionManager: + def __init__(self): + self.connections: Dict[str, List[WebSocket]] = {} + + async def connect(self, bot_id: str, websocket: WebSocket): + await websocket.accept() + self.connections.setdefault(bot_id, []).append(websocket) + + def disconnect(self, bot_id: str, websocket: WebSocket): + conns = self.connections.get(bot_id, []) + if websocket in conns: + conns.remove(websocket) + if not conns and bot_id in self.connections: + del self.connections[bot_id] + + async def broadcast(self, bot_id: str, data: Dict[str, Any]): + conns = 
list(self.connections.get(bot_id, [])) + for ws in conns: + try: + await ws.send_json(data) + except Exception: + self.disconnect(bot_id, ws) + +manager = WSConnectionManager() diff --git a/backend/main.py b/backend/main.py index 4c47a02..be8b6d6 100644 --- a/backend/main.py +++ b/backend/main.py @@ -1,4410 +1,8 @@ -import asyncio -import json -import logging -import mimetypes -import os -import re -import shutil -import tempfile -import time -import zipfile -from datetime import datetime, timedelta, timezone -from typing import Any, Dict, List, Optional -from urllib.parse import quote, unquote -from zoneinfo import ZoneInfo - -import httpx -from pydantic import BaseModel -from fastapi import Depends, FastAPI, File, Form, HTTPException, Request, UploadFile, WebSocket, WebSocketDisconnect -from fastapi.responses import FileResponse, JSONResponse, RedirectResponse, StreamingResponse -from fastapi.middleware.cors import CORSMiddleware -from sqlmodel import Session, select - -from core.config_manager import BotConfigManager -from core.cache import cache -from core.database import engine, get_session, init_database -from core.docker_manager import BotDockerManager -from core.speech_service import ( - SpeechDisabledError, - SpeechDurationError, - SpeechServiceError, - WhisperSpeechService, -) -from core.settings import ( - BOTS_WORKSPACE_ROOT, - DATA_ROOT, - DATABASE_ECHO, - DATABASE_ENGINE, - DATABASE_URL_DISPLAY, - AGENT_MD_TEMPLATES_FILE, - DEFAULT_AGENTS_MD, - DEFAULT_BOT_SYSTEM_TIMEZONE, - DEFAULT_IDENTITY_MD, - DEFAULT_SOUL_MD, - DEFAULT_TOOLS_MD, - DEFAULT_USER_MD, - PANEL_ACCESS_PASSWORD, - PROJECT_ROOT, - REDIS_ENABLED, - REDIS_PREFIX, - REDIS_URL, - TOPIC_PRESET_TEMPLATES, - TOPIC_PRESETS_TEMPLATES_FILE, - load_agent_md_templates, - load_topic_presets_template, -) -from models.bot import BotInstance, BotMessage, NanobotImage -from models.platform import BotActivityEvent, BotRequestUsage -from models.skill import BotSkillInstall, SkillMarketItem -from 
models.topic import TopicItem, TopicTopic -from api.platform_router import router as platform_router -from api.topic_router import router as topic_router -from services.topic_runtime import publish_runtime_topic_packet -from services.platform_service import ( - bind_usage_message, - create_usage_request, - fail_latest_usage, - finalize_usage_from_packet, - get_chat_pull_page_size, - get_platform_settings_snapshot, - get_speech_runtime_settings, - prune_expired_activity_events, - record_activity_event, -) - -app = FastAPI(title="Dashboard Nanobot API") -logger = logging.getLogger("dashboard.backend") - -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_methods=["*"], - allow_headers=["*"], -) -app.include_router(topic_router) -app.include_router(platform_router) - -os.makedirs(BOTS_WORKSPACE_ROOT, exist_ok=True) -os.makedirs(DATA_ROOT, exist_ok=True) - -docker_manager = BotDockerManager(host_data_root=BOTS_WORKSPACE_ROOT) -config_manager = BotConfigManager(host_data_root=BOTS_WORKSPACE_ROOT) -speech_service = WhisperSpeechService() -app.state.docker_manager = docker_manager -app.state.speech_service = speech_service -BOT_ID_PATTERN = re.compile(r"^[A-Za-z0-9_]+$") -BOT_ACCESS_PASSWORD_HEADER = "X-Bot-Access-Password" - - -class ChannelConfigRequest(BaseModel): - channel_type: str - external_app_id: Optional[str] = None - app_secret: Optional[str] = None - internal_port: Optional[int] = None - is_active: bool = True - extra_config: Optional[Dict[str, Any]] = None - - -class ChannelConfigUpdateRequest(BaseModel): - channel_type: Optional[str] = None - external_app_id: Optional[str] = None - app_secret: Optional[str] = None - internal_port: Optional[int] = None - is_active: Optional[bool] = None - extra_config: Optional[Dict[str, Any]] = None - - -class BotCreateRequest(BaseModel): - id: str - name: str - enabled: Optional[bool] = True - access_password: Optional[str] = None - llm_provider: str - llm_model: str - api_key: str - image_tag: str - 
system_prompt: Optional[str] = None - api_base: Optional[str] = None - temperature: float = 0.2 - top_p: float = 1.0 - max_tokens: int = 8192 - cpu_cores: float = 1.0 - memory_mb: int = 1024 - storage_gb: int = 10 - system_timezone: Optional[str] = None - soul_md: Optional[str] = None - agents_md: Optional[str] = None - user_md: Optional[str] = None - tools_md: Optional[str] = None - tools_config: Optional[Dict[str, Any]] = None - env_params: Optional[Dict[str, str]] = None - identity_md: Optional[str] = None - channels: Optional[List[ChannelConfigRequest]] = None - send_progress: Optional[bool] = None - send_tool_hints: Optional[bool] = None - - -class BotUpdateRequest(BaseModel): - name: Optional[str] = None - enabled: Optional[bool] = None - access_password: Optional[str] = None - llm_provider: Optional[str] = None - llm_model: Optional[str] = None - api_key: Optional[str] = None - api_base: Optional[str] = None - image_tag: Optional[str] = None - system_prompt: Optional[str] = None - temperature: Optional[float] = None - top_p: Optional[float] = None - max_tokens: Optional[int] = None - cpu_cores: Optional[float] = None - memory_mb: Optional[int] = None - storage_gb: Optional[int] = None - system_timezone: Optional[str] = None - soul_md: Optional[str] = None - agents_md: Optional[str] = None - user_md: Optional[str] = None - tools_md: Optional[str] = None - tools_config: Optional[Dict[str, Any]] = None - env_params: Optional[Dict[str, str]] = None - identity_md: Optional[str] = None - send_progress: Optional[bool] = None - send_tool_hints: Optional[bool] = None - - -class BotToolsConfigUpdateRequest(BaseModel): - tools_config: Optional[Dict[str, Any]] = None - - -class BotMcpConfigUpdateRequest(BaseModel): - mcp_servers: Optional[Dict[str, Any]] = None - - -class BotEnvParamsUpdateRequest(BaseModel): - env_params: Optional[Dict[str, str]] = None - - -class BotPageAuthLoginRequest(BaseModel): - password: str - - -class CommandRequest(BaseModel): - command: 
Optional[str] = None - attachments: Optional[List[str]] = None - - -class MessageFeedbackRequest(BaseModel): - feedback: Optional[str] = None # up | down | null - - -class WorkspaceFileUpdateRequest(BaseModel): - content: str - - -class PanelLoginRequest(BaseModel): - password: Optional[str] = None - - -class SystemTemplatesUpdateRequest(BaseModel): - agent_md_templates: Optional[Dict[str, str]] = None - topic_presets: Optional[Dict[str, Any]] = None - - -def _normalize_packet_channel(packet: Dict[str, Any]) -> str: - raw = str(packet.get("channel") or packet.get("source") or "").strip().lower() - if raw in {"dashboard", "dashboard_channel", "dashboard-channel"}: - return "dashboard" - return raw - - -def _normalize_media_item(bot_id: str, value: Any) -> str: - raw = str(value or "").strip().replace("\\", "/") - if not raw: - return "" - if raw.startswith("/root/.nanobot/workspace/"): - return raw[len("/root/.nanobot/workspace/") :].lstrip("/") - root = _workspace_root(bot_id) - if os.path.isabs(raw): - try: - if os.path.commonpath([root, raw]) == root: - return os.path.relpath(raw, root).replace("\\", "/") - except Exception: - pass - return raw.lstrip("/") - - -def _normalize_media_list(raw: Any, bot_id: str) -> List[str]: - if not isinstance(raw, list): - return [] - rows: List[str] = [] - for v in raw: - s = _normalize_media_item(bot_id, v) - if s: - rows.append(s) - return rows - - -def _persist_runtime_packet(bot_id: str, packet: Dict[str, Any]) -> Optional[int]: - packet_type = str(packet.get("type", "")).upper() - if packet_type not in {"AGENT_STATE", "ASSISTANT_MESSAGE", "USER_COMMAND", "BUS_EVENT"}: - return None - source_channel = _normalize_packet_channel(packet) - if source_channel != "dashboard": - return None - persisted_message_id: Optional[int] = None - with Session(engine) as session: - bot = session.get(BotInstance, bot_id) - if not bot: - return None - if packet_type == "AGENT_STATE": - payload = packet.get("payload") or {} - state = 
str(payload.get("state") or "").strip() - action = str(payload.get("action_msg") or payload.get("msg") or "").strip() - if state: - bot.current_state = state - if action: - bot.last_action = action[:4000] - elif packet_type == "ASSISTANT_MESSAGE": - bot.current_state = "IDLE" - text_msg = str(packet.get("text") or "").strip() - media_list = _normalize_media_list(packet.get("media"), bot_id) - if text_msg or media_list: - if text_msg: - bot.last_action = " ".join(text_msg.split())[:4000] - message_row = BotMessage( - bot_id=bot_id, - role="assistant", - text=text_msg, - media_json=json.dumps(media_list, ensure_ascii=False) if media_list else None, - ) - session.add(message_row) - session.flush() - persisted_message_id = message_row.id - usage_row = finalize_usage_from_packet( - session, - bot_id, - { - **packet, - "message_id": persisted_message_id, - }, - ) - elif packet_type == "USER_COMMAND": - text_msg = str(packet.get("text") or "").strip() - media_list = _normalize_media_list(packet.get("media"), bot_id) - if text_msg or media_list: - message_row = BotMessage( - bot_id=bot_id, - role="user", - text=text_msg, - media_json=json.dumps(media_list, ensure_ascii=False) if media_list else None, - ) - session.add(message_row) - session.flush() - persisted_message_id = message_row.id - bind_usage_message( - session, - bot_id, - str(packet.get("request_id") or "").strip(), - persisted_message_id, - ) - elif packet_type == "BUS_EVENT": - # Dashboard channel emits BUS_EVENT for both progress and final replies. - # Persist only non-progress events to keep durable chat history clean. 
- is_progress = bool(packet.get("is_progress")) - detail_text = str(packet.get("content") or packet.get("text") or "").strip() - if not is_progress: - text_msg = detail_text - media_list = _normalize_media_list(packet.get("media"), bot_id) - if text_msg or media_list: - bot.current_state = "IDLE" - if text_msg: - bot.last_action = " ".join(text_msg.split())[:4000] - message_row = BotMessage( - bot_id=bot_id, - role="assistant", - text=text_msg, - media_json=json.dumps(media_list, ensure_ascii=False) if media_list else None, - ) - session.add(message_row) - session.flush() - persisted_message_id = message_row.id - usage_row = finalize_usage_from_packet( - session, - bot_id, - { - "text": text_msg, - "usage": packet.get("usage"), - "request_id": packet.get("request_id"), - "provider": packet.get("provider"), - "model": packet.get("model"), - "message_id": persisted_message_id, - }, - ) - - bot.updated_at = datetime.utcnow() - session.add(bot) - session.commit() - - publish_runtime_topic_packet( - engine, - bot_id, - packet, - source_channel, - persisted_message_id, - logger, - ) - - if persisted_message_id: - packet["message_id"] = persisted_message_id - if packet_type in {"ASSISTANT_MESSAGE", "USER_COMMAND", "BUS_EVENT"}: - _invalidate_bot_messages_cache(bot_id) - _invalidate_bot_detail_cache(bot_id) - return persisted_message_id - - -class WSConnectionManager: - def __init__(self): - self.connections: Dict[str, List[WebSocket]] = {} - - async def connect(self, bot_id: str, websocket: WebSocket): - await websocket.accept() - self.connections.setdefault(bot_id, []).append(websocket) - - def disconnect(self, bot_id: str, websocket: WebSocket): - conns = self.connections.get(bot_id, []) - if websocket in conns: - conns.remove(websocket) - if not conns and bot_id in self.connections: - del self.connections[bot_id] - - async def broadcast(self, bot_id: str, data: Dict[str, Any]): - conns = list(self.connections.get(bot_id, [])) - for ws in conns: - try: - await 
ws.send_json(data) - except Exception: - self.disconnect(bot_id, ws) - - -manager = WSConnectionManager() - -PANEL_ACCESS_PASSWORD_HEADER = "x-panel-password" - - -def _extract_bot_id_from_api_path(path: str) -> Optional[str]: - raw = str(path or "").strip() - if not raw.startswith("/api/bots/"): - return None - rest = raw[len("/api/bots/") :] - if not rest: - return None - bot_id_segment = rest.split("/", 1)[0].strip() - if not bot_id_segment: - return None - try: - decoded = unquote(bot_id_segment) - except Exception: - decoded = bot_id_segment - return str(decoded).strip() or None - - -def _get_supplied_panel_password_http(request: Request) -> str: - header_value = str(request.headers.get(PANEL_ACCESS_PASSWORD_HEADER) or "").strip() - if header_value: - return header_value - query_value = str(request.query_params.get("panel_access_password") or "").strip() - return query_value - - -def _get_supplied_bot_access_password_http(request: Request) -> str: - header_value = str(request.headers.get(BOT_ACCESS_PASSWORD_HEADER) or "").strip() - if header_value: - return header_value - query_value = str(request.query_params.get("bot_access_password") or "").strip() - return query_value - - -def _validate_panel_access_password(supplied: str) -> Optional[str]: - configured = str(PANEL_ACCESS_PASSWORD or "").strip() - if not configured: - return None - candidate = str(supplied or "").strip() - if not candidate: - return "Panel access password required" - if candidate != configured: - return "Invalid panel access password" - return None - - -def _validate_bot_access_password(bot: BotInstance, supplied: str) -> Optional[str]: - configured = str(getattr(bot, "access_password", "") or "").strip() - if not configured: - return None - candidate = str(supplied or "").strip() - if not candidate: - return "Bot access password required" - if candidate != configured: - return "Invalid bot access password" - return None - - -def _is_panel_protected_api_path(path: str, method: str = "GET") 
-> bool: - raw = str(path or "").strip() - verb = str(method or "GET").strip().upper() - if not raw.startswith("/api/"): - return False - if raw in { - "/api/panel/auth/status", - "/api/panel/auth/login", - "/api/health", - "/api/health/cache", - }: - return False - if _is_bot_panel_management_api_path(raw, verb): - return True - # Other bot-scoped APIs are not protected by panel password. - if _extract_bot_id_from_api_path(raw): - return False - return True - - -def _is_bot_panel_management_api_path(path: str, method: str = "GET") -> bool: - raw = str(path or "").strip() - verb = str(method or "GET").strip().upper() - if not raw.startswith("/api/bots/"): - return False - bot_id = _extract_bot_id_from_api_path(raw) - if not bot_id: - return False - return ( - raw.endswith("/start") - or raw.endswith("/stop") - or raw.endswith("/enable") - or raw.endswith("/disable") - or raw.endswith("/deactivate") - or (verb in {"PUT", "DELETE"} and raw == f"/api/bots/{bot_id}") - ) - - -def _is_bot_enable_api_path(path: str, method: str = "GET") -> bool: - raw = str(path or "").strip() - verb = str(method or "GET").strip().upper() - if verb != "POST": - return False - bot_id = _extract_bot_id_from_api_path(raw) - if not bot_id: - return False - return raw == f"/api/bots/{bot_id}/enable" - - -@app.middleware("http") -async def bot_access_password_guard(request: Request, call_next): - if request.method.upper() == "OPTIONS": - return await call_next(request) - - bot_id = _extract_bot_id_from_api_path(request.url.path) - if not bot_id: - if _is_panel_protected_api_path(request.url.path, request.method): - panel_error = _validate_panel_access_password(_get_supplied_panel_password_http(request)) - if panel_error: - return JSONResponse(status_code=401, content={"detail": panel_error}) - return await call_next(request) - - with Session(engine) as session: - bot = session.get(BotInstance, bot_id) - if not bot: - return JSONResponse(status_code=404, content={"detail": "Bot not found"}) - - 
if _is_bot_panel_management_api_path(request.url.path, request.method): - panel_error = _validate_panel_access_password(_get_supplied_panel_password_http(request)) - if panel_error: - bot_error = _validate_bot_access_password(bot, _get_supplied_bot_access_password_http(request)) - if bot_error: - return JSONResponse(status_code=401, content={"detail": bot_error}) - - enabled = bool(getattr(bot, "enabled", True)) - if not enabled: - is_enable_api = _is_bot_enable_api_path(request.url.path, request.method) - is_read_api = request.method.upper() == "GET" - is_auth_login = request.method.upper() == "POST" and request.url.path == f"/api/bots/{bot_id}/auth/login" - if not (is_enable_api or is_read_api or is_auth_login): - return JSONResponse(status_code=403, content={"detail": "Bot is disabled. Enable it first."}) - return await call_next(request) - - -@app.get("/api/panel/auth/status") -def get_panel_auth_status(): - configured = str(PANEL_ACCESS_PASSWORD or "").strip() - return {"enabled": bool(configured)} - - -@app.post("/api/panel/auth/login") -def panel_login(payload: PanelLoginRequest): - configured = str(PANEL_ACCESS_PASSWORD or "").strip() - if not configured: - return {"success": True, "enabled": False} - supplied = str(payload.password or "").strip() - if supplied != configured: - raise HTTPException(status_code=401, detail="Invalid panel access password") - return {"success": True, "enabled": True} - - -def docker_callback(bot_id: str, packet: Dict[str, Any]): - _persist_runtime_packet(bot_id, packet) - loop = getattr(app.state, "main_loop", None) - if not loop or not loop.is_running(): - return - asyncio.run_coroutine_threadsafe(manager.broadcast(bot_id, packet), loop) - - -def _cache_key_bots_list() -> str: - return "bots:list" - - -def _cache_key_bot_detail(bot_id: str) -> str: - return f"bot:detail:{bot_id}" - - -def _cache_key_bot_messages(bot_id: str, limit: int) -> str: - return f"bot:messages:v2:{bot_id}:limit:{limit}" - - -def 
_cache_key_bot_messages_page(bot_id: str, limit: int, before_id: Optional[int]) -> str: - cursor = str(int(before_id)) if isinstance(before_id, int) and before_id > 0 else "latest" - return f"bot:messages:page:v2:{bot_id}:before:{cursor}:limit:{limit}" - - -def _serialize_bot_message_row(bot_id: str, row: BotMessage) -> Dict[str, Any]: - created_at = row.created_at - if created_at.tzinfo is None: - created_at = created_at.replace(tzinfo=timezone.utc) - return { - "id": row.id, - "bot_id": row.bot_id, - "role": row.role, - "text": row.text, - "media": _parse_message_media(bot_id, getattr(row, "media_json", None)), - "feedback": str(getattr(row, "feedback", "") or "").strip() or None, - "ts": int(created_at.timestamp() * 1000), - } - - -def _resolve_local_day_range(date_text: str, tz_offset_minutes: Optional[int]) -> tuple[datetime, datetime]: - try: - local_day = datetime.strptime(str(date_text or "").strip(), "%Y-%m-%d") - except ValueError as exc: - raise HTTPException(status_code=400, detail="Invalid date, expected YYYY-MM-DD") from exc - - offset_minutes = 0 - if tz_offset_minutes is not None: - try: - offset_minutes = int(tz_offset_minutes) - except (TypeError, ValueError) as exc: - raise HTTPException(status_code=400, detail="Invalid timezone offset") from exc - - utc_start = local_day + timedelta(minutes=offset_minutes) - utc_end = utc_start + timedelta(days=1) - return utc_start, utc_end - - -def _cache_key_images() -> str: - return "images:list" - - -def _invalidate_bot_detail_cache(bot_id: str) -> None: - cache.delete(_cache_key_bots_list(), _cache_key_bot_detail(bot_id)) - - -def _invalidate_bot_messages_cache(bot_id: str) -> None: - cache.delete_prefix(f"bot:messages:{bot_id}:") - - -def _invalidate_images_cache() -> None: - cache.delete(_cache_key_images()) - - -@app.on_event("startup") -async def on_startup(): - app.state.main_loop = asyncio.get_running_loop() - print(f"📁 项目根目录: {PROJECT_ROOT}") - print(f"🗄️ 数据库引擎: {DATABASE_ENGINE} 
(echo={DATABASE_ECHO})") - print(f"📁 数据库连接: {DATABASE_URL_DISPLAY}") - print(f"🧠 Redis 缓存: {'enabled' if cache.ping() else 'disabled'} ({REDIS_URL if REDIS_ENABLED else 'not configured'})") - print(f"🔐 面板访问密码: {'enabled' if str(PANEL_ACCESS_PASSWORD or '').strip() else 'disabled'}") - init_database() - cache.delete_prefix("") - with Session(engine) as session: - pruned_events = prune_expired_activity_events(session, force=True) - if pruned_events > 0: - session.commit() - for bot in session.exec(select(BotInstance)).all(): - _migrate_bot_resources_store(bot.id) - running_bots = session.exec(select(BotInstance).where(BotInstance.docker_status == "RUNNING")).all() - for bot in running_bots: - docker_manager.ensure_monitor(bot.id, docker_callback) - - -def _provider_defaults(provider: str) -> tuple[str, str]: - p = provider.lower().strip() - if p in {"openrouter"}: - return "openrouter", "https://openrouter.ai/api/v1" - if p in {"dashscope", "aliyun", "qwen", "aliyun-qwen"}: - return "dashscope", "https://dashscope.aliyuncs.com/compatible-mode/v1" - if p in {"xunfei", "iflytek", "xfyun"}: - return "openai", "https://spark-api-open.xf-yun.com/v1" - if p in {"vllm"}: - return "openai", "" - if p in {"kimi", "moonshot"}: - return "kimi", "https://api.moonshot.cn/v1" - if p in {"minimax"}: - return "minimax", "https://api.minimax.chat/v1" - return p, "" - - -@app.get("/api/system/defaults") -def get_system_defaults(): - md_templates = load_agent_md_templates() - topic_presets = load_topic_presets_template() - platform_settings = get_platform_settings_snapshot() - speech_settings = get_speech_runtime_settings() - return { - "templates": { - "soul_md": md_templates.get("soul_md") or DEFAULT_SOUL_MD, - "agents_md": md_templates.get("agents_md") or DEFAULT_AGENTS_MD, - "user_md": md_templates.get("user_md") or DEFAULT_USER_MD, - "tools_md": md_templates.get("tools_md") or DEFAULT_TOOLS_MD, - "identity_md": md_templates.get("identity_md") or DEFAULT_IDENTITY_MD, - }, - 
"limits": { - "upload_max_mb": platform_settings.upload_max_mb, - }, - "workspace": { - "download_extensions": list(platform_settings.workspace_download_extensions), - "allowed_attachment_extensions": list(platform_settings.allowed_attachment_extensions), - }, - "bot": { - "system_timezone": _get_default_system_timezone(), - }, - "loading_page": platform_settings.loading_page.model_dump(), - "chat": { - "pull_page_size": platform_settings.chat_pull_page_size, - "page_size": platform_settings.page_size, - "command_auto_unlock_seconds": platform_settings.command_auto_unlock_seconds, - }, - "topic_presets": topic_presets.get("presets") or TOPIC_PRESET_TEMPLATES, - "speech": { - "enabled": speech_settings["enabled"], - "model": speech_settings["model"], - "device": speech_settings["device"], - "max_audio_seconds": speech_settings["max_audio_seconds"], - "default_language": speech_settings["default_language"], - }, - } - - -def _write_json_atomic(path: str, payload: Dict[str, Any]) -> None: - os.makedirs(os.path.dirname(path), exist_ok=True) - tmp = f"{path}.tmp" - with open(tmp, "w", encoding="utf-8") as f: - json.dump(payload, f, ensure_ascii=False, indent=2) - os.replace(tmp, path) - - -def _write_text_atomic(path: str, content: str) -> None: - os.makedirs(os.path.dirname(path), exist_ok=True) - tmp = f"{path}.tmp" - with open(tmp, "w", encoding="utf-8", newline="") as f: - f.write(str(content or "")) - os.replace(tmp, path) - - -@app.get("/api/system/templates") -def get_system_templates(): - return { - "agent_md_templates": load_agent_md_templates(), - "topic_presets": load_topic_presets_template(), - } - - -@app.put("/api/system/templates") -def update_system_templates(payload: SystemTemplatesUpdateRequest): - if payload.agent_md_templates is not None: - sanitized_agent: Dict[str, str] = {} - for key in ("agents_md", "soul_md", "user_md", "tools_md", "identity_md"): - sanitized_agent[key] = str(payload.agent_md_templates.get(key, "") or "").replace("\r\n", "\n") - 
_write_json_atomic(str(AGENT_MD_TEMPLATES_FILE), sanitized_agent) - - if payload.topic_presets is not None: - presets = payload.topic_presets.get("presets") if isinstance(payload.topic_presets, dict) else None - if presets is None: - normalized_topic: Dict[str, Any] = {"presets": []} - elif isinstance(presets, list): - normalized_topic = {"presets": [dict(row) for row in presets if isinstance(row, dict)]} - else: - raise HTTPException(status_code=400, detail="topic_presets.presets must be an array") - _write_json_atomic(str(TOPIC_PRESETS_TEMPLATES_FILE), normalized_topic) - - return { - "status": "ok", - "agent_md_templates": load_agent_md_templates(), - "topic_presets": load_topic_presets_template(), - } - - -@app.get("/api/health") -def get_health(): - try: - with Session(engine) as session: - session.exec(select(BotInstance).limit(1)).first() - return {"status": "ok", "database": DATABASE_ENGINE} - except Exception as e: - raise HTTPException(status_code=503, detail=f"database check failed: {e}") - - -@app.get("/api/health/cache") -def get_cache_health(): - redis_url = str(REDIS_URL or "").strip() - configured = bool(REDIS_ENABLED and redis_url) - client_enabled = bool(getattr(cache, "enabled", False)) - reachable = bool(cache.ping()) if client_enabled else False - status = "ok" - if configured and not reachable: - status = "degraded" - return { - "status": status, - "cache": { - "configured": configured, - "enabled": client_enabled, - "reachable": reachable, - "prefix": REDIS_PREFIX, - }, - } - - -def _config_json_path(bot_id: str) -> str: - return os.path.join(_bot_data_root(bot_id), "config.json") - - -def _read_bot_config(bot_id: str) -> Dict[str, Any]: - path = _config_json_path(bot_id) - if not os.path.isfile(path): - return {} - try: - with open(path, "r", encoding="utf-8") as f: - data = json.load(f) - return data if isinstance(data, dict) else {} - except Exception: - return {} - - -def _write_bot_config(bot_id: str, config_data: Dict[str, Any]) -> 
None: - path = _config_json_path(bot_id) - os.makedirs(os.path.dirname(path), exist_ok=True) - tmp = f"{path}.tmp" - with open(tmp, "w", encoding="utf-8") as f: - json.dump(config_data, f, ensure_ascii=False, indent=2) - os.replace(tmp, path) - - -def _resources_json_path(bot_id: str) -> str: - return os.path.join(_bot_data_root(bot_id), "resources.json") - - -def _write_bot_resources(bot_id: str, cpu_cores: Any, memory_mb: Any, storage_gb: Any) -> None: - normalized = _normalize_resource_limits(cpu_cores, memory_mb, storage_gb) - payload = { - "cpuCores": normalized["cpu_cores"], - "memoryMB": normalized["memory_mb"], - "storageGB": normalized["storage_gb"], - } - path = _resources_json_path(bot_id) - os.makedirs(os.path.dirname(path), exist_ok=True) - tmp = f"{path}.tmp" - with open(tmp, "w", encoding="utf-8") as f: - json.dump(payload, f, ensure_ascii=False, indent=2) - os.replace(tmp, path) - - -def _read_bot_resources(bot_id: str, config_data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: - cpu_raw: Any = None - memory_raw: Any = None - storage_raw: Any = None - - path = _resources_json_path(bot_id) - if os.path.isfile(path): - try: - with open(path, "r", encoding="utf-8") as f: - data = json.load(f) - if isinstance(data, dict): - cpu_raw = data.get("cpuCores", data.get("cpu_cores")) - memory_raw = data.get("memoryMB", data.get("memory_mb")) - storage_raw = data.get("storageGB", data.get("storage_gb")) - except Exception: - pass - - # Backward compatibility: read old runtime.resources only if new file is missing/incomplete. 
- if cpu_raw is None or memory_raw is None or storage_raw is None: - cfg = config_data if isinstance(config_data, dict) else _read_bot_config(bot_id) - runtime_cfg = cfg.get("runtime") - if isinstance(runtime_cfg, dict): - resources_raw = runtime_cfg.get("resources") - if isinstance(resources_raw, dict): - if cpu_raw is None: - cpu_raw = resources_raw.get("cpuCores", resources_raw.get("cpu_cores")) - if memory_raw is None: - memory_raw = resources_raw.get("memoryMB", resources_raw.get("memory_mb")) - if storage_raw is None: - storage_raw = resources_raw.get("storageGB", resources_raw.get("storage_gb")) - - return _normalize_resource_limits(cpu_raw, memory_raw, storage_raw) - - -def _migrate_bot_resources_store(bot_id: str) -> None: - config_data = _read_bot_config(bot_id) - runtime_cfg = config_data.get("runtime") - resources_raw: Dict[str, Any] = {} - if isinstance(runtime_cfg, dict): - legacy_raw = runtime_cfg.get("resources") - if isinstance(legacy_raw, dict): - resources_raw = legacy_raw - - path = _resources_json_path(bot_id) - if not os.path.isfile(path): - _write_bot_resources( - bot_id, - resources_raw.get("cpuCores", resources_raw.get("cpu_cores")), - resources_raw.get("memoryMB", resources_raw.get("memory_mb")), - resources_raw.get("storageGB", resources_raw.get("storage_gb")), - ) - - if isinstance(runtime_cfg, dict) and "resources" in runtime_cfg: - runtime_cfg.pop("resources", None) - if not runtime_cfg: - config_data.pop("runtime", None) - _write_bot_config(bot_id, config_data) - - -def _normalize_channel_extra(raw: Any) -> Dict[str, Any]: - if not isinstance(raw, dict): - return {} - return raw - - -def _normalize_allow_from(raw: Any) -> List[str]: - rows: List[str] = [] - if isinstance(raw, list): - for item in raw: - text = str(item or "").strip() - if text and text not in rows: - rows.append(text) - if not rows: - return ["*"] - return rows - - -def _read_global_delivery_flags(channels_cfg: Any) -> tuple[bool, bool]: - if not 
isinstance(channels_cfg, dict): - return False, False - send_progress = channels_cfg.get("sendProgress") - send_tool_hints = channels_cfg.get("sendToolHints") - dashboard_cfg = channels_cfg.get("dashboard") - if isinstance(dashboard_cfg, dict): - if send_progress is None and "sendProgress" in dashboard_cfg: - send_progress = dashboard_cfg.get("sendProgress") - if send_tool_hints is None and "sendToolHints" in dashboard_cfg: - send_tool_hints = dashboard_cfg.get("sendToolHints") - return bool(send_progress), bool(send_tool_hints) - - -def _channel_cfg_to_api_dict(bot_id: str, ctype: str, cfg: Dict[str, Any]) -> Dict[str, Any]: - ctype = str(ctype or "").strip().lower() - enabled = bool(cfg.get("enabled", True)) - port = max(1, min(int(cfg.get("port", 8080) or 8080), 65535)) - extra: Dict[str, Any] = {} - external_app_id = "" - app_secret = "" - - if ctype == "feishu": - external_app_id = str(cfg.get("appId") or "") - app_secret = str(cfg.get("appSecret") or "") - extra = { - "encryptKey": cfg.get("encryptKey", ""), - "verificationToken": cfg.get("verificationToken", ""), - "allowFrom": _normalize_allow_from(cfg.get("allowFrom", [])), - } - elif ctype == "dingtalk": - external_app_id = str(cfg.get("clientId") or "") - app_secret = str(cfg.get("clientSecret") or "") - extra = {"allowFrom": _normalize_allow_from(cfg.get("allowFrom", []))} - elif ctype == "telegram": - app_secret = str(cfg.get("token") or "") - extra = { - "proxy": cfg.get("proxy", ""), - "replyToMessage": bool(cfg.get("replyToMessage", False)), - "allowFrom": _normalize_allow_from(cfg.get("allowFrom", [])), - } - elif ctype == "slack": - external_app_id = str(cfg.get("botToken") or "") - app_secret = str(cfg.get("appToken") or "") - extra = { - "mode": cfg.get("mode", "socket"), - "replyInThread": bool(cfg.get("replyInThread", True)), - "groupPolicy": cfg.get("groupPolicy", "mention"), - "groupAllowFrom": cfg.get("groupAllowFrom", []), - "reactEmoji": cfg.get("reactEmoji", "eyes"), - } - elif ctype == 
"qq": - external_app_id = str(cfg.get("appId") or "") - app_secret = str(cfg.get("secret") or "") - extra = {"allowFrom": _normalize_allow_from(cfg.get("allowFrom", []))} - elif ctype == "email": - extra = { - "consentGranted": bool(cfg.get("consentGranted", False)), - "imapHost": str(cfg.get("imapHost") or ""), - "imapPort": int(cfg.get("imapPort") or 993), - "imapUsername": str(cfg.get("imapUsername") or ""), - "imapPassword": str(cfg.get("imapPassword") or ""), - "imapMailbox": str(cfg.get("imapMailbox") or "INBOX"), - "imapUseSsl": bool(cfg.get("imapUseSsl", True)), - "smtpHost": str(cfg.get("smtpHost") or ""), - "smtpPort": int(cfg.get("smtpPort") or 587), - "smtpUsername": str(cfg.get("smtpUsername") or ""), - "smtpPassword": str(cfg.get("smtpPassword") or ""), - "smtpUseTls": bool(cfg.get("smtpUseTls", True)), - "smtpUseSsl": bool(cfg.get("smtpUseSsl", False)), - "fromAddress": str(cfg.get("fromAddress") or ""), - "autoReplyEnabled": bool(cfg.get("autoReplyEnabled", True)), - "pollIntervalSeconds": int(cfg.get("pollIntervalSeconds") or 30), - "markSeen": bool(cfg.get("markSeen", True)), - "maxBodyChars": int(cfg.get("maxBodyChars") or 12000), - "subjectPrefix": str(cfg.get("subjectPrefix") or "Re: "), - "allowFrom": _normalize_allow_from(cfg.get("allowFrom", [])), - } - else: - external_app_id = str( - cfg.get("appId") or cfg.get("clientId") or cfg.get("botToken") or cfg.get("externalAppId") or "" - ) - app_secret = str( - cfg.get("appSecret") or cfg.get("clientSecret") or cfg.get("secret") or cfg.get("token") or cfg.get("appToken") or "" - ) - extra = {k: v for k, v in cfg.items() if k not in {"enabled", "port", "appId", "clientId", "botToken", "externalAppId", "appSecret", "clientSecret", "secret", "token", "appToken"}} - - return { - "id": ctype, - "bot_id": bot_id, - "channel_type": ctype, - "external_app_id": external_app_id, - "app_secret": app_secret, - "internal_port": port, - "is_active": enabled, - "extra_config": extra, - "locked": ctype == 
"dashboard", - } - - -def _channel_api_to_cfg(row: Dict[str, Any]) -> Dict[str, Any]: - ctype = str(row.get("channel_type") or "").strip().lower() - enabled = bool(row.get("is_active", True)) - extra = _normalize_channel_extra(row.get("extra_config")) - external_app_id = str(row.get("external_app_id") or "") - app_secret = str(row.get("app_secret") or "") - port = max(1, min(int(row.get("internal_port") or 8080), 65535)) - - if ctype == "feishu": - return { - "enabled": enabled, - "appId": external_app_id, - "appSecret": app_secret, - "encryptKey": extra.get("encryptKey", ""), - "verificationToken": extra.get("verificationToken", ""), - "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])), - } - if ctype == "dingtalk": - return { - "enabled": enabled, - "clientId": external_app_id, - "clientSecret": app_secret, - "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])), - } - if ctype == "telegram": - return { - "enabled": enabled, - "token": app_secret, - "proxy": extra.get("proxy", ""), - "replyToMessage": bool(extra.get("replyToMessage", False)), - "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])), - } - if ctype == "slack": - return { - "enabled": enabled, - "mode": extra.get("mode", "socket"), - "botToken": external_app_id, - "appToken": app_secret, - "replyInThread": bool(extra.get("replyInThread", True)), - "groupPolicy": extra.get("groupPolicy", "mention"), - "groupAllowFrom": extra.get("groupAllowFrom", []), - "reactEmoji": extra.get("reactEmoji", "eyes"), - } - if ctype == "qq": - return { - "enabled": enabled, - "appId": external_app_id, - "secret": app_secret, - "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])), - } - if ctype == "email": - return { - "enabled": enabled, - "consentGranted": bool(extra.get("consentGranted", False)), - "imapHost": str(extra.get("imapHost") or ""), - "imapPort": max(1, min(int(extra.get("imapPort") or 993), 65535)), - "imapUsername": str(extra.get("imapUsername") or ""), - 
"imapPassword": str(extra.get("imapPassword") or ""), - "imapMailbox": str(extra.get("imapMailbox") or "INBOX"), - "imapUseSsl": bool(extra.get("imapUseSsl", True)), - "smtpHost": str(extra.get("smtpHost") or ""), - "smtpPort": max(1, min(int(extra.get("smtpPort") or 587), 65535)), - "smtpUsername": str(extra.get("smtpUsername") or ""), - "smtpPassword": str(extra.get("smtpPassword") or ""), - "smtpUseTls": bool(extra.get("smtpUseTls", True)), - "smtpUseSsl": bool(extra.get("smtpUseSsl", False)), - "fromAddress": str(extra.get("fromAddress") or ""), - "autoReplyEnabled": bool(extra.get("autoReplyEnabled", True)), - "pollIntervalSeconds": max(5, int(extra.get("pollIntervalSeconds") or 30)), - "markSeen": bool(extra.get("markSeen", True)), - "maxBodyChars": max(1, int(extra.get("maxBodyChars") or 12000)), - "subjectPrefix": str(extra.get("subjectPrefix") or "Re: "), - "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])), - } - merged = dict(extra) - merged.update( - { - "enabled": enabled, - "appId": external_app_id, - "appSecret": app_secret, - "port": port, - } - ) - return merged - - -def _get_bot_channels_from_config(bot: BotInstance) -> List[Dict[str, Any]]: - config_data = _read_bot_config(bot.id) - channels_cfg = config_data.get("channels") - if not isinstance(channels_cfg, dict): - channels_cfg = {} - - send_progress, send_tool_hints = _read_global_delivery_flags(channels_cfg) - rows: List[Dict[str, Any]] = [ - { - "id": "dashboard", - "bot_id": bot.id, - "channel_type": "dashboard", - "external_app_id": f"dashboard-{bot.id}", - "app_secret": "", - "internal_port": 9000, - "is_active": True, - "extra_config": { - "sendProgress": send_progress, - "sendToolHints": send_tool_hints, - }, - "locked": True, - } - ] - - for ctype, cfg in channels_cfg.items(): - if ctype in {"sendProgress", "sendToolHints", "dashboard"}: - continue - if not isinstance(cfg, dict): - continue - rows.append(_channel_cfg_to_api_dict(bot.id, ctype, cfg)) - return rows - - -def 
_normalize_initial_channels(bot_id: str, channels: Optional[List[ChannelConfigRequest]]) -> List[Dict[str, Any]]: - rows: List[Dict[str, Any]] = [] - seen_types: set[str] = set() - for c in channels or []: - ctype = (c.channel_type or "").strip().lower() - if not ctype or ctype == "dashboard" or ctype in seen_types: - continue - seen_types.add(ctype) - rows.append( - { - "id": ctype, - "bot_id": bot_id, - "channel_type": ctype, - "external_app_id": (c.external_app_id or "").strip() or f"{ctype}-{bot_id}", - "app_secret": (c.app_secret or "").strip(), - "internal_port": max(1, min(int(c.internal_port or 8080), 65535)), - "is_active": bool(c.is_active), - "extra_config": _normalize_channel_extra(c.extra_config), - "locked": False, - } - ) - return rows - - -def _parse_message_media(bot_id: str, media_raw: Optional[str]) -> List[str]: - if not media_raw: - return [] - try: - parsed = json.loads(media_raw) - return _normalize_media_list(parsed, bot_id) - except Exception: - return [] - - -_ENV_KEY_RE = re.compile(r"^[A-Z_][A-Z0-9_]{0,127}$") - - -def _normalize_env_params(raw: Any) -> Dict[str, str]: - if not isinstance(raw, dict): - return {} - rows: Dict[str, str] = {} - for k, v in raw.items(): - key = str(k or "").strip().upper() - if not key or not _ENV_KEY_RE.fullmatch(key): - continue - rows[key] = str(v or "").strip() - return rows - - -def _get_default_system_timezone() -> str: - value = str(DEFAULT_BOT_SYSTEM_TIMEZONE or "").strip() or "Asia/Shanghai" - try: - ZoneInfo(value) - return value - except Exception: - return "Asia/Shanghai" - - -def _normalize_system_timezone(raw: Any) -> str: - value = str(raw or "").strip() - if not value: - return _get_default_system_timezone() - try: - ZoneInfo(value) - except Exception as exc: - raise ValueError("Invalid system timezone. 
Use an IANA timezone such as Asia/Shanghai.") from exc - return value - - -def _resolve_bot_env_params(bot_id: str, raw: Optional[Dict[str, str]] = None) -> Dict[str, str]: - env_params = _normalize_env_params(raw if isinstance(raw, dict) else _read_env_store(bot_id)) - try: - env_params["TZ"] = _normalize_system_timezone(env_params.get("TZ")) - except ValueError: - env_params["TZ"] = _get_default_system_timezone() - return env_params - - -_MCP_SERVER_NAME_RE = re.compile(r"^[A-Za-z0-9._-]{1,64}$") - - -def _normalize_mcp_servers(raw: Any) -> Dict[str, Dict[str, Any]]: - if not isinstance(raw, dict): - return {} - rows: Dict[str, Dict[str, Any]] = {} - for server_name, server_cfg in raw.items(): - name = str(server_name or "").strip() - if not name or not _MCP_SERVER_NAME_RE.fullmatch(name): - continue - if not isinstance(server_cfg, dict): - continue - - url = str(server_cfg.get("url") or "").strip() - if not url: - continue - - transport_type = str(server_cfg.get("type") or "streamableHttp").strip() - if transport_type not in {"streamableHttp", "sse"}: - transport_type = "streamableHttp" - - headers_raw = server_cfg.get("headers") - headers: Dict[str, str] = {} - if isinstance(headers_raw, dict): - for k, v in headers_raw.items(): - hk = str(k or "").strip() - if not hk: - continue - headers[hk] = str(v or "").strip() - - timeout_raw = server_cfg.get("toolTimeout", 60) - try: - timeout = int(timeout_raw) - except Exception: - timeout = 60 - timeout = max(1, min(timeout, 600)) - - rows[name] = { - "type": transport_type, - "url": url, - "headers": headers, - "toolTimeout": timeout, - } - return rows - - -def _merge_mcp_servers_preserving_extras( - current_raw: Any, - normalized: Dict[str, Dict[str, Any]], -) -> Dict[str, Dict[str, Any]]: - """Preserve unknown per-server fields already present in config.json. - - Dashboard only edits a subset of MCP fields (type/url/headers/toolTimeout). 
- Some MCP providers may rely on additional keys; dropping them can break startup. - """ - current_map = current_raw if isinstance(current_raw, dict) else {} - merged: Dict[str, Dict[str, Any]] = {} - for name, normalized_cfg in normalized.items(): - base = current_map.get(name) - base_cfg = dict(base) if isinstance(base, dict) else {} - next_cfg = dict(base_cfg) - next_cfg.update(normalized_cfg) - merged[name] = next_cfg - return merged - - -def _sanitize_mcp_servers_in_config_data(config_data: Dict[str, Any]) -> Dict[str, Dict[str, Any]]: - """Normalize tools.mcpServers and drop hidden invalid entries safely. - - Returns the sanitized mcpServers map written into config_data["tools"]["mcpServers"]. - """ - if not isinstance(config_data, dict): - return {} - tools_cfg = config_data.get("tools") - if not isinstance(tools_cfg, dict): - tools_cfg = {} - current_raw = tools_cfg.get("mcpServers") - normalized = _normalize_mcp_servers(current_raw) - merged = _merge_mcp_servers_preserving_extras(current_raw, normalized) - tools_cfg["mcpServers"] = merged - config_data["tools"] = tools_cfg - return merged - - -def _parse_env_params(raw: Any) -> Dict[str, str]: - return _normalize_env_params(raw) - - -def _safe_float(raw: Any, default: float) -> float: - try: - return float(raw) - except Exception: - return default - - -def _safe_int(raw: Any, default: int) -> int: - try: - return int(raw) - except Exception: - return default - - -def _normalize_resource_limits(cpu_cores: Any, memory_mb: Any, storage_gb: Any) -> Dict[str, Any]: - cpu = _safe_float(cpu_cores, 1.0) - mem = _safe_int(memory_mb, 1024) - storage = _safe_int(storage_gb, 10) - if cpu < 0: - cpu = 1.0 - if mem < 0: - mem = 1024 - if storage < 0: - storage = 10 - normalized_cpu = 0.0 if cpu == 0 else min(16.0, max(0.1, cpu)) - normalized_mem = 0 if mem == 0 else min(65536, max(256, mem)) - normalized_storage = 0 if storage == 0 else min(1024, max(1, storage)) - return { - "cpu_cores": normalized_cpu, - "memory_mb": 
normalized_mem, - "storage_gb": normalized_storage, - } - - -def _read_workspace_md(bot_id: str, filename: str, default_value: str) -> str: - path = os.path.join(_workspace_root(bot_id), filename) - if not os.path.isfile(path): - return default_value - try: - with open(path, "r", encoding="utf-8") as f: - return f.read().strip() - except Exception: - return default_value - - -def _read_bot_runtime_snapshot(bot: BotInstance) -> Dict[str, Any]: - config_data = _read_bot_config(bot.id) - env_params = _resolve_bot_env_params(bot.id) - - provider_name = "" - provider_cfg: Dict[str, Any] = {} - providers_cfg = config_data.get("providers") - if isinstance(providers_cfg, dict): - for p_name, p_cfg in providers_cfg.items(): - provider_name = str(p_name or "").strip() - if isinstance(p_cfg, dict): - provider_cfg = p_cfg - break - - agents_defaults: Dict[str, Any] = {} - agents_cfg = config_data.get("agents") - if isinstance(agents_cfg, dict): - defaults = agents_cfg.get("defaults") - if isinstance(defaults, dict): - agents_defaults = defaults - - channels_cfg = config_data.get("channels") - send_progress, send_tool_hints = _read_global_delivery_flags(channels_cfg) - - llm_provider = provider_name or "dashscope" - llm_model = str(agents_defaults.get("model") or "") - api_key = str(provider_cfg.get("apiKey") or "").strip() - api_base = str(provider_cfg.get("apiBase") or "").strip() - api_base_lower = api_base.lower() - provider_alias = str(provider_cfg.get("dashboardProviderAlias") or "").strip().lower() - if llm_provider == "openai" and provider_alias in {"xunfei", "iflytek", "xfyun", "vllm"}: - llm_provider = "xunfei" if provider_alias in {"iflytek", "xfyun"} else provider_alias - elif llm_provider == "openai" and ("spark-api-open.xf-yun.com" in api_base_lower or "xf-yun.com" in api_base_lower): - llm_provider = "xunfei" - - soul_md = _read_workspace_md(bot.id, "SOUL.md", DEFAULT_SOUL_MD) - resources = _read_bot_resources(bot.id, config_data=config_data) - return { - 
"llm_provider": llm_provider, - "llm_model": llm_model, - "api_key": api_key, - "api_base": api_base, - "temperature": _safe_float(agents_defaults.get("temperature"), 0.2), - "top_p": _safe_float(agents_defaults.get("topP"), 1.0), - "max_tokens": _safe_int(agents_defaults.get("maxTokens"), 8192), - "cpu_cores": resources["cpu_cores"], - "memory_mb": resources["memory_mb"], - "storage_gb": resources["storage_gb"], - "system_timezone": env_params.get("TZ") or _get_default_system_timezone(), - "send_progress": send_progress, - "send_tool_hints": send_tool_hints, - "soul_md": soul_md, - "agents_md": _read_workspace_md(bot.id, "AGENTS.md", DEFAULT_AGENTS_MD), - "user_md": _read_workspace_md(bot.id, "USER.md", DEFAULT_USER_MD), - "tools_md": _read_workspace_md(bot.id, "TOOLS.md", DEFAULT_TOOLS_MD), - "identity_md": _read_workspace_md(bot.id, "IDENTITY.md", DEFAULT_IDENTITY_MD), - "system_prompt": soul_md, - } - - -def _serialize_bot(bot: BotInstance) -> Dict[str, Any]: - runtime = _read_bot_runtime_snapshot(bot) - return { - "id": bot.id, - "name": bot.name, - "enabled": bool(getattr(bot, "enabled", True)), - "access_password": bot.access_password or "", - "has_access_password": bool(str(bot.access_password or "").strip()), - "avatar_model": "base", - "avatar_skin": "blue_suit", - "image_tag": bot.image_tag, - "llm_provider": runtime.get("llm_provider") or "", - "llm_model": runtime.get("llm_model") or "", - "system_prompt": runtime.get("system_prompt") or "", - "api_base": runtime.get("api_base") or "", - "temperature": _safe_float(runtime.get("temperature"), 0.2), - "top_p": _safe_float(runtime.get("top_p"), 1.0), - "max_tokens": _safe_int(runtime.get("max_tokens"), 8192), - "cpu_cores": _safe_float(runtime.get("cpu_cores"), 1.0), - "memory_mb": _safe_int(runtime.get("memory_mb"), 1024), - "storage_gb": _safe_int(runtime.get("storage_gb"), 10), - "system_timezone": str(runtime.get("system_timezone") or _get_default_system_timezone()), - "send_progress": 
bool(runtime.get("send_progress")), - "send_tool_hints": bool(runtime.get("send_tool_hints")), - "soul_md": runtime.get("soul_md") or "", - "agents_md": runtime.get("agents_md") or "", - "user_md": runtime.get("user_md") or "", - "tools_md": runtime.get("tools_md") or "", - "identity_md": runtime.get("identity_md") or "", - "workspace_dir": bot.workspace_dir, - "docker_status": bot.docker_status, - "current_state": bot.current_state, - "last_action": bot.last_action, - "created_at": bot.created_at, - "updated_at": bot.updated_at, - } - - -def _serialize_bot_list_item(bot: BotInstance) -> Dict[str, Any]: - return { - "id": bot.id, - "name": bot.name, - "enabled": bool(getattr(bot, "enabled", True)), - "has_access_password": bool(str(bot.access_password or "").strip()), - "image_tag": bot.image_tag, - "docker_status": bot.docker_status, - "current_state": bot.current_state, - "last_action": bot.last_action, - "updated_at": bot.updated_at, - } - - -_AGENT_LOOP_READY_MARKER = "Agent loop started" - - -async def _wait_for_agent_loop_ready( - bot_id: str, - timeout_seconds: float = 12.0, - poll_interval_seconds: float = 0.5, -) -> bool: - deadline = time.monotonic() + max(1.0, timeout_seconds) - marker = _AGENT_LOOP_READY_MARKER.lower() - while time.monotonic() < deadline: - logs = docker_manager.get_recent_logs(bot_id, tail=200) - if any(marker in str(line or "").lower() for line in logs): - return True - await asyncio.sleep(max(0.1, poll_interval_seconds)) - return False - - -async def _record_agent_loop_ready_warning( - bot_id: str, - timeout_seconds: float = 12.0, - poll_interval_seconds: float = 0.5, -) -> None: - try: - agent_loop_ready = await _wait_for_agent_loop_ready( - bot_id, - timeout_seconds=timeout_seconds, - poll_interval_seconds=poll_interval_seconds, - ) - if agent_loop_ready: - return - if docker_manager.get_bot_status(bot_id) != "RUNNING": - return - detail = ( - "Bot container started, but ready marker was not found in logs within " - 
f"{int(timeout_seconds)}s. Check bot logs or MCP config if the bot stays unavailable." - ) - logger.warning("bot_id=%s agent loop ready marker not found within %ss", bot_id, timeout_seconds) - with Session(engine) as background_session: - if not background_session.get(BotInstance, bot_id): - return - record_activity_event( - background_session, - bot_id, - "bot_warning", - channel="system", - detail=detail, - metadata={ - "kind": "agent_loop_ready_timeout", - "marker": _AGENT_LOOP_READY_MARKER, - "timeout_seconds": timeout_seconds, - }, - ) - background_session.commit() - _invalidate_bot_detail_cache(bot_id) - except Exception: - logger.exception("Failed to record agent loop readiness warning for bot_id=%s", bot_id) - -def _sync_workspace_channels( - session: Session, - bot_id: str, - channels_override: Optional[List[Dict[str, Any]]] = None, - global_delivery_override: Optional[Dict[str, Any]] = None, - runtime_overrides: Optional[Dict[str, Any]] = None, -) -> None: - bot = session.get(BotInstance, bot_id) - if not bot: - return - snapshot = _read_bot_runtime_snapshot(bot) - bot_data: Dict[str, Any] = { - "name": bot.name, - "system_prompt": snapshot.get("system_prompt") or DEFAULT_SOUL_MD, - "soul_md": snapshot.get("soul_md") or DEFAULT_SOUL_MD, - "agents_md": snapshot.get("agents_md") or DEFAULT_AGENTS_MD, - "user_md": snapshot.get("user_md") or DEFAULT_USER_MD, - "tools_md": snapshot.get("tools_md") or DEFAULT_TOOLS_MD, - "identity_md": snapshot.get("identity_md") or DEFAULT_IDENTITY_MD, - "llm_provider": snapshot.get("llm_provider") or "dashscope", - "llm_model": snapshot.get("llm_model") or "", - "api_key": snapshot.get("api_key") or "", - "api_base": snapshot.get("api_base") or "", - "temperature": _safe_float(snapshot.get("temperature"), 0.2), - "top_p": _safe_float(snapshot.get("top_p"), 1.0), - "max_tokens": _safe_int(snapshot.get("max_tokens"), 8192), - "cpu_cores": _safe_float(snapshot.get("cpu_cores"), 1.0), - "memory_mb": 
_safe_int(snapshot.get("memory_mb"), 1024), - "storage_gb": _safe_int(snapshot.get("storage_gb"), 10), - "send_progress": bool(snapshot.get("send_progress")), - "send_tool_hints": bool(snapshot.get("send_tool_hints")), - } - if isinstance(runtime_overrides, dict): - for key, value in runtime_overrides.items(): - # Keep existing runtime secrets/config when caller sends empty placeholder values. - if key in {"api_key", "llm_provider", "llm_model"}: - text = str(value or "").strip() - if not text: - continue - bot_data[key] = text - continue - if key == "api_base": - # api_base may be intentionally empty (use provider default), so keep explicit value. - bot_data[key] = str(value or "").strip() - continue - bot_data[key] = value - resources = _normalize_resource_limits( - bot_data.get("cpu_cores"), - bot_data.get("memory_mb"), - bot_data.get("storage_gb"), - ) - bot_data["cpu_cores"] = resources["cpu_cores"] - bot_data["memory_mb"] = resources["memory_mb"] - bot_data["storage_gb"] = resources["storage_gb"] - send_progress = bool(bot_data.get("send_progress", False)) - send_tool_hints = bool(bot_data.get("send_tool_hints", False)) - if isinstance(global_delivery_override, dict): - if "sendProgress" in global_delivery_override: - send_progress = bool(global_delivery_override.get("sendProgress")) - if "sendToolHints" in global_delivery_override: - send_tool_hints = bool(global_delivery_override.get("sendToolHints")) - - channels_data = channels_override if channels_override is not None else _get_bot_channels_from_config(bot) - bot_data["send_progress"] = send_progress - bot_data["send_tool_hints"] = send_tool_hints - normalized_channels: List[Dict[str, Any]] = [] - for row in channels_data: - ctype = str(row.get("channel_type") or "").strip().lower() - if not ctype or ctype == "dashboard": - continue - normalized_channels.append( - { - "channel_type": ctype, - "external_app_id": str(row.get("external_app_id") or ""), - "app_secret": str(row.get("app_secret") or ""), - 
"internal_port": max(1, min(int(row.get("internal_port") or 8080), 65535)), - "is_active": bool(row.get("is_active", True)), - "extra_config": _normalize_channel_extra(row.get("extra_config")), - } - ) - config_manager.update_workspace( - bot_id=bot_id, - bot_data=bot_data, - channels=normalized_channels, - ) - _write_bot_resources( - bot_id, - bot_data.get("cpu_cores"), - bot_data.get("memory_mb"), - bot_data.get("storage_gb"), - ) - - -def reconcile_image_registry(session: Session): - """Only reconcile status for images explicitly registered in DB.""" - db_images = session.exec(select(NanobotImage)).all() - for img in db_images: - if docker_manager.has_image(img.tag): - try: - docker_img = docker_manager.client.images.get(img.tag) if docker_manager.client else None - img.image_id = docker_img.id if docker_img else img.image_id - except Exception: - pass - img.status = "READY" - else: - img.status = "UNKNOWN" - session.add(img) - - session.commit() - - -def _workspace_root(bot_id: str) -> str: - return os.path.abspath(os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot", "workspace")) - - -def _bot_data_root(bot_id: str) -> str: - return os.path.abspath(os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot")) - - -def _skills_root(bot_id: str) -> str: - return os.path.join(_workspace_root(bot_id), "skills") - - -def _is_valid_top_level_skill_name(name: str) -> bool: - text = str(name or "").strip() - if not text: - return False - if "/" in text or "\\" in text: - return False - if text in {".", ".."}: - return False - return True - - -def _read_skill_description(entry_path: str) -> str: - candidates: List[str] = [] - if os.path.isdir(entry_path): - candidates = [ - os.path.join(entry_path, "SKILL.md"), - os.path.join(entry_path, "skill.md"), - os.path.join(entry_path, "README.md"), - os.path.join(entry_path, "readme.md"), - ] - elif entry_path.lower().endswith(".md"): - candidates = [entry_path] - - for candidate in candidates: - if not os.path.isfile(candidate): - 
continue - try: - with open(candidate, "r", encoding="utf-8") as f: - for line in f: - text = line.strip() - if text and not text.startswith("#"): - return text[:240] - except Exception: - continue - return "" - - -def _list_workspace_skills(bot_id: str) -> List[Dict[str, Any]]: - root = _skills_root(bot_id) - os.makedirs(root, exist_ok=True) - rows: List[Dict[str, Any]] = [] - names = sorted(os.listdir(root), key=lambda n: (not os.path.isdir(os.path.join(root, n)), n.lower())) - for name in names: - if not name or name.startswith("."): - continue - if not _is_valid_top_level_skill_name(name): - continue - abs_path = os.path.join(root, name) - if not os.path.exists(abs_path): - continue - stat = os.stat(abs_path) - rows.append( - { - "id": name, - "name": name, - "type": "dir" if os.path.isdir(abs_path) else "file", - "path": f"skills/{name}", - "size": stat.st_size if os.path.isfile(abs_path) else None, - "mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z", - "description": _read_skill_description(abs_path), - } - ) - return rows - - -def _skill_market_root() -> str: - return os.path.abspath(os.path.join(DATA_ROOT, "skills")) - - -def _parse_json_string_list(raw: Any) -> List[str]: - if not raw: - return [] - try: - data = json.loads(str(raw)) - except Exception: - return [] - if not isinstance(data, list): - return [] - rows: List[str] = [] - for item in data: - text = str(item or "").strip() - if text and text not in rows: - rows.append(text) - return rows - - -def _is_ignored_skill_zip_top_level(name: str) -> bool: - text = str(name or "").strip() - if not text: - return True - lowered = text.lower() - if lowered == "__macosx": - return True - if text.startswith("."): - return True - return False - - -def _read_description_from_text(raw: str) -> str: - for line in str(raw or "").splitlines(): - text = line.strip() - if text and not text.startswith("#"): - return text[:240] - return "" - - -def _extract_skill_zip_summary(zip_path: str) -> 
Dict[str, Any]: - entry_names: List[str] = [] - description = "" - with zipfile.ZipFile(zip_path) as archive: - members = archive.infolist() - file_members = [member for member in members if not member.is_dir()] - for member in file_members: - raw_name = str(member.filename or "").replace("\\", "/").lstrip("/") - if not raw_name: - continue - first = raw_name.split("/", 1)[0].strip() - if _is_ignored_skill_zip_top_level(first): - continue - if _is_valid_top_level_skill_name(first) and first not in entry_names: - entry_names.append(first) - - candidates = sorted( - [ - str(member.filename or "").replace("\\", "/").lstrip("/") - for member in file_members - if str(member.filename or "").replace("\\", "/").rsplit("/", 1)[-1].lower() - in {"skill.md", "readme.md"} - ], - key=lambda value: (value.count("/"), value.lower()), - ) - for candidate in candidates: - try: - with archive.open(candidate, "r") as fh: - preview = fh.read(4096).decode("utf-8", errors="ignore") - description = _read_description_from_text(preview) - if description: - break - except Exception: - continue - return { - "entry_names": entry_names, - "description": description, - } - - -def _sanitize_skill_market_key(raw: Any) -> str: - value = str(raw or "").strip().lower() - value = re.sub(r"[^a-z0-9._-]+", "-", value) - value = re.sub(r"-{2,}", "-", value).strip("._-") - return value[:120] - - -def _sanitize_zip_filename(raw: Any) -> str: - filename = os.path.basename(str(raw or "").strip()) - if not filename: - return "" - filename = filename.replace("\\", "/").rsplit("/", 1)[-1] - stem, ext = os.path.splitext(filename) - safe_stem = re.sub(r"[^A-Za-z0-9._-]+", "-", stem).strip("._-") - if not safe_stem: - safe_stem = "skill-package" - safe_ext = ".zip" if ext.lower() == ".zip" else "" - return f"{safe_stem[:180]}{safe_ext}" - - -def _resolve_unique_skill_market_key(session: Session, preferred_key: str, exclude_id: Optional[int] = None) -> str: - base_key = _sanitize_skill_market_key(preferred_key) or 
"skill" - candidate = base_key - counter = 2 - while True: - stmt = select(SkillMarketItem).where(SkillMarketItem.skill_key == candidate) - rows = session.exec(stmt).all() - conflict = next((row for row in rows if exclude_id is None or row.id != exclude_id), None) - if not conflict: - return candidate - candidate = f"{base_key}-{counter}" - counter += 1 - - -def _resolve_unique_skill_market_zip_filename( - session: Session, - filename: str, - *, - exclude_filename: Optional[str] = None, - exclude_id: Optional[int] = None, -) -> str: - root = _skill_market_root() - os.makedirs(root, exist_ok=True) - safe_name = _sanitize_zip_filename(filename) - if not safe_name.lower().endswith(".zip"): - raise HTTPException(status_code=400, detail="Only .zip skill package is supported") - candidate = safe_name - stem, ext = os.path.splitext(safe_name) - counter = 2 - while True: - file_conflict = os.path.exists(os.path.join(root, candidate)) and candidate != str(exclude_filename or "").strip() - rows = session.exec(select(SkillMarketItem).where(SkillMarketItem.zip_filename == candidate)).all() - db_conflict = next((row for row in rows if exclude_id is None or row.id != exclude_id), None) - if not file_conflict and not db_conflict: - return candidate - candidate = f"{stem}-{counter}{ext}" - counter += 1 - - -async def _store_skill_market_zip_upload( - session: Session, - upload: UploadFile, - *, - exclude_filename: Optional[str] = None, - exclude_id: Optional[int] = None, -) -> Dict[str, Any]: - root = _skill_market_root() - os.makedirs(root, exist_ok=True) - - incoming_name = _sanitize_zip_filename(upload.filename or "") - if not incoming_name.lower().endswith(".zip"): - raise HTTPException(status_code=400, detail="Only .zip skill package is supported") - - target_filename = _resolve_unique_skill_market_zip_filename( - session, - incoming_name, - exclude_filename=exclude_filename, - exclude_id=exclude_id, - ) - max_bytes = get_platform_settings_snapshot().upload_max_mb * 1024 * 
1024 - total_size = 0 - tmp_path: Optional[str] = None - try: - with tempfile.NamedTemporaryFile(prefix=".skill_market_", suffix=".zip", dir=root, delete=False) as tmp_zip: - tmp_path = tmp_zip.name - while True: - chunk = await upload.read(1024 * 1024) - if not chunk: - break - total_size += len(chunk) - if total_size > max_bytes: - raise HTTPException( - status_code=413, - detail=f"Zip package too large (max {max_bytes // (1024 * 1024)}MB)", - ) - tmp_zip.write(chunk) - if total_size == 0: - raise HTTPException(status_code=400, detail="Zip package is empty") - summary = _extract_skill_zip_summary(tmp_path) - if not summary["entry_names"]: - raise HTTPException(status_code=400, detail="Zip package has no valid skill entries") - final_path = os.path.join(root, target_filename) - os.replace(tmp_path, final_path) - tmp_path = None - return { - "zip_filename": target_filename, - "zip_size_bytes": total_size, - "entry_names": summary["entry_names"], - "description": summary["description"], - } - except zipfile.BadZipFile as exc: - raise HTTPException(status_code=400, detail="Invalid zip file") from exc - finally: - await upload.close() - if tmp_path and os.path.exists(tmp_path): - os.remove(tmp_path) - -def _serialize_skill_market_item( - item: SkillMarketItem, - *, - install_count: int = 0, - install_row: Optional[BotSkillInstall] = None, - workspace_installed: Optional[bool] = None, - installed_entries: Optional[List[str]] = None, -) -> Dict[str, Any]: - zip_path = os.path.join(_skill_market_root(), str(item.zip_filename or "")) - entry_names = _parse_json_string_list(item.entry_names_json) - payload = { - "id": item.id, - "skill_key": item.skill_key, - "display_name": item.display_name or item.skill_key, - "description": item.description or "", - "zip_filename": item.zip_filename, - "zip_size_bytes": int(item.zip_size_bytes or 0), - "entry_names": entry_names, - "entry_count": len(entry_names), - "zip_exists": os.path.isfile(zip_path), - "install_count": 
int(install_count or 0), - "created_at": item.created_at.isoformat() + "Z" if item.created_at else None, - "updated_at": item.updated_at.isoformat() + "Z" if item.updated_at else None, - } - if install_row is not None: - resolved_entries = installed_entries if installed_entries is not None else _parse_json_string_list(install_row.installed_entries_json) - resolved_installed = workspace_installed if workspace_installed is not None else install_row.status == "INSTALLED" - payload.update( - { - "installed": resolved_installed, - "install_status": install_row.status, - "installed_at": install_row.installed_at.isoformat() + "Z" if install_row.installed_at else None, - "installed_entries": resolved_entries, - "install_error": install_row.last_error, - } - ) - return payload - - -def _install_skill_zip_into_workspace(bot_id: str, zip_path: str) -> Dict[str, Any]: - try: - archive = zipfile.ZipFile(zip_path) - except Exception as exc: - raise HTTPException(status_code=400, detail="Invalid zip file") from exc - - skills_root = _skills_root(bot_id) - os.makedirs(skills_root, exist_ok=True) - - installed: List[str] = [] - with archive: - members = archive.infolist() - file_members = [m for m in members if not m.is_dir()] - if not file_members: - raise HTTPException(status_code=400, detail="Zip package has no files") - - top_names: List[str] = [] - for member in file_members: - raw_name = str(member.filename or "").replace("\\", "/").lstrip("/") - if not raw_name: - continue - first = raw_name.split("/", 1)[0].strip() - if _is_ignored_skill_zip_top_level(first): - continue - if not _is_valid_top_level_skill_name(first): - raise HTTPException(status_code=400, detail=f"Invalid skill entry name in zip: {first}") - if first not in top_names: - top_names.append(first) - - if not top_names: - raise HTTPException(status_code=400, detail="Zip package has no valid skill entries") - - conflicts = [name for name in top_names if os.path.exists(os.path.join(skills_root, name))] - if 
conflicts: - raise HTTPException(status_code=400, detail=f"Skill already exists: {', '.join(conflicts)}") - - with tempfile.TemporaryDirectory(prefix=".skill_upload_", dir=skills_root) as tmp_dir: - tmp_root = os.path.abspath(tmp_dir) - for member in members: - raw_name = str(member.filename or "").replace("\\", "/").lstrip("/") - if not raw_name: - continue - target = os.path.abspath(os.path.join(tmp_root, raw_name)) - if os.path.commonpath([tmp_root, target]) != tmp_root: - raise HTTPException(status_code=400, detail=f"Unsafe zip entry path: {raw_name}") - if member.is_dir(): - os.makedirs(target, exist_ok=True) - continue - os.makedirs(os.path.dirname(target), exist_ok=True) - with archive.open(member, "r") as source, open(target, "wb") as dest: - shutil.copyfileobj(source, dest) - - for name in top_names: - src = os.path.join(tmp_root, name) - dst = os.path.join(skills_root, name) - if not os.path.exists(src): - continue - shutil.move(src, dst) - installed.append(name) - - if not installed: - raise HTTPException(status_code=400, detail="No skill entries installed from zip") - - return { - "installed": installed, - "skills": _list_workspace_skills(bot_id), - } - - -def _cron_store_path(bot_id: str) -> str: - return os.path.join(_bot_data_root(bot_id), "cron", "jobs.json") - - -def _env_store_path(bot_id: str) -> str: - return os.path.join(_bot_data_root(bot_id), "env.json") - - -def _sessions_root(bot_id: str) -> str: - return os.path.join(_workspace_root(bot_id), "sessions") - - -def _clear_bot_sessions(bot_id: str) -> int: - """Remove persisted session files for the bot workspace.""" - root = _sessions_root(bot_id) - if not os.path.isdir(root): - return 0 - deleted = 0 - for name in os.listdir(root): - path = os.path.join(root, name) - if not os.path.isfile(path): - continue - if not name.lower().endswith(".jsonl"): - continue - try: - os.remove(path) - deleted += 1 - except Exception: - continue - return deleted - - -def 
_clear_bot_dashboard_direct_session(bot_id: str) -> Dict[str, Any]: - """Truncate the dashboard:direct session file while preserving the workspace session root.""" - root = _sessions_root(bot_id) - os.makedirs(root, exist_ok=True) - path = os.path.join(root, "dashboard_direct.jsonl") - existed = os.path.exists(path) - with open(path, "w", encoding="utf-8"): - pass - return {"path": path, "existed": existed} - - -def _read_env_store(bot_id: str) -> Dict[str, str]: - path = _env_store_path(bot_id) - if not os.path.isfile(path): - return {} - try: - with open(path, "r", encoding="utf-8") as f: - data = json.load(f) - return _normalize_env_params(data) - except Exception: - return {} - - -def _write_env_store(bot_id: str, env_params: Dict[str, str]) -> None: - path = _env_store_path(bot_id) - os.makedirs(os.path.dirname(path), exist_ok=True) - tmp = f"{path}.tmp" - with open(tmp, "w", encoding="utf-8") as f: - json.dump(_normalize_env_params(env_params), f, ensure_ascii=False, indent=2) - os.replace(tmp, path) - - -def _read_cron_store(bot_id: str) -> Dict[str, Any]: - path = _cron_store_path(bot_id) - if not os.path.isfile(path): - return {"version": 1, "jobs": []} - try: - with open(path, "r", encoding="utf-8") as f: - data = json.load(f) - if not isinstance(data, dict): - return {"version": 1, "jobs": []} - jobs = data.get("jobs") - if not isinstance(jobs, list): - data["jobs"] = [] - if "version" not in data: - data["version"] = 1 - return data - except Exception: - return {"version": 1, "jobs": []} - - -def _write_cron_store(bot_id: str, store: Dict[str, Any]) -> None: - path = _cron_store_path(bot_id) - os.makedirs(os.path.dirname(path), exist_ok=True) - tmp = f"{path}.tmp" - with open(tmp, "w", encoding="utf-8") as f: - json.dump(store, f, ensure_ascii=False, indent=2) - os.replace(tmp, path) - - -def _resolve_workspace_path(bot_id: str, rel_path: Optional[str] = None) -> tuple[str, str]: - root = _workspace_root(bot_id) - rel = (rel_path or 
"").strip().replace("\\", "/") - target = os.path.abspath(os.path.join(root, rel)) - if os.path.commonpath([root, target]) != root: - raise HTTPException(status_code=400, detail="invalid workspace path") - return root, target - - -def _calc_dir_size_bytes(path: str) -> int: - total = 0 - if not os.path.exists(path): - return 0 - for root, _, files in os.walk(path): - for filename in files: - try: - file_path = os.path.join(root, filename) - if os.path.islink(file_path): - continue - total += os.path.getsize(file_path) - except Exception: - continue - return max(0, total) - - -def _is_image_attachment_path(path: str) -> bool: - lower = str(path or "").strip().lower() - return lower.endswith(".png") or lower.endswith(".jpg") or lower.endswith(".jpeg") or lower.endswith(".webp") - - -def _is_video_attachment_path(path: str) -> bool: - lower = str(path or "").strip().lower() - return ( - lower.endswith(".mp4") - or lower.endswith(".mov") - or lower.endswith(".m4v") - or lower.endswith(".webm") - or lower.endswith(".mkv") - or lower.endswith(".avi") - ) - - -def _is_visual_attachment_path(path: str) -> bool: - return _is_image_attachment_path(path) or _is_video_attachment_path(path) - - -def _workspace_stat_ctime_iso(stat: os.stat_result) -> str: - ts = getattr(stat, "st_birthtime", None) - if ts is None: - ts = getattr(stat, "st_ctime", None) - try: - return datetime.utcfromtimestamp(float(ts)).isoformat() + "Z" - except Exception: - return datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z" - - -def _build_workspace_tree(path: str, root: str, depth: int) -> List[Dict[str, Any]]: - rows: List[Dict[str, Any]] = [] - try: - names = sorted(os.listdir(path), key=lambda v: (not os.path.isdir(os.path.join(path, v)), v.lower())) - except FileNotFoundError: - return rows - - for name in names: - if name in {".DS_Store"}: - continue - abs_path = os.path.join(path, name) - rel_path = os.path.relpath(abs_path, root).replace("\\", "/") - stat = os.stat(abs_path) - base: 
def _list_workspace_dir(path: str, root: str) -> List[Dict[str, Any]]:
    """Flat listing of one directory: dirs first, then files, case-insensitive by name.

    Paths are reported relative to ``root`` with forward slashes; ``.DS_Store``
    entries are hidden. Size/ext are only populated for files.
    NOTE(review): os.listdir/os.stat are unguarded here (unlike _calc_dir_size_bytes),
    so a missing path or an entry deleted mid-listing raises OSError — confirm
    callers only pass validated workspace paths.
    """
    rows: List[Dict[str, Any]] = []
    names = sorted(os.listdir(path), key=lambda v: (not os.path.isdir(os.path.join(path, v)), v.lower()))
    for name in names:
        if name in {".DS_Store"}:
            continue
        abs_path = os.path.join(path, name)
        rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
        stat = os.stat(abs_path)
        rows.append(
            {
                "name": name,
                "path": rel_path,
                "type": "dir" if os.path.isdir(abs_path) else "file",
                "size": stat.st_size if os.path.isfile(abs_path) else None,
                "ext": os.path.splitext(name)[1].lower() if os.path.isfile(abs_path) else "",
                "ctime": _workspace_stat_ctime_iso(stat),
                "mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z",
            }
        )
    return rows


def _list_workspace_dir_recursive(path: str, root: str) -> List[Dict[str, Any]]:
    """Recursive listing under ``path``: one flat row per directory/file.

    Rows are finally sorted dirs-first by relative path (case-insensitive).
    """
    rows: List[Dict[str, Any]] = []
    for walk_root, dirnames, filenames in os.walk(path):
        # Sorting dirnames in place also fixes the traversal order of os.walk.
        dirnames.sort(key=lambda v: v.lower())
        filenames.sort(key=lambda v: v.lower())

        for name in dirnames:
            if name in {".DS_Store"}:
                continue
            abs_path = os.path.join(walk_root, name)
            rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
            stat = os.stat(abs_path)
            rows.append(
                {
                    "name": name,
                    "path": rel_path,
                    "type": "dir",
                    "size": None,
                    "ext": "",
                    "ctime": _workspace_stat_ctime_iso(stat),
                    "mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z",
                }
            )

        for name in filenames:
            if name in {".DS_Store"}:
                continue
            abs_path = os.path.join(walk_root, name)
            rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
            stat = os.stat(abs_path)
            rows.append(
                {
                    "name": name,
                    "path": rel_path,
                    "type": "file",
                    "size": stat.st_size,
                    "ext": os.path.splitext(name)[1].lower(),
                    "ctime": _workspace_stat_ctime_iso(stat),
                    "mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z",
                }
            )

    rows.sort(key=lambda v: (v.get("type") != "dir", str(v.get("path", "")).lower()))
    return rows


@app.get("/api/images", response_model=List[NanobotImage])
def list_images(session: Session = Depends(get_session)):
    """List registered nanobot images, served from a 60s JSON cache when valid."""
    cached = cache.get_json(_cache_key_images())
    if isinstance(cached, list) and all(isinstance(row, dict) for row in cached):
        return cached
    if isinstance(cached, list):
        # Cached list contains non-dict rows: wrong shape, drop the cache entry.
        _invalidate_images_cache()
    reconcile_image_registry(session)
    rows = session.exec(select(NanobotImage)).all()
    payload = [row.model_dump() for row in rows]
    cache.set_json(_cache_key_images(), payload, ttl=60)
    return payload


@app.delete("/api/images/{tag:path}")
def delete_image(tag: str, session: Session = Depends(get_session)):
    """Delete an image registration; refused while any bot still references the tag."""
    image = session.get(NanobotImage, tag)
    if not image:
        raise HTTPException(status_code=404, detail="Image not found")

    # Check whether any bot is still using this image.
    bots_using = session.exec(select(BotInstance).where(BotInstance.image_tag == tag)).all()
    if bots_using:
        raise HTTPException(status_code=400, detail=f"Cannot delete image: {len(bots_using)} bots are using it.")

    session.delete(image)
    session.commit()
    _invalidate_images_cache()
    return {"status": "deleted"}


@app.get("/api/docker-images")
def list_docker_images(repository: str = "nanobot-base"):
    """List local Docker images for a repository (default: nanobot-base)."""
    rows = docker_manager.list_images_by_repo(repository)
    return rows


@app.post("/api/images/register")
def register_image(payload: dict, session: Session = Depends(get_session)):
    """Register (or refresh) a locally present Docker image as READY in the DB."""
    tag = (payload.get("tag") or "").strip()
    source_dir = (payload.get("source_dir") or "manual").strip() or "manual"
    if not tag:
        raise HTTPException(status_code=400, detail="tag is required")

    if not docker_manager.has_image(tag):
        raise HTTPException(status_code=404, detail=f"Docker image not found: {tag}")

    # "repo:v1.2" -> "1.2"; tags without ":" are used verbatim as the version.
    version = tag.split(":")[-1].removeprefix("v") if ":" in tag else tag
    try:
        docker_img = docker_manager.client.images.get(tag) if docker_manager.client else None
        image_id = docker_img.id if docker_img else None
    except Exception:
        # image_id is informational only; registration proceeds without it.
        image_id = None

    row = session.get(NanobotImage, tag)
    if not row:
        row = NanobotImage(
            tag=tag,
            version=version,
            status="READY",
            source_dir=source_dir,
            image_id=image_id,
        )
    else:
        row.version = version
        row.status = "READY"
        row.source_dir = source_dir
        row.image_id = image_id
    session.add(row)
    session.commit()
    session.refresh(row)
    _invalidate_images_cache()
    return row
@app.post("/api/providers/test")
async def test_provider(payload: dict):
    """Probe an LLM provider's /models endpoint with the supplied credentials.

    Returns ``{"ok": bool, ...}`` diagnostics instead of raising on upstream
    failures; only malformed requests raise HTTP 400.
    """
    provider = (payload.get("provider") or "").strip()
    api_key = (payload.get("api_key") or "").strip()
    model = (payload.get("model") or "").strip()
    api_base = (payload.get("api_base") or "").strip()

    if not provider or not api_key:
        raise HTTPException(status_code=400, detail="provider and api_key are required")

    normalized_provider, default_base = _provider_defaults(provider)
    base = (api_base or default_base).rstrip("/")

    if normalized_provider not in {"openrouter", "dashscope", "kimi", "minimax", "openai", "deepseek"}:
        raise HTTPException(status_code=400, detail=f"provider not supported for test: {provider}")

    if not base:
        raise HTTPException(status_code=400, detail=f"api_base is required for provider: {provider}")

    headers = {"Authorization": f"Bearer {api_key}"}
    timeout = httpx.Timeout(20.0, connect=10.0)
    url = f"{base}/models"

    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            resp = await client.get(url, headers=headers)

        if resp.status_code >= 400:
            return {
                "ok": False,
                "provider": normalized_provider,
                "status_code": resp.status_code,
                "detail": resp.text[:500],
            }

        data = resp.json()
        models_raw = data.get("data", []) if isinstance(data, dict) else []
        model_ids: List[str] = []
        for item in models_raw[:20]:
            if isinstance(item, dict) and item.get("id"):
                model_ids.append(str(item["id"]))

        model_hint = ""
        if model:
            # Substring match: provider listings may prefix/suffix model ids.
            model_hint = "model_found" if any(model in m for m in model_ids) else "model_not_listed"

        return {
            "ok": True,
            "provider": normalized_provider,
            "endpoint": url,
            "models_preview": model_ids[:8],
            "model_hint": model_hint,
        }
    except Exception as e:
        return {
            "ok": False,
            "provider": normalized_provider,
            "endpoint": url,
            "detail": str(e),
        }


@app.post("/api/bots")
def create_bot(payload: BotCreateRequest, session: Session = Depends(get_session)):
    """Create a bot: validate id/image, persist the row, then seed workspace config.

    The DB row is committed before the env store write and channel sync so those
    steps can resolve the bot; a second commit records the creation event.
    """
    normalized_bot_id = str(payload.id or "").strip()
    if not normalized_bot_id:
        raise HTTPException(status_code=400, detail="Bot ID is required")
    if not BOT_ID_PATTERN.fullmatch(normalized_bot_id):
        raise HTTPException(status_code=400, detail="Bot ID can only contain letters, numbers, and underscores")
    if session.get(BotInstance, normalized_bot_id):
        raise HTTPException(status_code=409, detail=f"Bot ID already exists: {normalized_bot_id}")

    image_row = session.get(NanobotImage, payload.image_tag)
    if not image_row:
        raise HTTPException(status_code=400, detail=f"Image not registered in DB: {payload.image_tag}")
    if image_row.status != "READY":
        raise HTTPException(status_code=400, detail=f"Image status is not READY: {payload.image_tag} ({image_row.status})")
    if not docker_manager.has_image(payload.image_tag):
        raise HTTPException(status_code=400, detail=f"Docker image not found locally: {payload.image_tag}")

    normalized_env_params = _normalize_env_params(payload.env_params)
    try:
        normalized_env_params["TZ"] = _normalize_system_timezone(payload.system_timezone)
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc

    bot = BotInstance(
        id=normalized_bot_id,
        name=payload.name,
        enabled=bool(payload.enabled) if payload.enabled is not None else True,
        access_password=str(payload.access_password or ""),
        image_tag=payload.image_tag,
        workspace_dir=os.path.join(BOTS_WORKSPACE_ROOT, normalized_bot_id),
    )

    session.add(bot)
    session.commit()
    session.refresh(bot)
    resource_limits = _normalize_resource_limits(payload.cpu_cores, payload.memory_mb, payload.storage_gb)
    _write_env_store(normalized_bot_id, normalized_env_params)
    _sync_workspace_channels(
        session,
        normalized_bot_id,
        channels_override=_normalize_initial_channels(normalized_bot_id, payload.channels),
        global_delivery_override={
            "sendProgress": bool(payload.send_progress) if payload.send_progress is not None else False,
            "sendToolHints": bool(payload.send_tool_hints) if payload.send_tool_hints is not None else False,
        },
        runtime_overrides={
            "llm_provider": payload.llm_provider,
            "llm_model": payload.llm_model,
            "api_key": payload.api_key,
            "api_base": payload.api_base or "",
            "temperature": payload.temperature,
            "top_p": payload.top_p,
            "max_tokens": payload.max_tokens,
            "cpu_cores": resource_limits["cpu_cores"],
            "memory_mb": resource_limits["memory_mb"],
            "storage_gb": resource_limits["storage_gb"],
            # system_prompt and soul_md mirror each other, falling back to the default.
            "system_prompt": payload.system_prompt or payload.soul_md or DEFAULT_SOUL_MD,
            "soul_md": payload.soul_md or payload.system_prompt or DEFAULT_SOUL_MD,
            "agents_md": payload.agents_md or DEFAULT_AGENTS_MD,
            "user_md": payload.user_md or DEFAULT_USER_MD,
            "tools_md": payload.tools_md or DEFAULT_TOOLS_MD,
            "identity_md": payload.identity_md or DEFAULT_IDENTITY_MD,
            "send_progress": bool(payload.send_progress) if payload.send_progress is not None else False,
            "send_tool_hints": bool(payload.send_tool_hints) if payload.send_tool_hints is not None else False,
        },
    )
    session.refresh(bot)
    record_activity_event(
        session,
        normalized_bot_id,
        "bot_created",
        channel="system",
        detail=f"Bot {normalized_bot_id} created",
        metadata={"image_tag": payload.image_tag},
    )
    session.commit()
    _invalidate_bot_detail_cache(normalized_bot_id)
    return _serialize_bot(bot)
@app.get("/api/bots")
def list_bots(session: Session = Depends(get_session)):
    """List bots (30s cache), reconciling stored docker_status with live status."""
    cached = cache.get_json(_cache_key_bots_list())
    if isinstance(cached, list):
        return cached
    bots = session.exec(select(BotInstance)).all()
    dirty = False
    for bot in bots:
        actual_status = docker_manager.get_bot_status(bot.id)
        if bot.docker_status != actual_status:
            bot.docker_status = actual_status
            # Non-running bots fall back to IDLE unless they are flagged ERROR.
            if actual_status != "RUNNING" and str(bot.current_state or "").upper() not in {"ERROR"}:
                bot.current_state = "IDLE"
            session.add(bot)
            dirty = True
    if dirty:
        session.commit()
        for bot in bots:
            session.refresh(bot)
    rows = [_serialize_bot_list_item(bot) for bot in bots]
    cache.set_json(_cache_key_bots_list(), rows, ttl=30)
    return rows


@app.get("/api/bots/{bot_id}")
def get_bot_detail(bot_id: str, session: Session = Depends(get_session)):
    """Serialized detail for one bot, cached for 30 seconds."""
    cached = cache.get_json(_cache_key_bot_detail(bot_id))
    if isinstance(cached, dict):
        return cached
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    row = _serialize_bot(bot)
    cache.set_json(_cache_key_bot_detail(bot_id), row, ttl=30)
    return row


@app.post("/api/bots/{bot_id}/auth/login")
def login_bot_page(bot_id: str, payload: BotPageAuthLoginRequest, session: Session = Depends(get_session)):
    """Check a bot-page access password; password protection is optional per bot."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")

    configured = str(bot.access_password or "").strip()
    if not configured:
        # No password configured -> auth is disabled for this bot page.
        return {"ok": True, "enabled": False, "bot_id": bot_id}

    candidate = str(payload.password or "").strip()
    if not candidate:
        raise HTTPException(status_code=401, detail="Bot access password required")
    if candidate != configured:
        raise HTTPException(status_code=401, detail="Invalid bot access password")
    return {"ok": True, "enabled": True, "bot_id": bot_id}


@app.get("/api/bots/{bot_id}/resources")
def get_bot_resources(bot_id: str, session: Session = Depends(get_session)):
    """Snapshot of configured vs runtime resource limits plus workspace disk usage."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")

    configured = _read_bot_resources(bot_id)
    runtime = docker_manager.get_bot_resource_snapshot(bot_id)
    workspace_root = _workspace_root(bot_id)
    workspace_bytes = _calc_dir_size_bytes(workspace_root)
    configured_storage_bytes = int(configured.get("storage_gb", 0) or 0) * 1024 * 1024 * 1024
    workspace_percent = 0.0
    if configured_storage_bytes > 0:
        workspace_percent = (workspace_bytes / configured_storage_bytes) * 100.0

    # A limit of 0/absent means "unlimited" (see the note in the response body).
    limits = runtime.get("limits") or {}
    cpu_limited = (limits.get("cpu_cores") or 0) > 0
    memory_limited = (limits.get("memory_bytes") or 0) > 0
    storage_limited = bool(limits.get("storage_bytes")) or bool(limits.get("storage_opt_raw"))

    return {
        "bot_id": bot_id,
        "docker_status": runtime.get("docker_status") or bot.docker_status,
        "configured": configured,
        "runtime": runtime,
        "workspace": {
            "path": workspace_root,
            "usage_bytes": workspace_bytes,
            "configured_limit_bytes": configured_storage_bytes if configured_storage_bytes > 0 else None,
            "usage_percent": max(0.0, workspace_percent),
        },
        "enforcement": {
            "cpu_limited": cpu_limited,
            "memory_limited": memory_limited,
            "storage_limited": storage_limited,
        },
        "note": (
            "Resource value 0 means unlimited. CPU/Memory limits come from Docker HostConfig and are enforced by cgroup. "
            "Storage limit depends on Docker storage driver support."
        ),
        "collected_at": datetime.utcnow().isoformat() + "Z",
    }
@app.put("/api/bots/{bot_id}")
def update_bot(bot_id: str, payload: BotUpdateRequest, session: Session = Depends(get_session)):
    """Update DB fields and/or runtime config for a bot.

    Only fields explicitly sent by the client are applied (``exclude_unset``).
    Runtime fields are split off into workspace-config overrides; env params
    and timezone are persisted to the bot's env store after the DB commit.
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")

    update_data = payload.model_dump(exclude_unset=True)

    if "image_tag" in update_data and update_data["image_tag"]:
        image_tag = str(update_data["image_tag"]).strip()
        image_row = session.get(NanobotImage, image_tag)
        if not image_row:
            raise HTTPException(status_code=400, detail=f"Image not registered in DB: {image_tag}")
        if image_row.status != "READY":
            raise HTTPException(status_code=400, detail=f"Image status is not READY: {image_tag} ({image_row.status})")
        if not docker_manager.has_image(image_tag):
            raise HTTPException(status_code=400, detail=f"Docker image not found locally: {image_tag}")

    env_params = update_data.pop("env_params", None) if isinstance(update_data, dict) else None
    system_timezone = update_data.pop("system_timezone", None) if isinstance(update_data, dict) else None
    normalized_system_timezone: Optional[str] = None
    if system_timezone is not None:
        try:
            normalized_system_timezone = _normalize_system_timezone(system_timezone)
        except ValueError as exc:
            raise HTTPException(status_code=400, detail=str(exc)) from exc
    runtime_overrides: Dict[str, Any] = {}
    # tools_config is no longer dashboard-managed; drop it if a client still sends it.
    update_data.pop("tools_config", None) if isinstance(update_data, dict) else None

    # Fields routed to the workspace runtime config instead of the DB row.
    runtime_fields = {
        "llm_provider",
        "llm_model",
        "api_key",
        "api_base",
        "temperature",
        "top_p",
        "max_tokens",
        "cpu_cores",
        "memory_mb",
        "storage_gb",
        "soul_md",
        "agents_md",
        "user_md",
        "tools_md",
        "identity_md",
        "send_progress",
        "send_tool_hints",
        "system_prompt",
    }
    for field in runtime_fields:
        if field in update_data:
            runtime_overrides[field] = update_data.pop(field)

    # Never allow empty placeholders to overwrite existing runtime model settings.
    for text_field in ("llm_provider", "llm_model", "api_key"):
        if text_field in runtime_overrides:
            text = str(runtime_overrides.get(text_field) or "").strip()
            if not text:
                runtime_overrides.pop(text_field, None)
            else:
                runtime_overrides[text_field] = text
    if "api_base" in runtime_overrides:
        runtime_overrides["api_base"] = str(runtime_overrides.get("api_base") or "").strip()

    # system_prompt and soul_md are kept as mirrors of each other.
    if "system_prompt" in runtime_overrides and "soul_md" not in runtime_overrides:
        runtime_overrides["soul_md"] = runtime_overrides["system_prompt"]
    if "soul_md" in runtime_overrides and "system_prompt" not in runtime_overrides:
        runtime_overrides["system_prompt"] = runtime_overrides["soul_md"]
    if {"cpu_cores", "memory_mb", "storage_gb"} & set(runtime_overrides.keys()):
        normalized_resources = _normalize_resource_limits(
            runtime_overrides.get("cpu_cores"),
            runtime_overrides.get("memory_mb"),
            runtime_overrides.get("storage_gb"),
        )
        runtime_overrides.update(normalized_resources)

    db_fields = {"name", "image_tag", "access_password", "enabled"}
    for key, value in update_data.items():
        if key in db_fields:
            setattr(bot, key, value)

    session.add(bot)
    session.commit()
    session.refresh(bot)
    if env_params is not None or normalized_system_timezone is not None:
        next_env_params = _resolve_bot_env_params(bot_id)
        if env_params is not None:
            next_env_params = _normalize_env_params(env_params)
        if normalized_system_timezone is not None:
            next_env_params["TZ"] = normalized_system_timezone
        _write_env_store(bot_id, next_env_params)
    global_delivery_override: Optional[Dict[str, Any]] = None
    if "send_progress" in runtime_overrides or "send_tool_hints" in runtime_overrides:
        global_delivery_override = {}
        if "send_progress" in runtime_overrides:
            global_delivery_override["sendProgress"] = bool(runtime_overrides.get("send_progress"))
        if "send_tool_hints" in runtime_overrides:
            global_delivery_override["sendToolHints"] = bool(runtime_overrides.get("send_tool_hints"))

    _sync_workspace_channels(
        session,
        bot_id,
        runtime_overrides=runtime_overrides if runtime_overrides else None,
        global_delivery_override=global_delivery_override,
    )
    session.refresh(bot)
    _invalidate_bot_detail_cache(bot_id)
    return _serialize_bot(bot)


@app.post("/api/bots/{bot_id}/start")
async def start_bot(bot_id: str, session: Session = Depends(get_session)):
    """Start the bot container with its configured env params and resource limits."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    if not bool(getattr(bot, "enabled", True)):
        raise HTTPException(status_code=403, detail="Bot is disabled. Enable it first.")
    _sync_workspace_channels(session, bot_id)
    runtime_snapshot = _read_bot_runtime_snapshot(bot)
    env_params = _resolve_bot_env_params(bot_id)
    _write_env_store(bot_id, env_params)
    success = docker_manager.start_bot(
        bot_id,
        image_tag=bot.image_tag,
        on_state_change=docker_callback,
        env_vars=env_params,
        cpu_cores=_safe_float(runtime_snapshot.get("cpu_cores"), 1.0),
        memory_mb=_safe_int(runtime_snapshot.get("memory_mb"), 1024),
        storage_gb=_safe_int(runtime_snapshot.get("storage_gb"), 10),
    )
    if not success:
        bot.docker_status = "STOPPED"
        session.add(bot)
        session.commit()
        raise HTTPException(status_code=500, detail=f"Failed to start container with image {bot.image_tag}")

    # Re-check: the container may exit immediately after a "successful" start.
    actual_status = docker_manager.get_bot_status(bot_id)
    bot.docker_status = actual_status
    if actual_status != "RUNNING":
        session.add(bot)
        session.commit()
        _invalidate_bot_detail_cache(bot_id)
        raise HTTPException(
            status_code=500,
            detail="Bot container failed shortly after startup. Check bot logs/config.",
        )
    # Fire-and-forget task; presumably records a warning if the agent loop is
    # slow to become ready — see _record_agent_loop_ready_warning (defined elsewhere).
    asyncio.create_task(_record_agent_loop_ready_warning(bot_id))
    session.add(bot)
    record_activity_event(session, bot_id, "bot_started", channel="system", detail=f"Container started for {bot_id}")
    session.commit()
    _invalidate_bot_detail_cache(bot_id)
    return {"status": "started"}
@app.post("/api/bots/{bot_id}/stop")
def stop_bot(bot_id: str, session: Session = Depends(get_session)):
    """Stop the bot container and record the stop event."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    if not bool(getattr(bot, "enabled", True)):
        raise HTTPException(status_code=403, detail="Bot is disabled. Enable it first.")

    docker_manager.stop_bot(bot_id)
    bot.docker_status = "STOPPED"
    session.add(bot)
    record_activity_event(session, bot_id, "bot_stopped", channel="system", detail=f"Container stopped for {bot_id}")
    session.commit()
    _invalidate_bot_detail_cache(bot_id)
    return {"status": "stopped"}


@app.post("/api/bots/{bot_id}/enable")
def enable_bot(bot_id: str, session: Session = Depends(get_session)):
    """Mark the bot enabled (does not start its container)."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")

    bot.enabled = True
    session.add(bot)
    record_activity_event(session, bot_id, "bot_enabled", channel="system", detail=f"Bot {bot_id} enabled")
    session.commit()
    _invalidate_bot_detail_cache(bot_id)
    return {"status": "enabled", "enabled": True}


@app.post("/api/bots/{bot_id}/disable")
def disable_bot(bot_id: str, session: Session = Depends(get_session)):
    """Stop the container and mark the bot disabled; an ERROR state is preserved."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")

    docker_manager.stop_bot(bot_id)
    bot.enabled = False
    bot.docker_status = "STOPPED"
    if str(bot.current_state or "").upper() not in {"ERROR"}:
        bot.current_state = "IDLE"
    session.add(bot)
    record_activity_event(session, bot_id, "bot_disabled", channel="system", detail=f"Bot {bot_id} disabled")
    session.commit()
    _invalidate_bot_detail_cache(bot_id)
    return {"status": "disabled", "enabled": False}


@app.post("/api/bots/{bot_id}/deactivate")
def deactivate_bot(bot_id: str, session: Session = Depends(get_session)):
    """Same effect as disable, but recorded as a distinct 'bot_deactivated' event."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")

    docker_manager.stop_bot(bot_id)
    bot.enabled = False
    bot.docker_status = "STOPPED"
    if str(bot.current_state or "").upper() not in {"ERROR"}:
        bot.current_state = "IDLE"
    session.add(bot)
    record_activity_event(session, bot_id, "bot_deactivated", channel="system", detail=f"Bot {bot_id} deactivated")
    session.commit()
    _invalidate_bot_detail_cache(bot_id)
    return {"status": "deactivated"}


@app.delete("/api/bots/{bot_id}")
def delete_bot(bot_id: str, delete_workspace: bool = True, session: Session = Depends(get_session)):
    """Delete a bot: stop its container, purge all related DB rows, and
    optionally remove its workspace directory from disk."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")

    docker_manager.stop_bot(bot_id)

    # Manual cascade: delete every table that references this bot before the bot row.
    messages = session.exec(select(BotMessage).where(BotMessage.bot_id == bot_id)).all()
    for row in messages:
        session.delete(row)
    topic_items = session.exec(select(TopicItem).where(TopicItem.bot_id == bot_id)).all()
    for row in topic_items:
        session.delete(row)
    topics = session.exec(select(TopicTopic).where(TopicTopic.bot_id == bot_id)).all()
    for row in topics:
        session.delete(row)
    usage_rows = session.exec(select(BotRequestUsage).where(BotRequestUsage.bot_id == bot_id)).all()
    for row in usage_rows:
        session.delete(row)
    activity_rows = session.exec(select(BotActivityEvent).where(BotActivityEvent.bot_id == bot_id)).all()
    for row in activity_rows:
        session.delete(row)
    skill_install_rows = session.exec(select(BotSkillInstall).where(BotSkillInstall.bot_id == bot_id)).all()
    for row in skill_install_rows:
        session.delete(row)

    session.delete(bot)
    session.commit()

    if delete_workspace:
        workspace_root = os.path.join(BOTS_WORKSPACE_ROOT, bot_id)
        if os.path.isdir(workspace_root):
            shutil.rmtree(workspace_root, ignore_errors=True)

    _invalidate_bot_detail_cache(bot_id)
    _invalidate_bot_messages_cache(bot_id)
    return {"status": "deleted", "workspace_deleted": bool(delete_workspace)}
@app.get("/api/bots/{bot_id}/channels")
def list_bot_channels(bot_id: str, session: Session = Depends(get_session)):
    """Channels as currently present in the bot's workspace config."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return _get_bot_channels_from_config(bot)


@app.get("/api/platform/skills")
def list_skill_market(session: Session = Depends(get_session)):
    """List skill-market items with a per-skill count of INSTALLED installs."""
    items = session.exec(select(SkillMarketItem).order_by(SkillMarketItem.display_name, SkillMarketItem.id)).all()
    installs = session.exec(select(BotSkillInstall)).all()
    install_count_by_skill: Dict[int, int] = {}
    for row in installs:
        skill_id = int(row.skill_market_item_id or 0)
        if skill_id <= 0 or row.status != "INSTALLED":
            continue
        install_count_by_skill[skill_id] = install_count_by_skill.get(skill_id, 0) + 1
    return [
        _serialize_skill_market_item(item, install_count=install_count_by_skill.get(int(item.id or 0), 0))
        for item in items
    ]


@app.post("/api/platform/skills")
async def create_skill_market_item(
    skill_key: str = Form(""),
    display_name: str = Form(""),
    description: str = Form(""),
    file: UploadFile = File(...),
    session: Session = Depends(get_session),
):
    """Upload a skill zip and register it as a market item.

    The stored zip is removed again if the DB insert fails, so no orphan file
    is left behind.
    """
    upload_meta = await _store_skill_market_zip_upload(session, file)
    try:
        preferred_key = skill_key or display_name or os.path.splitext(upload_meta["zip_filename"])[0]
        next_key = _resolve_unique_skill_market_key(session, preferred_key)
        item = SkillMarketItem(
            skill_key=next_key,
            display_name=str(display_name or next_key).strip() or next_key,
            description=str(description or upload_meta["description"] or "").strip(),
            zip_filename=upload_meta["zip_filename"],
            zip_size_bytes=int(upload_meta["zip_size_bytes"] or 0),
            entry_names_json=json.dumps(upload_meta["entry_names"], ensure_ascii=False),
        )
        session.add(item)
        session.commit()
        session.refresh(item)
        return _serialize_skill_market_item(item, install_count=0)
    except Exception:
        # Roll back the stored zip so a failed DB insert leaves no orphan file.
        target_path = os.path.join(_skill_market_root(), upload_meta["zip_filename"])
        if os.path.exists(target_path):
            os.remove(target_path)
        raise


@app.put("/api/platform/skills/{skill_id}")
async def update_skill_market_item(
    skill_id: int,
    skill_key: str = Form(""),
    display_name: str = Form(""),
    description: str = Form(""),
    file: Optional[UploadFile] = File(None),
    session: Session = Depends(get_session),
):
    """Update a market item's metadata and optionally replace its zip package."""
    item = session.get(SkillMarketItem, skill_id)
    if not item:
        raise HTTPException(status_code=404, detail="Skill market item not found")

    old_filename = str(item.zip_filename or "").strip()
    upload_meta: Optional[Dict[str, Any]] = None
    if file is not None:
        upload_meta = await _store_skill_market_zip_upload(
            session,
            file,
            exclude_filename=old_filename or None,
            exclude_id=item.id,
        )

    next_key = _resolve_unique_skill_market_key(
        session,
        skill_key or item.skill_key or display_name or os.path.splitext(upload_meta["zip_filename"] if upload_meta else old_filename)[0],
        exclude_id=item.id,
    )
    item.skill_key = next_key
    item.display_name = str(display_name or item.display_name or next_key).strip() or next_key
    item.description = str(description or (upload_meta["description"] if upload_meta else item.description) or "").strip()
    item.updated_at = datetime.utcnow()
    if upload_meta:
        item.zip_filename = upload_meta["zip_filename"]
        item.zip_size_bytes = int(upload_meta["zip_size_bytes"] or 0)
        item.entry_names_json = json.dumps(upload_meta["entry_names"], ensure_ascii=False)
    session.add(item)
    session.commit()
    session.refresh(item)

    # Remove the superseded zip only after the DB row points at the new one.
    if upload_meta and old_filename and old_filename != upload_meta["zip_filename"]:
        old_path = os.path.join(_skill_market_root(), old_filename)
        if os.path.exists(old_path):
            os.remove(old_path)

    installs = session.exec(select(BotSkillInstall).where(BotSkillInstall.skill_market_item_id == skill_id)).all()
    install_count = sum(1 for row in installs if row.status == "INSTALLED")
    return _serialize_skill_market_item(item, install_count=install_count)


@app.delete("/api/platform/skills/{skill_id}")
def delete_skill_market_item(skill_id: int, session: Session = Depends(get_session)):
    """Delete a market item, its install rows, and its zip file on disk."""
    item = session.get(SkillMarketItem, skill_id)
    if not item:
        raise HTTPException(status_code=404, detail="Skill market item not found")
    zip_filename = str(item.zip_filename or "").strip()
    installs = session.exec(select(BotSkillInstall).where(BotSkillInstall.skill_market_item_id == skill_id)).all()
    for row in installs:
        session.delete(row)
    session.delete(item)
    session.commit()
    # File removal happens after the commit; a failure here leaves only a stray zip.
    if zip_filename:
        zip_path = os.path.join(_skill_market_root(), zip_filename)
        if os.path.exists(zip_path):
            os.remove(zip_path)
    return {"status": "deleted", "id": skill_id}


@app.get("/api/bots/{bot_id}/skills")
def list_bot_skills(bot_id: str, session: Session = Depends(get_session)):
    """Skills currently present in the bot workspace."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return _list_workspace_skills(bot_id)


@app.get("/api/bots/{bot_id}/skill-market")
def list_bot_skill_market(bot_id: str, session: Session = Depends(get_session)):
    """Market items annotated with this bot's install state.

    ``workspace_installed`` cross-checks the DB INSTALLED status against the
    entries actually present under the bot's skills directory.
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    items = session.exec(select(SkillMarketItem).order_by(SkillMarketItem.display_name, SkillMarketItem.id)).all()
    install_rows = session.exec(select(BotSkillInstall).where(BotSkillInstall.bot_id == bot_id)).all()
    install_lookup = {int(row.skill_market_item_id): row for row in install_rows}
    all_install_rows = session.exec(select(BotSkillInstall)).all()
    install_count_by_skill: Dict[int, int] = {}
    for row in all_install_rows:
        skill_id = int(row.skill_market_item_id or 0)
        if skill_id <= 0 or row.status != "INSTALLED":
            continue
        install_count_by_skill[skill_id] = install_count_by_skill.get(skill_id, 0) + 1
    return [
        _serialize_skill_market_item(
            item,
            install_count=install_count_by_skill.get(int(item.id or 0), 0),
            install_row=install_lookup.get(int(item.id or 0)),
            workspace_installed=(
                None
                if install_lookup.get(int(item.id or 0)) is None
                else (
                    install_lookup[int(item.id or 0)].status == "INSTALLED"
                    and all(
                        os.path.exists(os.path.join(_skills_root(bot_id), name))
                        for name in _parse_json_string_list(install_lookup[int(item.id or 0)].installed_entries_json)
                    )
                )
            ),
            installed_entries=(
                None
                if install_lookup.get(int(item.id or 0)) is None
                else _parse_json_string_list(install_lookup[int(item.id or 0)].installed_entries_json)
            ),
        )
        for item in items
    ]


@app.post("/api/bots/{bot_id}/skill-market/{skill_id}/install")
def install_bot_skill_from_market(bot_id: str, skill_id: int, session: Session = Depends(get_session)):
    """Install a market skill zip into the bot workspace, recording the outcome.

    The BotSkillInstall row is upserted either way: INSTALLED with the entry
    list on success; FAILED with last_error on failure (HTTPExceptions are
    re-raised, unexpected errors become HTTP 500).
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    item = session.get(SkillMarketItem, skill_id)
    if not item:
        raise HTTPException(status_code=404, detail="Skill market item not found")

    zip_path = os.path.join(_skill_market_root(), str(item.zip_filename or ""))
    if not os.path.isfile(zip_path):
        raise HTTPException(status_code=404, detail="Skill zip package not found")

    install_row = session.exec(
        select(BotSkillInstall).where(
            BotSkillInstall.bot_id == bot_id,
            BotSkillInstall.skill_market_item_id == skill_id,
        )
    ).first()

    try:
        install_result = _install_skill_zip_into_workspace(bot_id, zip_path)
        now = datetime.utcnow()
        if not install_row:
            install_row = BotSkillInstall(
                bot_id=bot_id,
                skill_market_item_id=skill_id,
            )
        install_row.installed_entries_json = json.dumps(install_result["installed"], ensure_ascii=False)
        install_row.source_zip_filename = str(item.zip_filename or "")
        install_row.status = "INSTALLED"
        install_row.last_error = None
        install_row.installed_at = now
        install_row.updated_at = now
        session.add(install_row)
        session.commit()
        session.refresh(install_row)
        return {
            "status": "installed",
            "bot_id": bot_id,
            "skill_market_item_id": skill_id,
            "installed": install_result["installed"],
            "skills": install_result["skills"],
            "market_item": _serialize_skill_market_item(item, install_count=0, install_row=install_row),
        }
    except HTTPException as exc:
        # Record the failure but propagate the original HTTP error unchanged.
        now = datetime.utcnow()
        if not install_row:
            install_row = BotSkillInstall(
                bot_id=bot_id,
                skill_market_item_id=skill_id,
                installed_at=now,
            )
        install_row.source_zip_filename = str(item.zip_filename or "")
        install_row.status = "FAILED"
        install_row.last_error = str(exc.detail or "Install failed")
        install_row.updated_at = now
        session.add(install_row)
        session.commit()
        raise
    except Exception as exc:
        now = datetime.utcnow()
        if not install_row:
            install_row = BotSkillInstall(
                bot_id=bot_id,
                skill_market_item_id=skill_id,
                installed_at=now,
            )
        install_row.source_zip_filename = str(item.zip_filename or "")
        install_row.status = "FAILED"
        # Error text is capped so the DB column cannot be flooded.
        install_row.last_error = str(exc or "Install failed")[:1000]
        install_row.updated_at = now
        session.add(install_row)
        session.commit()
        raise HTTPException(status_code=500, detail="Skill install failed unexpectedly") from exc
json.dumps(install_result["installed"], ensure_ascii=False) - install_row.source_zip_filename = str(item.zip_filename or "") - install_row.status = "INSTALLED" - install_row.last_error = None - install_row.installed_at = now - install_row.updated_at = now - session.add(install_row) - session.commit() - session.refresh(install_row) - return { - "status": "installed", - "bot_id": bot_id, - "skill_market_item_id": skill_id, - "installed": install_result["installed"], - "skills": install_result["skills"], - "market_item": _serialize_skill_market_item(item, install_count=0, install_row=install_row), - } - except HTTPException as exc: - now = datetime.utcnow() - if not install_row: - install_row = BotSkillInstall( - bot_id=bot_id, - skill_market_item_id=skill_id, - installed_at=now, - ) - install_row.source_zip_filename = str(item.zip_filename or "") - install_row.status = "FAILED" - install_row.last_error = str(exc.detail or "Install failed") - install_row.updated_at = now - session.add(install_row) - session.commit() - raise - except Exception as exc: - now = datetime.utcnow() - if not install_row: - install_row = BotSkillInstall( - bot_id=bot_id, - skill_market_item_id=skill_id, - installed_at=now, - ) - install_row.source_zip_filename = str(item.zip_filename or "") - install_row.status = "FAILED" - install_row.last_error = str(exc or "Install failed")[:1000] - install_row.updated_at = now - session.add(install_row) - session.commit() - raise HTTPException(status_code=500, detail="Skill install failed unexpectedly") from exc - - -@app.get("/api/bots/{bot_id}/tools-config") -def get_bot_tools_config(bot_id: str, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - return { - "bot_id": bot_id, - "tools_config": {}, - "managed_by_dashboard": False, - "hint": "Tools config is disabled in dashboard. 
Configure tool-related env vars manually.", - } - - -@app.put("/api/bots/{bot_id}/tools-config") -def update_bot_tools_config(bot_id: str, payload: BotToolsConfigUpdateRequest, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - raise HTTPException( - status_code=400, - detail="Tools config is no longer managed by dashboard. Please set required env vars manually.", - ) - - -@app.get("/api/bots/{bot_id}/mcp-config") -def get_bot_mcp_config(bot_id: str, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - config_data = _read_bot_config(bot_id) - tools_cfg = config_data.get("tools") if isinstance(config_data, dict) else {} - if not isinstance(tools_cfg, dict): - tools_cfg = {} - mcp_servers = _normalize_mcp_servers(tools_cfg.get("mcpServers")) - return { - "bot_id": bot_id, - "mcp_servers": mcp_servers, - "locked_servers": [], - "restart_required": True, - } - - -@app.put("/api/bots/{bot_id}/mcp-config") -def update_bot_mcp_config(bot_id: str, payload: BotMcpConfigUpdateRequest, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - config_data = _read_bot_config(bot_id) - if not isinstance(config_data, dict): - config_data = {} - tools_cfg = config_data.get("tools") - if not isinstance(tools_cfg, dict): - tools_cfg = {} - normalized_mcp_servers = _normalize_mcp_servers(payload.mcp_servers or {}) - current_mcp_servers = tools_cfg.get("mcpServers") - merged_mcp_servers = _merge_mcp_servers_preserving_extras(current_mcp_servers, normalized_mcp_servers) - tools_cfg["mcpServers"] = merged_mcp_servers - config_data["tools"] = tools_cfg - sanitized_after_save = _sanitize_mcp_servers_in_config_data(config_data) - _write_bot_config(bot_id, config_data) - 
_invalidate_bot_detail_cache(bot_id) - return { - "status": "updated", - "bot_id": bot_id, - "mcp_servers": _normalize_mcp_servers(sanitized_after_save), - "locked_servers": [], - "restart_required": True, - } - - -@app.get("/api/bots/{bot_id}/env-params") -def get_bot_env_params(bot_id: str, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - return { - "bot_id": bot_id, - "env_params": _read_env_store(bot_id), - } - - -@app.put("/api/bots/{bot_id}/env-params") -def update_bot_env_params(bot_id: str, payload: BotEnvParamsUpdateRequest, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - normalized = _normalize_env_params(payload.env_params) - _write_env_store(bot_id, normalized) - _invalidate_bot_detail_cache(bot_id) - return { - "status": "updated", - "bot_id": bot_id, - "env_params": normalized, - "restart_required": True, - } - - -@app.post("/api/bots/{bot_id}/skills/upload") -async def upload_bot_skill_zip(bot_id: str, file: UploadFile = File(...), session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - - tmp_zip_path: Optional[str] = None - try: - with tempfile.NamedTemporaryFile(prefix=".skill_upload_", suffix=".zip", delete=False) as tmp_zip: - tmp_zip_path = tmp_zip.name - filename = str(file.filename or "").strip() - if not filename.lower().endswith(".zip"): - raise HTTPException(status_code=400, detail="Only .zip skill package is supported") - max_bytes = get_platform_settings_snapshot().upload_max_mb * 1024 * 1024 - total_size = 0 - while True: - chunk = await file.read(1024 * 1024) - if not chunk: - break - total_size += len(chunk) - if total_size > max_bytes: - raise HTTPException( - status_code=413, - detail=f"Zip package too 
large (max {max_bytes // (1024 * 1024)}MB)", - ) - tmp_zip.write(chunk) - if total_size == 0: - raise HTTPException(status_code=400, detail="Zip package is empty") - finally: - await file.close() - try: - install_result = _install_skill_zip_into_workspace(bot_id, tmp_zip_path) - finally: - if tmp_zip_path and os.path.exists(tmp_zip_path): - os.remove(tmp_zip_path) - - return { - "status": "installed", - "bot_id": bot_id, - "installed": install_result["installed"], - "skills": install_result["skills"], - } - - -@app.delete("/api/bots/{bot_id}/skills/{skill_name}") -def delete_bot_skill(bot_id: str, skill_name: str, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - name = str(skill_name or "").strip() - if not _is_valid_top_level_skill_name(name): - raise HTTPException(status_code=400, detail="Invalid skill name") - root = _skills_root(bot_id) - target = os.path.abspath(os.path.join(root, name)) - if os.path.commonpath([os.path.abspath(root), target]) != os.path.abspath(root): - raise HTTPException(status_code=400, detail="Invalid skill path") - if not os.path.exists(target): - raise HTTPException(status_code=404, detail="Skill not found in workspace") - if os.path.isdir(target): - shutil.rmtree(target, ignore_errors=False) - else: - os.remove(target) - return {"status": "deleted", "bot_id": bot_id, "skill": name} - - -@app.post("/api/bots/{bot_id}/channels") -def create_bot_channel(bot_id: str, payload: ChannelConfigRequest, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - - ctype = (payload.channel_type or "").strip().lower() - if not ctype: - raise HTTPException(status_code=400, detail="channel_type is required") - if ctype == "dashboard": - raise HTTPException(status_code=400, detail="dashboard channel is built-in and cannot be created 
manually") - current_rows = _get_bot_channels_from_config(bot) - if any(str(row.get("channel_type") or "").lower() == ctype for row in current_rows): - raise HTTPException(status_code=400, detail=f"Channel already exists: {ctype}") - - new_row = { - "id": ctype, - "bot_id": bot_id, - "channel_type": ctype, - "external_app_id": (payload.external_app_id or "").strip() or f"{ctype}-{bot_id}", - "app_secret": (payload.app_secret or "").strip(), - "internal_port": max(1, min(int(payload.internal_port or 8080), 65535)), - "is_active": bool(payload.is_active), - "extra_config": _normalize_channel_extra(payload.extra_config), - "locked": False, - } - - config_data = _read_bot_config(bot_id) - channels_cfg = config_data.get("channels") - if not isinstance(channels_cfg, dict): - channels_cfg = {} - config_data["channels"] = channels_cfg - channels_cfg[ctype] = _channel_api_to_cfg(new_row) - _write_bot_config(bot_id, config_data) - _sync_workspace_channels(session, bot_id) - _invalidate_bot_detail_cache(bot_id) - return new_row - - -@app.put("/api/bots/{bot_id}/channels/{channel_id}") -def update_bot_channel( - bot_id: str, - channel_id: str, - payload: ChannelConfigUpdateRequest, - session: Session = Depends(get_session), -): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - - channel_key = str(channel_id or "").strip().lower() - rows = _get_bot_channels_from_config(bot) - row = next((r for r in rows if str(r.get("id") or "").lower() == channel_key), None) - if not row: - raise HTTPException(status_code=404, detail="Channel not found") - if str(row.get("channel_type") or "").strip().lower() == "dashboard" or bool(row.get("locked")): - raise HTTPException(status_code=400, detail="dashboard channel is built-in and cannot be modified") - - update_data = payload.model_dump(exclude_unset=True) - existing_type = str(row.get("channel_type") or "").strip().lower() - new_type = existing_type - if "channel_type" in 
update_data and update_data["channel_type"] is not None: - new_type = str(update_data["channel_type"]).strip().lower() - if not new_type: - raise HTTPException(status_code=400, detail="channel_type cannot be empty") - if existing_type == "dashboard" and new_type != "dashboard": - raise HTTPException(status_code=400, detail="dashboard channel type cannot be changed") - if new_type != existing_type and any(str(r.get("channel_type") or "").lower() == new_type for r in rows): - raise HTTPException(status_code=400, detail=f"Channel already exists: {new_type}") - - if "external_app_id" in update_data and update_data["external_app_id"] is not None: - row["external_app_id"] = str(update_data["external_app_id"]).strip() - if "app_secret" in update_data and update_data["app_secret"] is not None: - row["app_secret"] = str(update_data["app_secret"]).strip() - if "internal_port" in update_data and update_data["internal_port"] is not None: - row["internal_port"] = max(1, min(int(update_data["internal_port"]), 65535)) - if "is_active" in update_data and update_data["is_active"] is not None: - next_active = bool(update_data["is_active"]) - if existing_type == "dashboard" and not next_active: - raise HTTPException(status_code=400, detail="dashboard channel must remain enabled") - row["is_active"] = next_active - if "extra_config" in update_data: - row["extra_config"] = _normalize_channel_extra(update_data.get("extra_config")) - row["channel_type"] = new_type - row["id"] = new_type - row["locked"] = new_type == "dashboard" - - config_data = _read_bot_config(bot_id) - channels_cfg = config_data.get("channels") - if not isinstance(channels_cfg, dict): - channels_cfg = {} - config_data["channels"] = channels_cfg - current_send_progress, current_send_tool_hints = _read_global_delivery_flags(channels_cfg) - if new_type == "dashboard": - extra = _normalize_channel_extra(row.get("extra_config")) - channels_cfg["sendProgress"] = bool(extra.get("sendProgress", current_send_progress)) - 
channels_cfg["sendToolHints"] = bool(extra.get("sendToolHints", current_send_tool_hints)) - else: - channels_cfg["sendProgress"] = current_send_progress - channels_cfg["sendToolHints"] = current_send_tool_hints - channels_cfg.pop("dashboard", None) - if existing_type != "dashboard" and existing_type in channels_cfg and existing_type != new_type: - channels_cfg.pop(existing_type, None) - if new_type != "dashboard": - channels_cfg[new_type] = _channel_api_to_cfg(row) - _write_bot_config(bot_id, config_data) - session.commit() - _sync_workspace_channels(session, bot_id) - _invalidate_bot_detail_cache(bot_id) - return row - - -@app.delete("/api/bots/{bot_id}/channels/{channel_id}") -def delete_bot_channel(bot_id: str, channel_id: str, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - - channel_key = str(channel_id or "").strip().lower() - rows = _get_bot_channels_from_config(bot) - row = next((r for r in rows if str(r.get("id") or "").lower() == channel_key), None) - if not row: - raise HTTPException(status_code=404, detail="Channel not found") - if str(row.get("channel_type") or "").lower() == "dashboard": - raise HTTPException(status_code=400, detail="dashboard channel cannot be deleted") - - config_data = _read_bot_config(bot_id) - channels_cfg = config_data.get("channels") - if not isinstance(channels_cfg, dict): - channels_cfg = {} - config_data["channels"] = channels_cfg - channels_cfg.pop(str(row.get("channel_type") or "").lower(), None) - _write_bot_config(bot_id, config_data) - session.commit() - _sync_workspace_channels(session, bot_id) - _invalidate_bot_detail_cache(bot_id) - return {"status": "deleted"} - - -@app.post("/api/bots/{bot_id}/command") -def send_command(bot_id: str, payload: CommandRequest, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot 
not found") - runtime_snapshot = _read_bot_runtime_snapshot(bot) - - attachments = _normalize_media_list(payload.attachments, bot_id) - command = str(payload.command or "").strip() - if not command and not attachments: - raise HTTPException(status_code=400, detail="Command or attachments is required") - - checked_attachments: List[str] = [] - for rel in attachments: - _, target = _resolve_workspace_path(bot_id, rel) - if not os.path.isfile(target): - raise HTTPException(status_code=400, detail=f"attachment not found: {rel}") - checked_attachments.append(rel) - delivery_media = [f"/root/.nanobot/workspace/{p.lstrip('/')}" for p in checked_attachments] - - display_command = command if command else "[attachment message]" - delivery_command = display_command - if checked_attachments: - attachment_block = "\n".join(f"- {p}" for p in checked_attachments) - all_visual = all(_is_visual_attachment_path(p) for p in checked_attachments) - if all_visual: - has_video = any(_is_video_attachment_path(p) for p in checked_attachments) - media_label = "图片/视频" if has_video else "图片" - capability_hint = ( - "1) 附件已随请求附带;图片在可用时可直接作为多模态输入理解,视频请按附件路径处理。\n" - if has_video - else "1) 附件中的图片已作为多模态输入提供,优先直接理解并回答。\n" - ) - if command: - delivery_command = ( - f"{command}\n\n" - "[Attached files]\n" - f"{attachment_block}\n\n" - "【附件处理要求】\n" - f"{capability_hint}" - "2) 若当前模型或接口不支持直接理解该附件,请明确说明后再调用工具解析。\n" - "3) 除非用户明确要求,不要先调用工具读取附件文件。\n" - "4) 回复语言必须遵循 USER.md;若未指定,则与用户当前输入语言保持一致。\n" - "5) 仅基于可见内容回答;看不清或无法确认的部分请明确说明,不要猜测。" - ) - else: - delivery_command = ( - "请先处理已附带的附件列表:\n" - f"{attachment_block}\n\n" - f"请直接分析已附带的{media_label}并总结关键信息。\n" - f"{'图片在可用时可直接作为多模态输入理解,视频请按附件路径处理。' if has_video else ''}\n" - "若当前模型或接口不支持直接理解该附件,请明确说明后再调用工具解析。\n" - "回复语言必须遵循 USER.md;若未指定,则与用户当前输入语言保持一致。\n" - "仅基于可见内容回答;看不清或无法确认的部分请明确说明,不要猜测。" - ) - else: - command_has_paths = all(p in command for p in checked_attachments) if command else False - if command and not command_has_paths: - delivery_command = ( - 
f"{command}\n\n" - "[Attached files]\n" - f"{attachment_block}\n\n" - "Please process the attached file(s) listed above when answering this request.\n" - "Reply language must follow USER.md. If not specified, use the same language as the user input." - ) - elif not command: - delivery_command = ( - "Please process the uploaded file(s) listed below:\n" - f"{attachment_block}\n\n" - "Reply language must follow USER.md. If not specified, use the same language as the user input." - ) - - request_id = create_usage_request( - session, - bot_id, - display_command, - attachments=checked_attachments, - channel="dashboard", - metadata={"attachment_count": len(checked_attachments)}, - provider=str(runtime_snapshot.get("llm_provider") or "").strip() or None, - model=str(runtime_snapshot.get("llm_model") or "").strip() or None, - ) - record_activity_event( - session, - bot_id, - "command_submitted", - request_id=request_id, - channel="dashboard", - detail="command submitted", - metadata={ - "attachment_count": len(checked_attachments), - "has_text": bool(command), - }, - ) - session.commit() - - outbound_user_packet: Optional[Dict[str, Any]] = None - if display_command or checked_attachments: - outbound_user_packet = { - "type": "USER_COMMAND", - "channel": "dashboard", - "text": display_command, - "media": checked_attachments, - "request_id": request_id, - } - _persist_runtime_packet(bot_id, outbound_user_packet) - - loop = getattr(app.state, "main_loop", None) - if loop and loop.is_running() and outbound_user_packet: - asyncio.run_coroutine_threadsafe( - manager.broadcast(bot_id, outbound_user_packet), - loop, - ) - - success = docker_manager.send_command(bot_id, delivery_command, media=delivery_media) - if not success: - detail = docker_manager.get_last_delivery_error(bot_id) - fail_latest_usage(session, bot_id, detail or "command delivery failed") - record_activity_event( - session, - bot_id, - "command_failed", - request_id=request_id, - channel="dashboard", - 
detail=(detail or "command delivery failed")[:400], - ) - session.commit() - if loop and loop.is_running(): - asyncio.run_coroutine_threadsafe( - manager.broadcast( - bot_id, - { - "type": "AGENT_STATE", - "channel": "dashboard", - "payload": { - "state": "ERROR", - "action_msg": detail or "command delivery failed", - }, - }, - ), - loop, - ) - raise HTTPException( - status_code=502, - detail=f"Failed to deliver command to bot dashboard channel{': ' + detail if detail else ''}", - ) - return {"success": True} - - -@app.get("/api/bots/{bot_id}/messages") -def list_bot_messages(bot_id: str, limit: int = 200, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - - safe_limit = max(1, min(int(limit), 500)) - cached = cache.get_json(_cache_key_bot_messages(bot_id, safe_limit)) - if isinstance(cached, list): - return cached - rows = session.exec( - select(BotMessage) - .where(BotMessage.bot_id == bot_id) - .order_by(BotMessage.created_at.desc(), BotMessage.id.desc()) - .limit(safe_limit) - ).all() - ordered = list(reversed(rows)) - payload = [_serialize_bot_message_row(bot_id, row) for row in ordered] - cache.set_json(_cache_key_bot_messages(bot_id, safe_limit), payload, ttl=30) - return payload - - -@app.get("/api/bots/{bot_id}/messages/page") -def list_bot_messages_page( - bot_id: str, - limit: Optional[int] = None, - before_id: Optional[int] = None, - session: Session = Depends(get_session), -): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - - configured_limit = get_chat_pull_page_size() - safe_limit = max(1, min(int(limit if limit is not None else configured_limit), 500)) - safe_before_id = int(before_id) if isinstance(before_id, int) and before_id > 0 else None - cache_key = _cache_key_bot_messages_page(bot_id, safe_limit, safe_before_id) - cached = cache.get_json(cache_key) - if 
isinstance(cached, dict) and isinstance(cached.get("items"), list): - return cached - - stmt = ( - select(BotMessage) - .where(BotMessage.bot_id == bot_id) - .order_by(BotMessage.created_at.desc(), BotMessage.id.desc()) - .limit(safe_limit + 1) - ) - if safe_before_id is not None: - stmt = stmt.where(BotMessage.id < safe_before_id) - - rows = session.exec(stmt).all() - has_more = len(rows) > safe_limit - if has_more: - rows = rows[:safe_limit] - ordered = list(reversed(rows)) - items = [_serialize_bot_message_row(bot_id, row) for row in ordered] - next_before_id = rows[-1].id if rows else None - payload = { - "items": items, - "has_more": bool(has_more), - "next_before_id": next_before_id, - "limit": safe_limit, - } - cache.set_json(cache_key, payload, ttl=30) - return payload - - -@app.get("/api/bots/{bot_id}/messages/by-date") -def list_bot_messages_by_date( - bot_id: str, - date: str, - tz_offset_minutes: Optional[int] = None, - limit: Optional[int] = None, - session: Session = Depends(get_session), -): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - - utc_start, utc_end = _resolve_local_day_range(date, tz_offset_minutes) - configured_limit = max(60, get_chat_pull_page_size()) - safe_limit = max(12, min(int(limit if limit is not None else configured_limit), 240)) - before_limit = max(3, min(18, safe_limit // 4)) - after_limit = max(0, safe_limit - before_limit - 1) - - exact_anchor = session.exec( - select(BotMessage) - .where( - BotMessage.bot_id == bot_id, - BotMessage.created_at >= utc_start, - BotMessage.created_at < utc_end, - ) - .order_by(BotMessage.created_at.asc(), BotMessage.id.asc()) - .limit(1) - ).first() - - anchor = exact_anchor - matched_exact_date = exact_anchor is not None - if anchor is None: - next_row = session.exec( - select(BotMessage) - .where(BotMessage.bot_id == bot_id, BotMessage.created_at >= utc_end) - .order_by(BotMessage.created_at.asc(), BotMessage.id.asc()) - 
.limit(1) - ).first() - prev_row = session.exec( - select(BotMessage) - .where(BotMessage.bot_id == bot_id, BotMessage.created_at < utc_start) - .order_by(BotMessage.created_at.desc(), BotMessage.id.desc()) - .limit(1) - ).first() - - if next_row and prev_row: - gap_after = next_row.created_at - utc_end - gap_before = utc_start - prev_row.created_at - anchor = next_row if gap_after <= gap_before else prev_row - else: - anchor = next_row or prev_row - - if anchor is None or anchor.id is None: - return { - "items": [], - "anchor_id": None, - "resolved_ts": None, - "matched_exact_date": False, - "has_more_before": False, - "has_more_after": False, - } - - before_rows = session.exec( - select(BotMessage) - .where(BotMessage.bot_id == bot_id, BotMessage.id < anchor.id) - .order_by(BotMessage.created_at.desc(), BotMessage.id.desc()) - .limit(before_limit) - ).all() - after_rows = session.exec( - select(BotMessage) - .where(BotMessage.bot_id == bot_id, BotMessage.id > anchor.id) - .order_by(BotMessage.created_at.asc(), BotMessage.id.asc()) - .limit(after_limit) - ).all() - - ordered = list(reversed(before_rows)) + [anchor] + after_rows - first_row = ordered[0] if ordered else None - last_row = ordered[-1] if ordered else None - - has_more_before = False - if first_row is not None and first_row.id is not None: - has_more_before = session.exec( - select(BotMessage.id) - .where(BotMessage.bot_id == bot_id, BotMessage.id < first_row.id) - .order_by(BotMessage.id.desc()) - .limit(1) - ).first() is not None - - has_more_after = False - if last_row is not None and last_row.id is not None: - has_more_after = session.exec( - select(BotMessage.id) - .where(BotMessage.bot_id == bot_id, BotMessage.id > last_row.id) - .order_by(BotMessage.id.asc()) - .limit(1) - ).first() is not None - - return { - "items": [_serialize_bot_message_row(bot_id, row) for row in ordered], - "anchor_id": anchor.id, - "resolved_ts": int(anchor.created_at.timestamp() * 1000), - "matched_exact_date": 
matched_exact_date, - "has_more_before": has_more_before, - "has_more_after": has_more_after, - } - - -@app.put("/api/bots/{bot_id}/messages/{message_id}/feedback") -def update_bot_message_feedback( - bot_id: str, - message_id: int, - payload: MessageFeedbackRequest, - session: Session = Depends(get_session), -): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - row = session.get(BotMessage, message_id) - if not row or row.bot_id != bot_id: - raise HTTPException(status_code=404, detail="Message not found") - if row.role != "assistant": - raise HTTPException(status_code=400, detail="Only assistant messages support feedback") - - raw = str(payload.feedback or "").strip().lower() - if raw in {"", "none", "null"}: - row.feedback = None - row.feedback_at = None - elif raw in {"up", "down"}: - row.feedback = raw - row.feedback_at = datetime.utcnow() - else: - raise HTTPException(status_code=400, detail="feedback must be 'up' or 'down'") - - session.add(row) - session.commit() - _invalidate_bot_messages_cache(bot_id) - return { - "status": "updated", - "bot_id": bot_id, - "message_id": row.id, - "feedback": row.feedback, - "feedback_at": row.feedback_at.isoformat() if row.feedback_at else None, - } - - -@app.delete("/api/bots/{bot_id}/messages") -def clear_bot_messages(bot_id: str, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - - rows = session.exec(select(BotMessage).where(BotMessage.bot_id == bot_id)).all() - deleted = 0 - for row in rows: - session.delete(row) - deleted += 1 - cleared_sessions = _clear_bot_sessions(bot_id) - # Also reset in-memory session cache for running bot process. 
- if str(bot.docker_status or "").upper() == "RUNNING": - try: - docker_manager.send_command(bot_id, "/new") - except Exception: - pass - bot.last_action = "" - bot.current_state = "IDLE" - bot.updated_at = datetime.utcnow() - session.add(bot) - record_activity_event( - session, - bot_id, - "history_cleared", - channel="system", - detail=f"Cleared {deleted} stored messages", - metadata={"deleted_messages": deleted, "cleared_sessions": cleared_sessions}, - ) - session.commit() - _invalidate_bot_detail_cache(bot_id) - _invalidate_bot_messages_cache(bot_id) - return {"bot_id": bot_id, "deleted": deleted, "cleared_sessions": cleared_sessions} - - -@app.post("/api/bots/{bot_id}/sessions/dashboard-direct/clear") -def clear_bot_dashboard_direct_session(bot_id: str, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - - result = _clear_bot_dashboard_direct_session(bot_id) - if str(bot.docker_status or "").upper() == "RUNNING": - try: - docker_manager.send_command(bot_id, "/new") - except Exception: - pass - - bot.updated_at = datetime.utcnow() - session.add(bot) - record_activity_event( - session, - bot_id, - "dashboard_session_cleared", - channel="dashboard", - detail="Cleared dashboard_direct session file", - metadata={"session_file": result["path"], "previously_existed": result["existed"]}, - ) - session.commit() - _invalidate_bot_detail_cache(bot_id) - return {"bot_id": bot_id, "cleared": True, "session_file": result["path"], "previously_existed": result["existed"]} - - -@app.get("/api/bots/{bot_id}/logs") -def get_bot_logs(bot_id: str, tail: int = 300, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - return {"bot_id": bot_id, "logs": docker_manager.get_recent_logs(bot_id, tail=tail)} - - -@app.get("/api/bots/{bot_id}/workspace/tree") -def 
get_workspace_tree( - bot_id: str, - path: Optional[str] = None, - recursive: bool = False, - session: Session = Depends(get_session), -): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - - root = _workspace_root(bot_id) - if not os.path.isdir(root): - return {"bot_id": bot_id, "root": root, "cwd": "", "parent": None, "entries": []} - - _, target = _resolve_workspace_path(bot_id, path) - if not os.path.isdir(target): - raise HTTPException(status_code=400, detail="workspace path is not a directory") - cwd = os.path.relpath(target, root).replace("\\", "/") - if cwd == ".": - cwd = "" - parent = None - if cwd: - parent = os.path.dirname(cwd).replace("\\", "/") - if parent == ".": - parent = "" - - return { - "bot_id": bot_id, - "root": root, - "cwd": cwd, - "parent": parent, - "entries": _list_workspace_dir_recursive(target, root) if recursive else _list_workspace_dir(target, root), - } - - -@app.get("/api/bots/{bot_id}/workspace/file") -def read_workspace_file( - bot_id: str, - path: str, - max_bytes: int = 200000, - session: Session = Depends(get_session), -): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - - root, target = _resolve_workspace_path(bot_id, path) - if not os.path.isfile(target): - raise HTTPException(status_code=404, detail="workspace file not found") - - ext = os.path.splitext(target)[1].lower() - text_ext = { - "", - ".md", - ".txt", - ".log", - ".json", - ".yaml", - ".yml", - ".cfg", - ".ini", - ".csv", - ".tsv", - ".toml", - ".py", - ".sh", - } - if ext not in text_ext: - raise HTTPException(status_code=400, detail=f"unsupported file type: {ext or '(none)'}") - - safe_max = max(4096, min(int(max_bytes), 1000000)) - with open(target, "rb") as f: - raw = f.read(safe_max + 1) - - if b"\x00" in raw: - raise HTTPException(status_code=400, detail="binary file is not previewable") - - truncated = len(raw) > 
safe_max - body = raw[:safe_max] if truncated else raw - text_body = body.decode("utf-8", errors="replace") - rel_path = os.path.relpath(target, root).replace("\\", "/") - is_markdown = ext in {".md", ".markdown"} - - return { - "bot_id": bot_id, - "path": rel_path, - "size": os.path.getsize(target), - "is_markdown": is_markdown, - "truncated": truncated, - "content": text_body, - } - - -@app.put("/api/bots/{bot_id}/workspace/file") -def update_workspace_file( - bot_id: str, - path: str, - payload: WorkspaceFileUpdateRequest, - session: Session = Depends(get_session), -): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - - root, target = _resolve_workspace_path(bot_id, path) - if not os.path.isfile(target): - raise HTTPException(status_code=404, detail="workspace file not found") - - ext = os.path.splitext(target)[1].lower() - if ext not in {".md", ".markdown"}: - raise HTTPException(status_code=400, detail=f"editing is only supported for markdown files: {ext or '(none)'}") - - content = str(payload.content or "") - encoded = content.encode("utf-8") - if len(encoded) > 2_000_000: - raise HTTPException(status_code=413, detail="markdown file too large to save") - if "\x00" in content: - raise HTTPException(status_code=400, detail="markdown content contains invalid null bytes") - - _write_text_atomic(target, content) - rel_path = os.path.relpath(target, root).replace("\\", "/") - return { - "bot_id": bot_id, - "path": rel_path, - "size": os.path.getsize(target), - "is_markdown": True, - "truncated": False, - "content": content, - } - - -def _stream_file_range(target: str, start: int, end: int, chunk_size: int = 1024 * 1024): - with open(target, "rb") as fh: - fh.seek(start) - remaining = end - start + 1 - while remaining > 0: - chunk = fh.read(min(chunk_size, remaining)) - if not chunk: - break - remaining -= len(chunk) - yield chunk - - -def _build_ranged_workspace_response(target: str, media_type: 
str, range_header: str): - file_size = os.path.getsize(target) - range_match = re.match(r"bytes=(\d*)-(\d*)", range_header.strip()) - if not range_match: - raise HTTPException(status_code=416, detail="Invalid range") - - start_raw, end_raw = range_match.groups() - if start_raw == "" and end_raw == "": - raise HTTPException(status_code=416, detail="Invalid range") - - if start_raw == "": - length = int(end_raw) - if length <= 0: - raise HTTPException(status_code=416, detail="Invalid range") - start = max(file_size - length, 0) - end = file_size - 1 - else: - start = int(start_raw) - end = int(end_raw) if end_raw else file_size - 1 - - if start >= file_size or start < 0: - raise HTTPException(status_code=416, detail="Requested range not satisfiable") - end = min(end, file_size - 1) - if end < start: - raise HTTPException(status_code=416, detail="Requested range not satisfiable") - - content_length = end - start + 1 - headers = { - "Accept-Ranges": "bytes", - "Content-Range": f"bytes {start}-{end}/{file_size}", - "Content-Length": str(content_length), - } - return StreamingResponse( - _stream_file_range(target, start, end), - status_code=206, - media_type=media_type or "application/octet-stream", - headers=headers, - ) - - -def _build_workspace_raw_url(bot_id: str, path: str, public: bool) -> str: - normalized = "/".join(part for part in str(path or "").strip().split("/") if part) - if not normalized: - return "" - prefix = "/public" if public else "/api" - return f"{prefix}/bots/{quote(bot_id, safe='')}/workspace/raw/{quote(normalized, safe='/')}" - - -def _serve_workspace_file( - bot_id: str, - path: str, - download: bool, - request: Request, - session: Session, - *, - public: bool = False, - redirect_html_to_raw: bool = False, -): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - - _root, target = _resolve_workspace_path(bot_id, path) - if not os.path.isfile(target): - raise 
HTTPException(status_code=404, detail="File not found") - - media_type, _ = mimetypes.guess_type(target) - if redirect_html_to_raw and not download and str(media_type or "").startswith("text/html"): - raw_url = _build_workspace_raw_url(bot_id, path, public=public) - if raw_url: - return RedirectResponse(url=raw_url, status_code=307) - range_header = request.headers.get("range", "") - if range_header and not download: - return _build_ranged_workspace_response(target, media_type or "application/octet-stream", range_header) - - common_headers = {"Accept-Ranges": "bytes"} - if download: - return FileResponse( - target, - media_type=media_type or "application/octet-stream", - filename=os.path.basename(target), - headers=common_headers, - ) - return FileResponse(target, media_type=media_type or "application/octet-stream", headers=common_headers) - - -@app.get("/api/bots/{bot_id}/cron/jobs") -def list_cron_jobs(bot_id: str, include_disabled: bool = True, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - store = _read_cron_store(bot_id) - rows = [] - for row in store.get("jobs", []): - if not isinstance(row, dict): - continue - enabled = bool(row.get("enabled", True)) - if not include_disabled and not enabled: - continue - rows.append(row) - rows.sort(key=lambda v: int(((v.get("state") or {}).get("nextRunAtMs")) or 2**62)) - return {"bot_id": bot_id, "version": int(store.get("version", 1) or 1), "jobs": rows} - - -@app.post("/api/bots/{bot_id}/cron/jobs/{job_id}/stop") -def stop_cron_job(bot_id: str, job_id: str, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - store = _read_cron_store(bot_id) - jobs = store.get("jobs", []) - if not isinstance(jobs, list): - jobs = [] - found = None - for row in jobs: - if isinstance(row, dict) and str(row.get("id")) 
== job_id: - found = row - break - if not found: - raise HTTPException(status_code=404, detail="Cron job not found") - found["enabled"] = False - found["updatedAtMs"] = int(datetime.utcnow().timestamp() * 1000) - _write_cron_store(bot_id, {"version": int(store.get("version", 1) or 1), "jobs": jobs}) - return {"status": "stopped", "job_id": job_id} - - -@app.delete("/api/bots/{bot_id}/cron/jobs/{job_id}") -def delete_cron_job(bot_id: str, job_id: str, session: Session = Depends(get_session)): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - store = _read_cron_store(bot_id) - jobs = store.get("jobs", []) - if not isinstance(jobs, list): - jobs = [] - kept = [row for row in jobs if not (isinstance(row, dict) and str(row.get("id")) == job_id)] - if len(kept) == len(jobs): - raise HTTPException(status_code=404, detail="Cron job not found") - _write_cron_store(bot_id, {"version": int(store.get("version", 1) or 1), "jobs": kept}) - return {"status": "deleted", "job_id": job_id} - - -@app.get("/api/bots/{bot_id}/workspace/download") -def download_workspace_file( - bot_id: str, - path: str, - download: bool = False, - request: Request = None, - session: Session = Depends(get_session), -): - return _serve_workspace_file( - bot_id=bot_id, - path=path, - download=download, - request=request, - session=session, - public=False, - redirect_html_to_raw=True, - ) - - -@app.get("/public/bots/{bot_id}/workspace/download") -def public_download_workspace_file( - bot_id: str, - path: str, - download: bool = False, - request: Request = None, - session: Session = Depends(get_session), -): - return _serve_workspace_file( - bot_id=bot_id, - path=path, - download=download, - request=request, - session=session, - public=True, - redirect_html_to_raw=True, - ) - - -@app.get("/api/bots/{bot_id}/workspace/raw/{path:path}") -def raw_workspace_file( - bot_id: str, - path: str, - download: bool = False, - request: Request = None, 
- session: Session = Depends(get_session), -): - return _serve_workspace_file( - bot_id=bot_id, - path=path, - download=download, - request=request, - session=session, - public=False, - redirect_html_to_raw=False, - ) - - -@app.get("/public/bots/{bot_id}/workspace/raw/{path:path}") -def public_raw_workspace_file( - bot_id: str, - path: str, - download: bool = False, - request: Request = None, - session: Session = Depends(get_session), -): - return _serve_workspace_file( - bot_id=bot_id, - path=path, - download=download, - request=request, - session=session, - public=True, - redirect_html_to_raw=False, - ) - - -@app.post("/api/bots/{bot_id}/workspace/upload") -async def upload_workspace_files( - bot_id: str, - files: List[UploadFile] = File(...), - path: Optional[str] = None, - session: Session = Depends(get_session), -): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - if not files: - raise HTTPException(status_code=400, detail="no files uploaded") - platform_settings = get_platform_settings_snapshot() - max_bytes = platform_settings.upload_max_mb * 1024 * 1024 - allowed_extensions = set(platform_settings.allowed_attachment_extensions) - - root, upload_dir = _resolve_workspace_path(bot_id, path or "uploads") - os.makedirs(upload_dir, exist_ok=True) - safe_dir_real = os.path.abspath(upload_dir) - if os.path.commonpath([root, safe_dir_real]) != root: - raise HTTPException(status_code=400, detail="invalid upload target path") - - rows: List[Dict[str, Any]] = [] - for upload in files: - original = (upload.filename or "upload.bin").strip() or "upload.bin" - name = os.path.basename(original).replace("\\", "_").replace("/", "_") - name = re.sub(r"[^\w.\-()+@ ]+", "_", name) - if not name: - name = "upload.bin" - ext = str(os.path.splitext(name)[1] or "").strip().lower() - if allowed_extensions and ext not in allowed_extensions: - raise HTTPException( - status_code=400, - detail=f"File '{name}' 
extension is not allowed. Allowed: {', '.join(sorted(allowed_extensions))}", - ) - - abs_path = os.path.join(safe_dir_real, name) - if os.path.exists(abs_path): - base, ext = os.path.splitext(name) - name = f"{base}-{int(datetime.utcnow().timestamp())}{ext}" - abs_path = os.path.join(safe_dir_real, name) - - total_size = 0 - try: - with open(abs_path, "wb") as f: - while True: - chunk = await upload.read(1024 * 1024) - if not chunk: - break - total_size += len(chunk) - if total_size > max_bytes: - raise HTTPException( - status_code=413, - detail=f"File '{name}' too large (max {max_bytes // (1024 * 1024)}MB)", - ) - f.write(chunk) - except HTTPException: - if os.path.exists(abs_path): - os.remove(abs_path) - raise - except OSError as exc: - if os.path.exists(abs_path): - os.remove(abs_path) - raise HTTPException( - status_code=500, - detail=f"Failed to write file '{name}': {exc.strerror or str(exc)}", - ) - except Exception: - if os.path.exists(abs_path): - os.remove(abs_path) - raise HTTPException(status_code=500, detail=f"Failed to upload file '{name}'") - finally: - await upload.close() - - rel = os.path.relpath(abs_path, root).replace("\\", "/") - rows.append({"name": name, "path": rel, "size": total_size}) - - return {"bot_id": bot_id, "files": rows} - - -@app.post("/api/bots/{bot_id}/speech/transcribe") -async def transcribe_bot_speech( - bot_id: str, - file: UploadFile = File(...), - language: Optional[str] = Form(None), - session: Session = Depends(get_session), -): - bot = session.get(BotInstance, bot_id) - if not bot: - raise HTTPException(status_code=404, detail="Bot not found") - speech_settings = get_speech_runtime_settings() - if not speech_settings["enabled"]: - raise HTTPException(status_code=400, detail="Speech recognition is disabled") - if not file: - raise HTTPException(status_code=400, detail="no audio file uploaded") - - original_name = str(file.filename or "audio.webm").strip() or "audio.webm" - safe_name = 
os.path.basename(original_name).replace("\\", "_").replace("/", "_") - ext = os.path.splitext(safe_name)[1].strip().lower() or ".webm" - if len(ext) > 12: - ext = ".webm" - - tmp_path = "" - try: - with tempfile.NamedTemporaryFile(delete=False, suffix=ext, prefix=".speech_", dir=DATA_ROOT) as tmp: - tmp_path = tmp.name - while True: - chunk = await file.read(1024 * 1024) - if not chunk: - break - tmp.write(chunk) - - if not tmp_path or not os.path.exists(tmp_path) or os.path.getsize(tmp_path) <= 0: - raise HTTPException(status_code=400, detail="audio payload is empty") - - resolved_language = str(language or "").strip() or speech_settings["default_language"] - result = await asyncio.to_thread(speech_service.transcribe_file, tmp_path, resolved_language) - text = str(result.get("text") or "").strip() - if not text: - raise HTTPException(status_code=400, detail="No speech detected") - return { - "bot_id": bot_id, - "text": text, - "duration_seconds": result.get("duration_seconds"), - "max_audio_seconds": speech_settings["max_audio_seconds"], - "model": speech_settings["model"], - "device": speech_settings["device"], - "language": result.get("language") or resolved_language, - } - except SpeechDisabledError as exc: - logger.warning( - "speech transcribe disabled bot_id=%s file=%s language=%s detail=%s", - bot_id, - safe_name, - language, - exc, - ) - raise HTTPException(status_code=400, detail=str(exc)) - except SpeechDurationError: - logger.warning( - "speech transcribe too long bot_id=%s file=%s language=%s max_seconds=%s", - bot_id, - safe_name, - language, - speech_settings["max_audio_seconds"], - ) - raise HTTPException(status_code=413, detail=f"Audio duration exceeds {speech_settings['max_audio_seconds']} seconds") - except SpeechServiceError as exc: - logger.exception( - "speech transcribe failed bot_id=%s file=%s language=%s", - bot_id, - safe_name, - language, - ) - raise HTTPException(status_code=400, detail=str(exc)) - except HTTPException: - raise - except 
Exception as exc: - logger.exception( - "speech transcribe unexpected error bot_id=%s file=%s language=%s", - bot_id, - safe_name, - language, - ) - raise HTTPException(status_code=500, detail=f"speech transcription failed: {exc}") - finally: - try: - await file.close() - except Exception: - pass - if tmp_path and os.path.exists(tmp_path): - try: - os.remove(tmp_path) - except Exception: - pass - - -@app.websocket("/ws/monitor/{bot_id}") -async def websocket_endpoint(websocket: WebSocket, bot_id: str): - with Session(engine) as session: - bot = session.get(BotInstance, bot_id) - if not bot: - await websocket.close(code=4404, reason="Bot not found") - return - - connected = False - try: - await manager.connect(bot_id, websocket) - connected = True - except Exception as exc: - logger.warning("websocket connect failed bot_id=%s detail=%s", bot_id, exc) - try: - await websocket.close(code=1011, reason="WebSocket accept failed") - except Exception: - pass - return - - docker_manager.ensure_monitor(bot_id, docker_callback) - try: - while True: - await websocket.receive_text() - except WebSocketDisconnect: - pass - except RuntimeError as exc: - # Client may drop before handshake settles; treat as benign disconnect. 
- msg = str(exc or "").lower() - if "need to call \"accept\" first" not in msg and "not connected" not in msg: - logger.exception("websocket runtime error bot_id=%s", bot_id) - except Exception: - logger.exception("websocket unexpected error bot_id=%s", bot_id) - finally: - if connected: - manager.disconnect(bot_id, websocket) - - -def _main_server_options() -> tuple[str, int, bool]: - host = str(os.getenv("APP_HOST", "0.0.0.0") or "0.0.0.0").strip() or "0.0.0.0" - try: - port = int(os.getenv("APP_PORT", "8000")) - except Exception: - port = 8000 - port = max(1, min(port, 65535)) - reload_flag = str(os.getenv("APP_RELOAD", "true")).strip().lower() in {"1", "true", "yes", "on"} - return host, port, reload_flag +from app_factory import create_app +app = create_app() if __name__ == "__main__": import uvicorn - host, port, reload_flag = _main_server_options() - app_module = f"{os.path.splitext(os.path.basename(__file__))[0]}:app" - if reload_flag: - uvicorn.run(app_module, host=host, port=port, reload=True) - else: - uvicorn.run(app, host=host, port=port) + uvicorn.run(app, host="0.0.0.0", port=8000) diff --git a/backend/schemas/bot.py b/backend/schemas/bot.py new file mode 100644 index 0000000..e674b1b --- /dev/null +++ b/backend/schemas/bot.py @@ -0,0 +1,103 @@ +from typing import Optional, Dict, Any, List +from pydantic import BaseModel + + +class ChannelConfigRequest(BaseModel): + channel_type: str + external_app_id: Optional[str] = None + app_secret: Optional[str] = None + internal_port: Optional[int] = None + is_active: bool = True + extra_config: Optional[Dict[str, Any]] = None + + +class ChannelConfigUpdateRequest(BaseModel): + channel_type: Optional[str] = None + external_app_id: Optional[str] = None + app_secret: Optional[str] = None + internal_port: Optional[int] = None + is_active: Optional[bool] = None + extra_config: Optional[Dict[str, Any]] = None + + +class BotCreateRequest(BaseModel): + id: str + name: str + enabled: Optional[bool] = True + image_tag: 
str + access_password: Optional[str] = None + llm_provider: str + llm_model: str + api_key: str + api_base: Optional[str] = None + system_prompt: Optional[str] = None + temperature: float = 0.2 + top_p: float = 1.0 + max_tokens: int = 8192 + cpu_cores: float = 1.0 + memory_mb: int = 1024 + storage_gb: int = 10 + system_timezone: Optional[str] = None + soul_md: Optional[str] = None + agents_md: Optional[str] = None + user_md: Optional[str] = None + tools_md: Optional[str] = None + tools_config: Optional[Dict[str, Any]] = None + env_params: Optional[Dict[str, str]] = None + identity_md: Optional[str] = None + channels: Optional[List[ChannelConfigRequest]] = None + send_progress: Optional[bool] = None + send_tool_hints: Optional[bool] = None + + +class BotUpdateRequest(BaseModel): + name: Optional[str] = None + enabled: Optional[bool] = None + image_tag: Optional[str] = None + access_password: Optional[str] = None + llm_provider: Optional[str] = None + llm_model: Optional[str] = None + api_key: Optional[str] = None + api_base: Optional[str] = None + temperature: Optional[float] = None + top_p: Optional[float] = None + max_tokens: Optional[int] = None + cpu_cores: Optional[float] = None + memory_mb: Optional[int] = None + storage_gb: Optional[int] = None + system_timezone: Optional[str] = None + system_prompt: Optional[str] = None + agents_md: Optional[str] = None + soul_md: Optional[str] = None + user_md: Optional[str] = None + tools_md: Optional[str] = None + tools_config: Optional[Dict[str, Any]] = None + env_params: Optional[Dict[str, str]] = None + identity_md: Optional[str] = None + send_progress: Optional[bool] = None + send_tool_hints: Optional[bool] = None + + +class BotToolsConfigUpdateRequest(BaseModel): + tools_config: Optional[Dict[str, Any]] = None + + +class BotMcpConfigUpdateRequest(BaseModel): + mcp_servers: Optional[Dict[str, Any]] = None + + +class BotEnvParamsUpdateRequest(BaseModel): + env_params: Optional[Dict[str, str]] = None + + +class 
BotPageAuthLoginRequest(BaseModel): + password: str + + +class CommandRequest(BaseModel): + command: Optional[str] = None + attachments: Optional[List[str]] = None + + +class MessageFeedbackRequest(BaseModel): + feedback: Optional[str] = None diff --git a/backend/schemas/system.py b/backend/schemas/system.py new file mode 100644 index 0000000..f7e7866 --- /dev/null +++ b/backend/schemas/system.py @@ -0,0 +1,23 @@ +from typing import Optional, Dict, Any +from pydantic import BaseModel + + +class WorkspaceFileUpdateRequest(BaseModel): + content: str + + +class PanelLoginRequest(BaseModel): + password: str + + +class AgentMdTemplatesPayload(BaseModel): + agents_md: Optional[str] = None + soul_md: Optional[str] = None + user_md: Optional[str] = None + tools_md: Optional[str] = None + identity_md: Optional[str] = None + + +class SystemTemplatesUpdateRequest(BaseModel): + agent_md_templates: Optional[AgentMdTemplatesPayload] = None + topic_presets: Optional[Dict[str, Any]] = None diff --git a/backend/services/bot_channel_service.py b/backend/services/bot_channel_service.py new file mode 100644 index 0000000..4ba3ca0 --- /dev/null +++ b/backend/services/bot_channel_service.py @@ -0,0 +1,366 @@ +from pathlib import Path +from typing import Any, Dict, List, Optional + +from sqlmodel import Session + +from core.config_manager import BotConfigManager +from core.settings import BOTS_WORKSPACE_ROOT +from models.bot import BotInstance +from schemas.bot import ChannelConfigRequest +from services.bot_storage_service import ( + _normalize_resource_limits, + _read_bot_config, + _write_bot_resources, +) +from services.template_service import get_agent_md_templates + +config_manager = BotConfigManager(host_data_root=BOTS_WORKSPACE_ROOT) + + +def _normalize_channel_extra(raw: Any) -> Dict[str, Any]: + if not isinstance(raw, dict): + return {} + return raw + + +def _normalize_allow_from(raw: Any) -> List[str]: + rows: List[str] = [] + if isinstance(raw, list): + for item in raw: + text 
= str(item or "").strip() + if text and text not in rows: + rows.append(text) + return rows or ["*"] + + +def _read_global_delivery_flags(channels_cfg: Any) -> tuple[bool, bool]: + if not isinstance(channels_cfg, dict): + return False, False + send_progress = channels_cfg.get("sendProgress") + send_tool_hints = channels_cfg.get("sendToolHints") + dashboard_cfg = channels_cfg.get("dashboard") + if isinstance(dashboard_cfg, dict): + if send_progress is None and "sendProgress" in dashboard_cfg: + send_progress = dashboard_cfg.get("sendProgress") + if send_tool_hints is None and "sendToolHints" in dashboard_cfg: + send_tool_hints = dashboard_cfg.get("sendToolHints") + return bool(send_progress), bool(send_tool_hints) + + +def _channel_cfg_to_api_dict(bot_id: str, ctype: str, cfg: Dict[str, Any]) -> Dict[str, Any]: + ctype = str(ctype or "").strip().lower() + enabled = bool(cfg.get("enabled", True)) + port = max(1, min(int(cfg.get("port", 8080) or 8080), 65535)) + extra: Dict[str, Any] = {} + external_app_id = "" + app_secret = "" + + if ctype == "feishu": + external_app_id = str(cfg.get("appId") or "") + app_secret = str(cfg.get("appSecret") or "") + extra = { + "encryptKey": cfg.get("encryptKey", ""), + "verificationToken": cfg.get("verificationToken", ""), + "allowFrom": _normalize_allow_from(cfg.get("allowFrom", [])), + } + elif ctype == "dingtalk": + external_app_id = str(cfg.get("clientId") or "") + app_secret = str(cfg.get("clientSecret") or "") + extra = {"allowFrom": _normalize_allow_from(cfg.get("allowFrom", []))} + elif ctype == "telegram": + app_secret = str(cfg.get("token") or "") + extra = { + "proxy": cfg.get("proxy", ""), + "replyToMessage": bool(cfg.get("replyToMessage", False)), + "allowFrom": _normalize_allow_from(cfg.get("allowFrom", [])), + } + elif ctype == "slack": + external_app_id = str(cfg.get("botToken") or "") + app_secret = str(cfg.get("appToken") or "") + extra = { + "mode": cfg.get("mode", "socket"), + "replyInThread": 
bool(cfg.get("replyInThread", True)), + "groupPolicy": cfg.get("groupPolicy", "mention"), + "groupAllowFrom": cfg.get("groupAllowFrom", []), + "reactEmoji": cfg.get("reactEmoji", "eyes"), + } + elif ctype == "qq": + external_app_id = str(cfg.get("appId") or "") + app_secret = str(cfg.get("secret") or "") + extra = {"allowFrom": _normalize_allow_from(cfg.get("allowFrom", []))} + elif ctype == "weixin": + app_secret = "" + extra = { + "hasSavedState": (Path(BOTS_WORKSPACE_ROOT) / bot_id / ".nanobot" / "weixin" / "account.json").is_file(), + } + elif ctype == "email": + extra = { + "consentGranted": bool(cfg.get("consentGranted", False)), + "imapHost": str(cfg.get("imapHost") or ""), + "imapPort": int(cfg.get("imapPort") or 993), + "imapUsername": str(cfg.get("imapUsername") or ""), + "imapPassword": str(cfg.get("imapPassword") or ""), + "imapMailbox": str(cfg.get("imapMailbox") or "INBOX"), + "imapUseSsl": bool(cfg.get("imapUseSsl", True)), + "smtpHost": str(cfg.get("smtpHost") or ""), + "smtpPort": int(cfg.get("smtpPort") or 587), + "smtpUsername": str(cfg.get("smtpUsername") or ""), + "smtpPassword": str(cfg.get("smtpPassword") or ""), + "smtpUseTls": bool(cfg.get("smtpUseTls", True)), + "smtpUseSsl": bool(cfg.get("smtpUseSsl", False)), + "fromAddress": str(cfg.get("fromAddress") or ""), + "autoReplyEnabled": bool(cfg.get("autoReplyEnabled", True)), + "pollIntervalSeconds": int(cfg.get("pollIntervalSeconds") or 30), + "markSeen": bool(cfg.get("markSeen", True)), + "maxBodyChars": int(cfg.get("maxBodyChars") or 12000), + "subjectPrefix": str(cfg.get("subjectPrefix") or "Re: "), + "allowFrom": _normalize_allow_from(cfg.get("allowFrom", [])), + } + else: + external_app_id = str(cfg.get("appId") or cfg.get("clientId") or cfg.get("botToken") or cfg.get("externalAppId") or "") + app_secret = str(cfg.get("appSecret") or cfg.get("clientSecret") or cfg.get("secret") or cfg.get("token") or cfg.get("appToken") or "") + extra = { + key: value + for key, value in cfg.items() + 
if key not in {"enabled", "port", "appId", "clientId", "botToken", "externalAppId", "appSecret", "clientSecret", "secret", "token", "appToken"} + } + + return { + "id": ctype, + "bot_id": bot_id, + "channel_type": ctype, + "external_app_id": external_app_id, + "app_secret": app_secret, + "internal_port": port, + "is_active": enabled, + "extra_config": extra, + "locked": ctype == "dashboard", + } + + +def _channel_api_to_cfg(row: Dict[str, Any]) -> Dict[str, Any]: + ctype = str(row.get("channel_type") or "").strip().lower() + enabled = bool(row.get("is_active", True)) + extra = _normalize_channel_extra(row.get("extra_config")) + external_app_id = str(row.get("external_app_id") or "") + app_secret = str(row.get("app_secret") or "") + port = max(1, min(int(row.get("internal_port") or 8080), 65535)) + + if ctype == "feishu": + return { + "enabled": enabled, + "appId": external_app_id, + "appSecret": app_secret, + "encryptKey": extra.get("encryptKey", ""), + "verificationToken": extra.get("verificationToken", ""), + "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])), + } + if ctype == "dingtalk": + return { + "enabled": enabled, + "clientId": external_app_id, + "clientSecret": app_secret, + "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])), + } + if ctype == "telegram": + return { + "enabled": enabled, + "token": app_secret, + "proxy": extra.get("proxy", ""), + "replyToMessage": bool(extra.get("replyToMessage", False)), + "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])), + } + if ctype == "slack": + return { + "enabled": enabled, + "mode": extra.get("mode", "socket"), + "botToken": external_app_id, + "appToken": app_secret, + "replyInThread": bool(extra.get("replyInThread", True)), + "groupPolicy": extra.get("groupPolicy", "mention"), + "groupAllowFrom": extra.get("groupAllowFrom", []), + "reactEmoji": extra.get("reactEmoji", "eyes"), + } + if ctype == "qq": + return { + "enabled": enabled, + "appId": external_app_id, + "secret": 
app_secret, + "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])), + } + if ctype == "weixin": + return { + "enabled": enabled, + "token": app_secret, + } + if ctype == "email": + return { + "enabled": enabled, + "consentGranted": bool(extra.get("consentGranted", False)), + "imapHost": str(extra.get("imapHost") or ""), + "imapPort": max(1, min(int(extra.get("imapPort") or 993), 65535)), + "imapUsername": str(extra.get("imapUsername") or ""), + "imapPassword": str(extra.get("imapPassword") or ""), + "imapMailbox": str(extra.get("imapMailbox") or "INBOX"), + "imapUseSsl": bool(extra.get("imapUseSsl", True)), + "smtpHost": str(extra.get("smtpHost") or ""), + "smtpPort": max(1, min(int(extra.get("smtpPort") or 587), 65535)), + "smtpUsername": str(extra.get("smtpUsername") or ""), + "smtpPassword": str(extra.get("smtpPassword") or ""), + "smtpUseTls": bool(extra.get("smtpUseTls", True)), + "smtpUseSsl": bool(extra.get("smtpUseSsl", False)), + "fromAddress": str(extra.get("fromAddress") or ""), + "autoReplyEnabled": bool(extra.get("autoReplyEnabled", True)), + "pollIntervalSeconds": max(5, int(extra.get("pollIntervalSeconds") or 30)), + "markSeen": bool(extra.get("markSeen", True)), + "maxBodyChars": max(1, int(extra.get("maxBodyChars") or 12000)), + "subjectPrefix": str(extra.get("subjectPrefix") or "Re: "), + "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])), + } + merged = dict(extra) + merged.update( + { + "enabled": enabled, + "appId": external_app_id, + "appSecret": app_secret, + "port": port, + } + ) + return merged + + +def _get_bot_channels_from_config(bot: BotInstance) -> List[Dict[str, Any]]: + config_data = _read_bot_config(bot.id) + channels_cfg = config_data.get("channels") + if not isinstance(channels_cfg, dict): + channels_cfg = {} + send_progress, send_tool_hints = _read_global_delivery_flags(channels_cfg) + rows: List[Dict[str, Any]] = [ + { + "id": "dashboard", + "bot_id": bot.id, + "channel_type": "dashboard", + 
"external_app_id": f"dashboard-{bot.id}", + "app_secret": "", + "internal_port": 9000, + "is_active": True, + "extra_config": { + "sendProgress": send_progress, + "sendToolHints": send_tool_hints, + }, + "locked": True, + } + ] + for ctype, cfg in channels_cfg.items(): + if ctype in {"sendProgress", "sendToolHints", "dashboard"} or not isinstance(cfg, dict): + continue + rows.append(_channel_cfg_to_api_dict(bot.id, ctype, cfg)) + return rows + + +def _normalize_initial_channels(bot_id: str, channels: Optional[List[ChannelConfigRequest]]) -> List[Dict[str, Any]]: + rows: List[Dict[str, Any]] = [] + seen_types: set[str] = set() + for channel in channels or []: + ctype = (channel.channel_type or "").strip().lower() + if not ctype or ctype == "dashboard" or ctype in seen_types: + continue + seen_types.add(ctype) + rows.append( + { + "id": ctype, + "bot_id": bot_id, + "channel_type": ctype, + "external_app_id": (channel.external_app_id or "").strip() or f"{ctype}-{bot_id}", + "app_secret": (channel.app_secret or "").strip(), + "internal_port": max(1, min(int(channel.internal_port or 8080), 65535)), + "is_active": bool(channel.is_active), + "extra_config": _normalize_channel_extra(channel.extra_config), + "locked": False, + } + ) + return rows + + +def _sync_workspace_channels( + session: Session, + bot_id: str, + snapshot: Dict[str, Any], + *, + channels_override: Optional[List[Dict[str, Any]]] = None, + global_delivery_override: Optional[Dict[str, Any]] = None, + runtime_overrides: Optional[Dict[str, Any]] = None, +) -> None: + bot = session.get(BotInstance, bot_id) + if not bot: + return + template_defaults = get_agent_md_templates() + bot_data: Dict[str, Any] = { + "name": bot.name, + "system_prompt": snapshot.get("system_prompt") or template_defaults.get("soul_md", ""), + "soul_md": snapshot.get("soul_md") or template_defaults.get("soul_md", ""), + "agents_md": snapshot.get("agents_md") or template_defaults.get("agents_md", ""), + "user_md": snapshot.get("user_md") 
or template_defaults.get("user_md", ""), + "tools_md": snapshot.get("tools_md") or template_defaults.get("tools_md", ""), + "identity_md": snapshot.get("identity_md") or template_defaults.get("identity_md", ""), + "llm_provider": snapshot.get("llm_provider") or "", + "llm_model": snapshot.get("llm_model") or "", + "api_key": snapshot.get("api_key") or "", + "api_base": snapshot.get("api_base") or "", + "temperature": snapshot.get("temperature"), + "top_p": snapshot.get("top_p"), + "max_tokens": snapshot.get("max_tokens"), + "cpu_cores": snapshot.get("cpu_cores"), + "memory_mb": snapshot.get("memory_mb"), + "storage_gb": snapshot.get("storage_gb"), + "send_progress": bool(snapshot.get("send_progress")), + "send_tool_hints": bool(snapshot.get("send_tool_hints")), + } + if isinstance(runtime_overrides, dict): + for key, value in runtime_overrides.items(): + if key in {"api_key", "llm_provider", "llm_model"}: + text = str(value or "").strip() + if not text: + continue + bot_data[key] = text + continue + if key == "api_base": + bot_data[key] = str(value or "").strip() + continue + bot_data[key] = value + + resources = _normalize_resource_limits( + bot_data.get("cpu_cores"), + bot_data.get("memory_mb"), + bot_data.get("storage_gb"), + ) + bot_data.update(resources) + send_progress = bool(bot_data.get("send_progress", False)) + send_tool_hints = bool(bot_data.get("send_tool_hints", False)) + if isinstance(global_delivery_override, dict): + if "sendProgress" in global_delivery_override: + send_progress = bool(global_delivery_override.get("sendProgress")) + if "sendToolHints" in global_delivery_override: + send_tool_hints = bool(global_delivery_override.get("sendToolHints")) + + channels_data = channels_override if channels_override is not None else _get_bot_channels_from_config(bot) + bot_data["send_progress"] = send_progress + bot_data["send_tool_hints"] = send_tool_hints + normalized_channels: List[Dict[str, Any]] = [] + for row in channels_data: + ctype = 
str(row.get("channel_type") or "").strip().lower() + if not ctype or ctype == "dashboard": + continue + normalized_channels.append( + { + "channel_type": ctype, + "external_app_id": str(row.get("external_app_id") or ""), + "app_secret": str(row.get("app_secret") or ""), + "internal_port": max(1, min(int(row.get("internal_port") or 8080), 65535)), + "is_active": bool(row.get("is_active", True)), + "extra_config": _normalize_channel_extra(row.get("extra_config")), + } + ) + + config_manager.update_workspace(bot_id=bot_id, bot_data=bot_data, channels=normalized_channels) + _write_bot_resources(bot_id, bot_data.get("cpu_cores"), bot_data.get("memory_mb"), bot_data.get("storage_gb")) diff --git a/backend/services/bot_config_service.py b/backend/services/bot_config_service.py new file mode 100644 index 0000000..be308be --- /dev/null +++ b/backend/services/bot_config_service.py @@ -0,0 +1,324 @@ +from datetime import datetime +from typing import Any, Dict + +from fastapi import HTTPException +from sqlmodel import Session + +from core.docker_instance import docker_manager +from core.utils import _calc_dir_size_bytes +from models.bot import BotInstance +from schemas.bot import ( + BotEnvParamsUpdateRequest, + BotMcpConfigUpdateRequest, + ChannelConfigRequest, + ChannelConfigUpdateRequest, +) +from services.bot_channel_service import ( + _channel_api_to_cfg, + _get_bot_channels_from_config, + _normalize_channel_extra, + _read_global_delivery_flags, + _sync_workspace_channels, +) +from services.bot_mcp_service import ( + _merge_mcp_servers_preserving_extras, + _normalize_mcp_servers, +) +from services.bot_storage_service import ( + _normalize_env_params, + _read_bot_config, + _read_bot_resources, + _read_env_store, + _workspace_root, + _write_bot_config, + _write_env_store, +) +from services.cache_service import _invalidate_bot_detail_cache + + +def _get_bot_or_404(session: Session, bot_id: str) -> BotInstance: + bot = session.get(BotInstance, bot_id) + if not bot: + raise 
def _get_bot_or_404(session: Session, bot_id: str) -> BotInstance:
    """Fetch a bot row by primary key or raise HTTP 404."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return bot


def get_bot_resources_snapshot(session: Session, *, bot_id: str) -> Dict[str, Any]:
    """Combine configured limits, Docker runtime stats, and workspace disk usage
    into a single resource report for one bot.

    Raises:
        HTTPException: 404 if the bot does not exist.
    """
    bot = _get_bot_or_404(session, bot_id)

    configured = _read_bot_resources(bot_id)
    runtime = docker_manager.get_bot_resource_snapshot(bot_id)
    workspace_root = _workspace_root(bot_id)
    workspace_bytes = _calc_dir_size_bytes(workspace_root)
    # storage_gb of 0 (or missing) means "unlimited" — see the note below.
    configured_storage_bytes = int(configured.get("storage_gb", 0) or 0) * 1024 * 1024 * 1024
    workspace_percent = 0.0
    if configured_storage_bytes > 0:
        workspace_percent = (workspace_bytes / configured_storage_bytes) * 100.0

    # A limit counts as "enforced" only when Docker reports a positive value.
    limits = runtime.get("limits") or {}
    cpu_limited = (limits.get("cpu_cores") or 0) > 0
    memory_limited = (limits.get("memory_bytes") or 0) > 0
    storage_limited = bool(limits.get("storage_bytes")) or bool(limits.get("storage_opt_raw"))

    return {
        "bot_id": bot_id,
        # Prefer live Docker status; fall back to the last persisted one.
        "docker_status": runtime.get("docker_status") or bot.docker_status,
        "configured": configured,
        "runtime": runtime,
        "workspace": {
            "path": workspace_root,
            "usage_bytes": workspace_bytes,
            "configured_limit_bytes": configured_storage_bytes if configured_storage_bytes > 0 else None,
            "usage_percent": max(0.0, workspace_percent),
        },
        "enforcement": {
            "cpu_limited": cpu_limited,
            "memory_limited": memory_limited,
            "storage_limited": storage_limited,
        },
        "note": (
            "Resource value 0 means unlimited. CPU/Memory limits come from Docker HostConfig and are enforced by cgroup. "
            "Storage limit depends on Docker storage driver support."
        ),
        # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
        # consider datetime.now(timezone.utc) — confirm project Python version.
        "collected_at": datetime.utcnow().isoformat() + "Z",
    }


def list_bot_channels_config(session: Session, *, bot_id: str):
    """Return the channel rows derived from the bot's config file (404 if bot missing)."""
    bot = _get_bot_or_404(session, bot_id)
    return _get_bot_channels_from_config(bot)


def get_bot_tools_config_state(session: Session, *, bot_id: str) -> Dict[str, Any]:
    """Report that dashboard-managed tools config is disabled (feature retired)."""
    _get_bot_or_404(session, bot_id)
    return {
        "bot_id": bot_id,
        "tools_config": {},
        "managed_by_dashboard": False,
        "hint": "Tools config is disabled in dashboard. Configure tool-related env vars manually.",
    }


def reject_bot_tools_config_update(
    session: Session,
    *,
    bot_id: str,
    payload: Any,
) -> None:
    """Always reject tools-config writes with HTTP 400 (404 first if bot missing).

    The payload is intentionally ignored; this endpoint exists only to give a
    clear migration message to old clients.
    """
    _get_bot_or_404(session, bot_id)
    raise HTTPException(
        status_code=400,
        detail="Tools config is no longer managed by dashboard. Please set required env vars manually.",
    )


def get_bot_mcp_config_state(session: Session, *, bot_id: str) -> Dict[str, Any]:
    """Return the sanitized MCP server mapping stored under tools.mcpServers."""
    _get_bot_or_404(session, bot_id)
    config_data = _read_bot_config(bot_id)
    tools_cfg = config_data.get("tools") if isinstance(config_data, dict) else {}
    if not isinstance(tools_cfg, dict):
        tools_cfg = {}
    mcp_servers = _normalize_mcp_servers(tools_cfg.get("mcpServers"))
    return {
        "bot_id": bot_id,
        "mcp_servers": mcp_servers,
        "locked_servers": [],
        # Config file changes only take effect after the container restarts.
        "restart_required": True,
    }


def update_bot_mcp_config_state(
    session: Session,
    *,
    bot_id: str,
    payload: BotMcpConfigUpdateRequest,
) -> Dict[str, Any]:
    """Merge the submitted MCP servers into the bot config and persist it.

    Unknown extra keys already present per server are preserved
    (see _merge_mcp_servers_preserving_extras); the response echoes only the
    sanitized view of what was saved.
    """
    _get_bot_or_404(session, bot_id)
    config_data = _read_bot_config(bot_id)
    if not isinstance(config_data, dict):
        config_data = {}
    tools_cfg = config_data.get("tools")
    if not isinstance(tools_cfg, dict):
        tools_cfg = {}
    normalized_mcp_servers = _normalize_mcp_servers(payload.mcp_servers or {})
    current_mcp_servers = tools_cfg.get("mcpServers")
    merged_mcp_servers = _merge_mcp_servers_preserving_extras(current_mcp_servers, normalized_mcp_servers)
    tools_cfg["mcpServers"] = merged_mcp_servers
    config_data["tools"] = tools_cfg
    # Compute the sanitized echo before writing so the response matches the
    # merged state exactly as stored.
    sanitized_after_save = _normalize_mcp_servers(tools_cfg.get("mcpServers"))
    _write_bot_config(bot_id, config_data)
    _invalidate_bot_detail_cache(bot_id)
    return {
        "status": "updated",
        "bot_id": bot_id,
        "mcp_servers": sanitized_after_save,
        "locked_servers": [],
        "restart_required": True,
    }


def get_bot_env_params_state(session: Session, *, bot_id: str) -> Dict[str, Any]:
    """Return the bot's stored environment parameters (404 if bot missing)."""
    _get_bot_or_404(session, bot_id)
    return {
        "bot_id": bot_id,
        "env_params": _read_env_store(bot_id),
    }


def update_bot_env_params_state(
    session: Session,
    *,
    bot_id: str,
    payload: BotEnvParamsUpdateRequest,
) -> Dict[str, Any]:
    """Replace the bot's env-param store with the normalized payload."""
    _get_bot_or_404(session, bot_id)
    normalized = _normalize_env_params(payload.env_params)
    _write_env_store(bot_id, normalized)
    _invalidate_bot_detail_cache(bot_id)
    return {
        "status": "updated",
        "bot_id": bot_id,
        "env_params": normalized,
        "restart_required": True,
    }


def create_bot_channel_config(
    session: Session,
    *,
    bot_id: str,
    payload: ChannelConfigRequest,
) -> Dict[str, Any]:
    """Add a new channel entry to the bot's config file.

    Rejects the built-in "dashboard" channel and duplicate channel types.
    The channel type doubles as the row id (one channel per type).
    """
    bot = _get_bot_or_404(session, bot_id)

    ctype = (payload.channel_type or "").strip().lower()
    if not ctype:
        raise HTTPException(status_code=400, detail="channel_type is required")
    if ctype == "dashboard":
        raise HTTPException(status_code=400, detail="dashboard channel is built-in and cannot be created manually")
    current_rows = _get_bot_channels_from_config(bot)
    if any(str(row.get("channel_type") or "").lower() == ctype for row in current_rows):
        raise HTTPException(status_code=400, detail=f"Channel already exists: {ctype}")

    new_row = {
        "id": ctype,
        "bot_id": bot_id,
        "channel_type": ctype,
        # Fall back to a deterministic synthetic app id when none is given.
        "external_app_id": (payload.external_app_id or "").strip() or f"{ctype}-{bot_id}",
        "app_secret": (payload.app_secret or "").strip(),
        # Clamp to the valid TCP port range; default 8080.
        "internal_port": max(1, min(int(payload.internal_port or 8080), 65535)),
        "is_active": bool(payload.is_active),
        "extra_config": _normalize_channel_extra(payload.extra_config),
        "locked": False,
    }

    config_data = _read_bot_config(bot_id)
    channels_cfg = config_data.get("channels")
    if not isinstance(channels_cfg, dict):
        channels_cfg = {}
    config_data["channels"] = channels_cfg
    channels_cfg[ctype] = _channel_api_to_cfg(new_row)
    _write_bot_config(bot_id, config_data)
    _sync_workspace_channels(session, bot_id)
    _invalidate_bot_detail_cache(bot_id)
    return new_row


def update_bot_channel_config(
    session: Session,
    *,
    bot_id: str,
    channel_id: str,
    payload: ChannelConfigUpdateRequest,
) -> Dict[str, Any]:
    """Apply a partial update to one channel of the bot's config file.

    The built-in/locked "dashboard" channel cannot be modified. Renaming the
    channel type moves its entry in the config mapping; global delivery flags
    (sendProgress / sendToolHints) are carried over unchanged for non-dashboard
    channels.
    """
    bot = _get_bot_or_404(session, bot_id)

    channel_key = str(channel_id or "").strip().lower()
    rows = _get_bot_channels_from_config(bot)
    row = next((r for r in rows if str(r.get("id") or "").lower() == channel_key), None)
    if not row:
        raise HTTPException(status_code=404, detail="Channel not found")
    if str(row.get("channel_type") or "").strip().lower() == "dashboard" or bool(row.get("locked")):
        raise HTTPException(status_code=400, detail="dashboard channel is built-in and cannot be modified")

    # Only fields explicitly sent by the client are applied.
    update_data = payload.model_dump(exclude_unset=True)
    existing_type = str(row.get("channel_type") or "").strip().lower()
    new_type = existing_type
    if "channel_type" in update_data and update_data["channel_type"] is not None:
        new_type = str(update_data["channel_type"]).strip().lower()
        if not new_type:
            raise HTTPException(status_code=400, detail="channel_type cannot be empty")
        if existing_type == "dashboard" and new_type != "dashboard":
            raise HTTPException(status_code=400, detail="dashboard channel type cannot be changed")
        if new_type != existing_type and any(str(r.get("channel_type") or "").lower() == new_type for r in rows):
            raise HTTPException(status_code=400, detail=f"Channel already exists: {new_type}")

    if "external_app_id" in update_data and update_data["external_app_id"] is not None:
        row["external_app_id"] = str(update_data["external_app_id"]).strip()
    if "app_secret" in update_data and update_data["app_secret"] is not None:
        row["app_secret"] = str(update_data["app_secret"]).strip()
    if "internal_port" in update_data and update_data["internal_port"] is not None:
        row["internal_port"] = max(1, min(int(update_data["internal_port"]), 65535))
    if "is_active" in update_data and update_data["is_active"] is not None:
        next_active = bool(update_data["is_active"])
        if existing_type == "dashboard" and not next_active:
            raise HTTPException(status_code=400, detail="dashboard channel must remain enabled")
        row["is_active"] = next_active
    if "extra_config" in update_data:
        row["extra_config"] = _normalize_channel_extra(update_data.get("extra_config"))
    row["channel_type"] = new_type
    row["id"] = new_type
    row["locked"] = new_type == "dashboard"

    config_data = _read_bot_config(bot_id)
    channels_cfg = config_data.get("channels")
    if not isinstance(channels_cfg, dict):
        channels_cfg = {}
    config_data["channels"] = channels_cfg
    current_send_progress, current_send_tool_hints = _read_global_delivery_flags(channels_cfg)
    if new_type == "dashboard":
        # Dashboard delivery settings live as top-level flags, sourced from the
        # row's extra_config when present.
        extra = _normalize_channel_extra(row.get("extra_config"))
        channels_cfg["sendProgress"] = bool(extra.get("sendProgress", current_send_progress))
        channels_cfg["sendToolHints"] = bool(extra.get("sendToolHints", current_send_tool_hints))
    else:
        channels_cfg["sendProgress"] = current_send_progress
        channels_cfg["sendToolHints"] = current_send_tool_hints
    # Dashboard is never stored as a channels entry — it is represented by the
    # top-level flags only.
    channels_cfg.pop("dashboard", None)
    if existing_type != "dashboard" and existing_type in channels_cfg and existing_type != new_type:
        channels_cfg.pop(existing_type, None)
    if new_type != "dashboard":
        channels_cfg[new_type] = _channel_api_to_cfg(row)
    _write_bot_config(bot_id, config_data)
    session.commit()
    _sync_workspace_channels(session, bot_id)
    _invalidate_bot_detail_cache(bot_id)
    return row


def delete_bot_channel_config(
    session: Session,
    *,
    bot_id: str,
    channel_id: str,
) -> Dict[str, Any]:
    """Remove one channel from the bot's config file.

    The built-in "dashboard" channel cannot be deleted.
    """
    bot = _get_bot_or_404(session, bot_id)

    channel_key = str(channel_id or "").strip().lower()
    rows = _get_bot_channels_from_config(bot)
    row = next((r for r in rows if str(r.get("id") or "").lower() == channel_key), None)
    if not row:
        raise HTTPException(status_code=404, detail="Channel not found")
    if str(row.get("channel_type") or "").lower() == "dashboard":
        raise HTTPException(status_code=400, detail="dashboard channel cannot be deleted")

    config_data = _read_bot_config(bot_id)
    channels_cfg = config_data.get("channels")
    if not isinstance(channels_cfg, dict):
        channels_cfg = {}
    config_data["channels"] = channels_cfg
    channels_cfg.pop(str(row.get("channel_type") or "").lower(), None)
    _write_bot_config(bot_id, config_data)
    session.commit()
    _sync_workspace_channels(session, bot_id)
    _invalidate_bot_detail_cache(bot_id)
    return {"status": "deleted"}
def _get_bot_or_404(session: Session, bot_id: str) -> BotInstance:
    """Fetch a bot row or raise ValueError.

    NOTE: unlike the router-level helpers, this service raises plain Python
    exceptions (ValueError/PermissionError/RuntimeError); the caller is
    expected to translate them into HTTP responses.
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise ValueError("Bot not found")
    return bot


async def start_bot_instance(session: Session, bot_id: str) -> Dict[str, Any]:
    """Start the bot's Docker container with its configured env and limits.

    Raises:
        ValueError: bot does not exist.
        PermissionError: bot is disabled.
        RuntimeError: container failed to start or died right after startup.
    """
    bot = _get_bot_or_404(session, bot_id)
    if not bool(getattr(bot, "enabled", True)):
        raise PermissionError("Bot is disabled. Enable it first.")

    # Make sure workspace channel config and env store are current before boot.
    _sync_workspace_channels(session, bot_id)
    runtime_snapshot = _read_bot_runtime_snapshot(bot)
    env_params = _resolve_bot_env_params(bot_id)
    _write_env_store(bot_id, env_params)
    success = docker_manager.start_bot(
        bot_id,
        image_tag=bot.image_tag,
        on_state_change=docker_callback,
        env_vars=env_params,
        cpu_cores=_safe_float(runtime_snapshot.get("cpu_cores"), 1.0),
        memory_mb=_safe_int(runtime_snapshot.get("memory_mb"), 1024),
        storage_gb=_safe_int(runtime_snapshot.get("storage_gb"), 10),
    )
    if not success:
        bot.docker_status = "STOPPED"
        session.add(bot)
        session.commit()
        raise RuntimeError(f"Failed to start container with image {bot.image_tag}")

    # Re-read the status: the container may have crashed immediately.
    actual_status = docker_manager.get_bot_status(bot_id)
    bot.docker_status = actual_status
    if actual_status != "RUNNING":
        session.add(bot)
        session.commit()
        _invalidate_bot_detail_cache(bot_id)
        raise RuntimeError("Bot container failed shortly after startup. Check bot logs/config.")

    # Fire-and-forget watcher that warns if the agent loop never becomes ready.
    asyncio.create_task(_record_agent_loop_ready_warning(bot_id))
    session.add(bot)
    record_activity_event(session, bot_id, "bot_started", channel="system", detail=f"Container started for {bot_id}")
    session.commit()
    _invalidate_bot_detail_cache(bot_id)
    return {"status": "started"}


def stop_bot_instance(session: Session, bot_id: str) -> Dict[str, Any]:
    """Stop the bot's container and persist the STOPPED status.

    NOTE(review): stopping is refused while the bot is disabled, mirroring
    start; disable_bot_instance is the path that both disables and stops.
    """
    bot = _get_bot_or_404(session, bot_id)
    if not bool(getattr(bot, "enabled", True)):
        raise PermissionError("Bot is disabled. Enable it first.")

    docker_manager.stop_bot(bot_id)
    bot.docker_status = "STOPPED"
    session.add(bot)
    record_activity_event(session, bot_id, "bot_stopped", channel="system", detail=f"Container stopped for {bot_id}")
    session.commit()
    _invalidate_bot_detail_cache(bot_id)
    return {"status": "stopped"}


def enable_bot_instance(session: Session, bot_id: str) -> Dict[str, Any]:
    """Mark the bot as enabled (does not start the container)."""
    bot = _get_bot_or_404(session, bot_id)
    bot.enabled = True
    session.add(bot)
    record_activity_event(session, bot_id, "bot_enabled", channel="system", detail=f"Bot {bot_id} enabled")
    session.commit()
    _invalidate_bot_detail_cache(bot_id)
    return {"status": "enabled", "enabled": True}


def disable_bot_instance(session: Session, bot_id: str) -> Dict[str, Any]:
    """Stop the container and mark the bot disabled; ERROR state is preserved."""
    bot = _get_bot_or_404(session, bot_id)
    docker_manager.stop_bot(bot_id)
    bot.enabled = False
    bot.docker_status = "STOPPED"
    if str(bot.current_state or "").upper() not in {"ERROR"}:
        bot.current_state = "IDLE"
    session.add(bot)
    record_activity_event(session, bot_id, "bot_disabled", channel="system", detail=f"Bot {bot_id} disabled")
    session.commit()
    _invalidate_bot_detail_cache(bot_id)
    return {"status": "disabled", "enabled": False}


def deactivate_bot_instance(session: Session, bot_id: str) -> Dict[str, Any]:
    """Stop and disable the bot; identical to disable_bot_instance except for
    the recorded event name and response status."""
    bot = _get_bot_or_404(session, bot_id)
    docker_manager.stop_bot(bot_id)
    bot.enabled = False
    bot.docker_status = "STOPPED"
    if str(bot.current_state or "").upper() not in {"ERROR"}:
        bot.current_state = "IDLE"
    session.add(bot)
    record_activity_event(session, bot_id, "bot_deactivated", channel="system", detail=f"Bot {bot_id} deactivated")
    session.commit()
    _invalidate_bot_detail_cache(bot_id)
    return {"status": "deactivated"}


def delete_bot_instance(session: Session, bot_id: str, delete_workspace: bool = True) -> Dict[str, Any]:
    """Stop the container and delete the bot with all dependent rows.

    Dependent tables (messages, topics, usage, activity, skill installs) are
    removed explicitly — presumably because no DB-level cascade is configured;
    verify against the model definitions. The workspace directory is removed
    last, after the DB commit succeeds.
    """
    bot = _get_bot_or_404(session, bot_id)
    docker_manager.stop_bot(bot_id)

    messages = session.exec(select(BotMessage).where(BotMessage.bot_id == bot_id)).all()
    for row in messages:
        session.delete(row)
    topic_items = session.exec(select(TopicItem).where(TopicItem.bot_id == bot_id)).all()
    for row in topic_items:
        session.delete(row)
    topics = session.exec(select(TopicTopic).where(TopicTopic.bot_id == bot_id)).all()
    for row in topics:
        session.delete(row)
    usage_rows = session.exec(select(BotRequestUsage).where(BotRequestUsage.bot_id == bot_id)).all()
    for row in usage_rows:
        session.delete(row)
    activity_rows = session.exec(select(BotActivityEvent).where(BotActivityEvent.bot_id == bot_id)).all()
    for row in activity_rows:
        session.delete(row)
    skill_install_rows = session.exec(select(BotSkillInstall).where(BotSkillInstall.bot_id == bot_id)).all()
    for row in skill_install_rows:
        session.delete(row)

    session.delete(bot)
    session.commit()

    if delete_workspace:
        workspace_root = os.path.join(BOTS_WORKSPACE_ROOT, bot_id)
        if os.path.isdir(workspace_root):
            shutil.rmtree(workspace_root, ignore_errors=True)

    _invalidate_bot_detail_cache(bot_id)
    _invalidate_bot_messages_cache(bot_id)
    return {"status": "deleted", "workspace_deleted": bool(delete_workspace)}
BOT_ID_PATTERN = re.compile(r"^[A-Za-z0-9_]+$")
MANAGED_WORKSPACE_FILENAMES = ("AGENTS.md", "SOUL.md", "USER.md", "TOOLS.md", "IDENTITY.md")


def _managed_bot_file_paths(bot_id: str) -> Dict[str, str]:
    """Map logical keys to the absolute paths of all dashboard-managed files
    for one bot (config/env/resources JSON plus the managed workspace .md files).

    Returns:
        Dict of ``key -> path``; workspace files use ``"workspace:<filename>"`` keys.
    """
    bot_root = os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot")
    workspace_root = os.path.join(bot_root, "workspace")
    paths = {
        "config": os.path.join(bot_root, "config.json"),
        "env": os.path.join(bot_root, "env.json"),
        "resources": os.path.join(bot_root, "resources.json"),
    }
    for filename in MANAGED_WORKSPACE_FILENAMES:
        # BUG FIX: the key must embed the filename. Previously the f-string had
        # no placeholder, so all five workspace files collapsed onto a single
        # dict key and snapshot/restore only covered the last file.
        paths[f"workspace:{filename}"] = os.path.join(workspace_root, filename)
    return paths


def _snapshot_managed_bot_files(bot_id: str) -> Dict[str, Optional[bytes]]:
    """Read every managed file into memory; missing files map to None.

    Used to roll back file-system state if a DB transaction fails.
    """
    snapshot: Dict[str, Optional[bytes]] = {}
    for key, path in _managed_bot_file_paths(bot_id).items():
        if os.path.isfile(path):
            with open(path, "rb") as file:
                snapshot[key] = file.read()
        else:
            snapshot[key] = None
    return snapshot


def _restore_managed_bot_files(bot_id: str, snapshot: Dict[str, Optional[bytes]]) -> None:
    """Restore managed files to a previous snapshot.

    Files that were absent in the snapshot are deleted; present ones are
    rewritten atomically via a temp file + os.replace.
    """
    for key, path in _managed_bot_file_paths(bot_id).items():
        payload = snapshot.get(key)
        if payload is None:
            if os.path.exists(path):
                os.remove(path)
            continue
        os.makedirs(os.path.dirname(path), exist_ok=True)
        tmp_path = f"{path}.tmp"
        with open(tmp_path, "wb") as file:
            file.write(payload)
        os.replace(tmp_path, path)


def _cleanup_bot_workspace_root(bot_id: str) -> None:
    """Delete the bot's entire workspace directory, ignoring errors."""
    bot_root = os.path.join(BOTS_WORKSPACE_ROOT, bot_id)
    if os.path.isdir(bot_root):
        shutil.rmtree(bot_root, ignore_errors=True)
async def test_provider_connection(payload: Dict[str, Any]) -> Dict[str, Any]:
    """Probe an LLM provider's ``GET /models`` endpoint with the given API key.

    Returns an ``{"ok": bool, ...}`` report instead of raising on network or
    HTTP failures; only invalid input raises HTTPException 400.
    """
    provider = (payload.get("provider") or "").strip()
    api_key = (payload.get("api_key") or "").strip()
    model = (payload.get("model") or "").strip()
    api_base = (payload.get("api_base") or "").strip()

    if not provider or not api_key:
        raise HTTPException(status_code=400, detail="provider and api_key are required")

    normalized_provider, default_base = _provider_defaults(provider)
    base = (api_base or default_base).rstrip("/")
    if normalized_provider not in {"openrouter", "dashscope", "kimi", "minimax", "openai", "deepseek"}:
        raise HTTPException(status_code=400, detail=f"provider not supported for test: {provider}")
    if not base:
        raise HTTPException(status_code=400, detail=f"api_base is required for provider: {provider}")

    headers = {"Authorization": f"Bearer {api_key}"}
    timeout = httpx.Timeout(20.0, connect=10.0)
    url = f"{base}/models"
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            response = await client.get(url, headers=headers)
            if response.status_code >= 400:
                return {
                    "ok": False,
                    "provider": normalized_provider,
                    "status_code": response.status_code,
                    # Truncate provider error bodies to keep the response small.
                    "detail": response.text[:500],
                }
            data = response.json()
            models_raw = data.get("data", []) if isinstance(data, dict) else []
            model_ids: List[str] = [
                str(item["id"]) for item in models_raw[:20] if isinstance(item, dict) and item.get("id")
            ]
            return {
                "ok": True,
                "provider": normalized_provider,
                "endpoint": url,
                "models_preview": model_ids[:8],
                # Substring match only — a hint, not a guarantee the model exists.
                "model_hint": (
                    "model_found"
                    if model and any(model in item for item in model_ids)
                    else ("model_not_listed" if model else "")
                ),
            }
    except Exception as exc:
        return {
            "ok": False,
            "provider": normalized_provider,
            "endpoint": url,
            "detail": str(exc),
        }


def create_bot_record(session: Session, *, payload: BotCreateRequest) -> Dict[str, Any]:
    """Validate and create a new bot: DB row, env store, and workspace files.

    On any failure after the row is staged, the transaction is rolled back and
    the partially created workspace directory is removed.

    Raises:
        HTTPException: 400 for invalid id/image/timezone, 409 for duplicate id.
    """
    normalized_bot_id = str(payload.id or "").strip()
    if not normalized_bot_id:
        raise HTTPException(status_code=400, detail="Bot ID is required")
    if not BOT_ID_PATTERN.fullmatch(normalized_bot_id):
        raise HTTPException(status_code=400, detail="Bot ID can only contain letters, numbers, and underscores")
    if session.get(BotInstance, normalized_bot_id):
        raise HTTPException(status_code=409, detail=f"Bot ID already exists: {normalized_bot_id}")

    # The image must be registered, READY, and actually present locally.
    image_row = session.get(NanobotImage, payload.image_tag)
    if not image_row:
        raise HTTPException(status_code=400, detail=f"Image not registered in DB: {payload.image_tag}")
    if image_row.status != "READY":
        raise HTTPException(status_code=400, detail=f"Image status is not READY: {payload.image_tag} ({image_row.status})")
    if not docker_manager.has_image(payload.image_tag):
        raise HTTPException(status_code=400, detail=f"Docker image not found locally: {payload.image_tag}")

    normalized_env_params = _normalize_env_params(payload.env_params)
    try:
        normalized_env_params["TZ"] = _normalize_system_timezone(payload.system_timezone)
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc

    bot = BotInstance(
        id=normalized_bot_id,
        name=payload.name,
        enabled=bool(payload.enabled) if payload.enabled is not None else True,
        access_password=str(payload.access_password or ""),
        image_tag=payload.image_tag,
        workspace_dir=os.path.join(BOTS_WORKSPACE_ROOT, normalized_bot_id),
    )
    template_defaults = get_agent_md_templates()
    resource_limits = _normalize_resource_limits(payload.cpu_cores, payload.memory_mb, payload.storage_gb)
    try:
        session.add(bot)
        # flush (not commit) so workspace writes can still be rolled back.
        session.flush()
        _write_env_store(normalized_bot_id, normalized_env_params)
        _sync_workspace_channels(
            session,
            normalized_bot_id,
            channels_override=_normalize_initial_channels(normalized_bot_id, payload.channels),
            global_delivery_override={
                "sendProgress": bool(payload.send_progress) if payload.send_progress is not None else False,
                "sendToolHints": bool(payload.send_tool_hints) if payload.send_tool_hints is not None else False,
            },
            runtime_overrides={
                "llm_provider": payload.llm_provider,
                "llm_model": payload.llm_model,
                "api_key": payload.api_key,
                "api_base": payload.api_base or "",
                "temperature": payload.temperature,
                "top_p": payload.top_p,
                "max_tokens": payload.max_tokens,
                "cpu_cores": resource_limits["cpu_cores"],
                "memory_mb": resource_limits["memory_mb"],
                "storage_gb": resource_limits["storage_gb"],
                # system_prompt and soul_md mirror each other, each falling back
                # to the other before the template default.
                "system_prompt": payload.system_prompt or payload.soul_md or template_defaults.get("soul_md", ""),
                "soul_md": payload.soul_md or payload.system_prompt or template_defaults.get("soul_md", ""),
                "agents_md": payload.agents_md or template_defaults.get("agents_md", ""),
                "user_md": payload.user_md or template_defaults.get("user_md", ""),
                "tools_md": payload.tools_md or template_defaults.get("tools_md", ""),
                "identity_md": payload.identity_md or template_defaults.get("identity_md", ""),
                "send_progress": bool(payload.send_progress) if payload.send_progress is not None else False,
                "send_tool_hints": bool(payload.send_tool_hints) if payload.send_tool_hints is not None else False,
            },
        )
        record_activity_event(
            session,
            normalized_bot_id,
            "bot_created",
            channel="system",
            detail=f"Bot {normalized_bot_id} created",
            metadata={"image_tag": payload.image_tag},
        )
        session.commit()
        session.refresh(bot)
    except Exception:
        session.rollback()
        _cleanup_bot_workspace_root(normalized_bot_id)
        raise
    _invalidate_bot_detail_cache(normalized_bot_id)
    return _serialize_bot(bot)


def list_bots_with_cache(session: Session) -> List[Dict[str, Any]]:
    """Return serialized list rows for all bots, served from cache when fresh.

    On a cache miss the persisted docker_status is reconciled against the live
    Docker status before serializing.
    """
    cached = cache.get_json(_cache_key_bots_list())
    if isinstance(cached, list):
        return cached
    bots = session.exec(select(BotInstance)).all()
    dirty = False
    for bot in bots:
        actual_status = docker_manager.get_bot_status(bot.id)
        if bot.docker_status != actual_status:
            bot.docker_status = actual_status
            if actual_status != "RUNNING" and str(bot.current_state or "").upper() not in {"ERROR"}:
                bot.current_state = "IDLE"
            session.add(bot)
            dirty = True
    if dirty:
        session.commit()
        for bot in bots:
            session.refresh(bot)
    rows = [_serialize_bot_list_item(bot) for bot in bots]
    cache.set_json(_cache_key_bots_list(), rows, ttl=30)
    return rows


def get_bot_detail_cached(session: Session, *, bot_id: str) -> Dict[str, Any]:
    """Return one bot's serialized detail, cached for 30 seconds."""
    cached = cache.get_json(_cache_key_bot_detail(bot_id))
    if isinstance(cached, dict):
        return cached
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    row = _serialize_bot(bot)
    cache.set_json(_cache_key_bot_detail(bot_id), row, ttl=30)
    return row


def authenticate_bot_page_access(session: Session, *, bot_id: str, password: str) -> Dict[str, Any]:
    """Check the bot page access password.

    An empty configured password disables the check entirely.
    NOTE(review): passwords are stored and compared in plaintext — consider
    hashing and a constant-time comparison.
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")

    configured = str(bot.access_password or "").strip()
    if not configured:
        return {"ok": True, "enabled": False, "bot_id": bot_id}

    candidate = str(password or "").strip()
    if not candidate:
        raise HTTPException(status_code=401, detail="Bot access password required")
    if candidate != configured:
        raise HTTPException(status_code=401, detail="Invalid bot access password")
    return {"ok": True, "enabled": True, "bot_id": bot_id}


def update_bot_record(session: Session, *, bot_id: str, payload: BotUpdateRequest) -> Dict[str, Any]:
    """Apply a partial update to a bot: DB columns, env store, and workspace files.

    Managed files are snapshotted up front; if the transaction fails they are
    restored so DB and file-system state stay consistent.
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    managed_file_snapshot = _snapshot_managed_bot_files(bot_id)

    update_data = payload.model_dump(exclude_unset=True)
    if "image_tag" in update_data and update_data["image_tag"]:
        image_tag = str(update_data["image_tag"]).strip()
        image_row = session.get(NanobotImage, image_tag)
        if not image_row:
            raise HTTPException(status_code=400, detail=f"Image not registered in DB: {image_tag}")
        if image_row.status != "READY":
            raise HTTPException(status_code=400, detail=f"Image status is not READY: {image_tag} ({image_row.status})")
        if not docker_manager.has_image(image_tag):
            raise HTTPException(status_code=400, detail=f"Docker image not found locally: {image_tag}")

    env_params = update_data.pop("env_params", None) if isinstance(update_data, dict) else None
    system_timezone = update_data.pop("system_timezone", None) if isinstance(update_data, dict) else None
    normalized_system_timezone: Optional[str] = None
    if system_timezone is not None:
        try:
            normalized_system_timezone = _normalize_system_timezone(system_timezone)
        except ValueError as exc:
            raise HTTPException(status_code=400, detail=str(exc)) from exc

    # Fields routed to the workspace/runtime config rather than DB columns.
    runtime_fields = {
        "llm_provider",
        "llm_model",
        "api_key",
        "api_base",
        "temperature",
        "top_p",
        "max_tokens",
        "cpu_cores",
        "memory_mb",
        "storage_gb",
        "soul_md",
        "agents_md",
        "user_md",
        "tools_md",
        "identity_md",
        "send_progress",
        "send_tool_hints",
        "system_prompt",
    }
    runtime_overrides: Dict[str, Any] = {}
    # tools_config is no longer dashboard-managed; silently discard it.
    update_data.pop("tools_config", None) if isinstance(update_data, dict) else None
    for field in runtime_fields:
        if field in update_data:
            runtime_overrides[field] = update_data.pop(field)

    # Blank provider/model/key strings mean "no change", not "clear".
    for text_field in ("llm_provider", "llm_model", "api_key"):
        if text_field in runtime_overrides:
            text = str(runtime_overrides.get(text_field) or "").strip()
            if not text:
                runtime_overrides.pop(text_field, None)
            else:
                runtime_overrides[text_field] = text
    if "api_base" in runtime_overrides:
        runtime_overrides["api_base"] = str(runtime_overrides.get("api_base") or "").strip()
    # Keep system_prompt and soul_md mirrored when only one was supplied.
    if "system_prompt" in runtime_overrides and "soul_md" not in runtime_overrides:
        runtime_overrides["soul_md"] = runtime_overrides["system_prompt"]
    if "soul_md" in runtime_overrides and "system_prompt" not in runtime_overrides:
        runtime_overrides["system_prompt"] = runtime_overrides["soul_md"]
    if {"cpu_cores", "memory_mb", "storage_gb"} & set(runtime_overrides.keys()):
        runtime_overrides.update(
            _normalize_resource_limits(
                runtime_overrides.get("cpu_cores"),
                runtime_overrides.get("memory_mb"),
                runtime_overrides.get("storage_gb"),
            )
        )

    # Only these columns may be written directly on the DB row.
    for key, value in update_data.items():
        if key in {"name", "image_tag", "access_password", "enabled"}:
            setattr(bot, key, value)
    try:
        session.add(bot)
        session.flush()

        if env_params is not None or normalized_system_timezone is not None:
            next_env_params = _resolve_bot_env_params(bot_id)
            if env_params is not None:
                next_env_params = _normalize_env_params(env_params)
            if normalized_system_timezone is not None:
                next_env_params["TZ"] = normalized_system_timezone
            _write_env_store(bot_id, next_env_params)

        global_delivery_override: Optional[Dict[str, Any]] = None
        if "send_progress" in runtime_overrides or "send_tool_hints" in runtime_overrides:
            global_delivery_override = {}
            if "send_progress" in runtime_overrides:
                global_delivery_override["sendProgress"] = bool(runtime_overrides.get("send_progress"))
            if "send_tool_hints" in runtime_overrides:
                global_delivery_override["sendToolHints"] = bool(runtime_overrides.get("send_tool_hints"))

        _sync_workspace_channels(
            session,
            bot_id,
            runtime_overrides=runtime_overrides if runtime_overrides else None,
            global_delivery_override=global_delivery_override,
        )
        session.commit()
        session.refresh(bot)
    except Exception:
        session.rollback()
        # Roll the managed files back to their pre-update contents.
        _restore_managed_bot_files(bot_id, managed_file_snapshot)
        refreshed_bot = session.get(BotInstance, bot_id)
        if refreshed_bot:
            session.refresh(refreshed_bot)
            bot = refreshed_bot
        raise
    _invalidate_bot_detail_cache(bot_id)
    return _serialize_bot(bot)
+import re +from typing import Any, Dict + +_MCP_SERVER_NAME_RE = re.compile(r"[A-Za-z0-9][A-Za-z0-9._-]{0,63}") + + +def _normalize_mcp_servers(raw: Any) -> Dict[str, Dict[str, Any]]: + if not isinstance(raw, dict): + return {} + rows: Dict[str, Dict[str, Any]] = {} + for server_name, server_cfg in raw.items(): + name = str(server_name or "").strip() + if not name or not _MCP_SERVER_NAME_RE.fullmatch(name): + continue + if not isinstance(server_cfg, dict): + continue + url = str(server_cfg.get("url") or "").strip() + if not url: + continue + transport_type = str(server_cfg.get("type") or "streamableHttp").strip() + if transport_type not in {"streamableHttp", "sse"}: + transport_type = "streamableHttp" + headers_raw = server_cfg.get("headers") + headers: Dict[str, str] = {} + if isinstance(headers_raw, dict): + for key, value in headers_raw.items(): + header_key = str(key or "").strip() + if not header_key: + continue + headers[header_key] = str(value or "").strip() + timeout_raw = server_cfg.get("toolTimeout", 60) + try: + timeout = int(timeout_raw) + except Exception: + timeout = 60 + rows[name] = { + "type": transport_type, + "url": url, + "headers": headers, + "toolTimeout": max(1, min(timeout, 600)), + } + return rows + + +def _merge_mcp_servers_preserving_extras( + current_raw: Any, + normalized: Dict[str, Dict[str, Any]], +) -> Dict[str, Dict[str, Any]]: + current_map = current_raw if isinstance(current_raw, dict) else {} + merged: Dict[str, Dict[str, Any]] = {} + for name, normalized_cfg in normalized.items(): + base = current_map.get(name) + base_cfg = dict(base) if isinstance(base, dict) else {} + next_cfg = dict(base_cfg) + next_cfg.update(normalized_cfg) + merged[name] = next_cfg + return merged + + +def _sanitize_mcp_servers_in_config_data(config_data: Dict[str, Any]) -> Dict[str, Dict[str, Any]]: + if not isinstance(config_data, dict): + return {} + tools_cfg = config_data.get("tools") + if not isinstance(tools_cfg, dict): + tools_cfg = {} + 
current_raw = tools_cfg.get("mcpServers") + normalized = _normalize_mcp_servers(current_raw) + merged = _merge_mcp_servers_preserving_extras(current_raw, normalized) + tools_cfg["mcpServers"] = merged + config_data["tools"] = tools_cfg + return merged diff --git a/backend/services/bot_service.py b/backend/services/bot_service.py new file mode 100644 index 0000000..672e2f8 --- /dev/null +++ b/backend/services/bot_service.py @@ -0,0 +1,264 @@ +import os +from typing import Any, Dict, List, Optional +from datetime import datetime, timezone +from zoneinfo import ZoneInfo + +from sqlmodel import Session + +from core.settings import DEFAULT_BOT_SYSTEM_TIMEZONE +from models.bot import BotInstance +from services.bot_storage_service import ( + _bot_data_root, + _clear_bot_dashboard_direct_session, + _clear_bot_sessions, + _migrate_bot_resources_store, + _normalize_env_params, + _normalize_resource_limits, + _read_bot_config, + _read_bot_resources, + _read_cron_store, + _read_env_store, + _safe_float, + _safe_int, + _workspace_root, + _write_bot_config, + _write_bot_resources, + _write_cron_store, + _write_env_store, +) +from services.bot_channel_service import ( + _channel_api_to_cfg, + _get_bot_channels_from_config, + _normalize_channel_extra, + _normalize_initial_channels, + _read_global_delivery_flags, + _sync_workspace_channels as _sync_workspace_channels_impl, +) +from services.bot_mcp_service import ( + _merge_mcp_servers_preserving_extras, + _normalize_mcp_servers, + _sanitize_mcp_servers_in_config_data, +) +from services.template_service import get_agent_md_templates + +__all__ = [ + "_bot_data_root", + "_channel_api_to_cfg", + "_clear_bot_dashboard_direct_session", + "_clear_bot_sessions", + "_get_bot_channels_from_config", + "_migrate_bot_resources_store", + "_normalize_channel_extra", + "_normalize_env_params", + "_normalize_initial_channels", + "_normalize_mcp_servers", + "_normalize_resource_limits", + "_normalize_system_timezone", + "_provider_defaults", + 
"_read_bot_config", + "_read_bot_resources", + "_read_bot_runtime_snapshot", + "_read_cron_store", + "_read_env_store", + "_read_global_delivery_flags", + "_resolve_bot_env_params", + "_safe_float", + "_safe_int", + "_sanitize_mcp_servers_in_config_data", + "_serialize_bot", + "_serialize_bot_list_item", + "_sync_workspace_channels", + "_workspace_root", + "_write_bot_config", + "_write_bot_resources", + "_write_cron_store", + "_write_env_store", + "_merge_mcp_servers_preserving_extras", +] +def _get_default_system_timezone() -> str: + value = str(DEFAULT_BOT_SYSTEM_TIMEZONE or "").strip() or "Asia/Shanghai" + try: + ZoneInfo(value) + return value + except Exception: + return "Asia/Shanghai" + + +def _normalize_system_timezone(raw: Any) -> str: + value = str(raw or "").strip() + if not value: + return _get_default_system_timezone() + try: + ZoneInfo(value) + except Exception as exc: + raise ValueError("Invalid system timezone. Use an IANA timezone such as Asia/Shanghai.") from exc + return value + + +def _resolve_bot_env_params(bot_id: str, raw: Optional[Dict[str, str]] = None) -> Dict[str, str]: + env_params = _normalize_env_params(raw if isinstance(raw, dict) else _read_env_store(bot_id)) + try: + env_params["TZ"] = _normalize_system_timezone(env_params.get("TZ")) + except ValueError: + env_params["TZ"] = _get_default_system_timezone() + return env_params + + +def _provider_defaults(provider: str) -> tuple[str, str]: + normalized = provider.lower().strip() + if normalized in {"openai"}: + return "openai", "https://api.openai.com/v1" + if normalized in {"openrouter"}: + return "openrouter", "https://openrouter.ai/api/v1" + if normalized in {"dashscope", "aliyun", "qwen", "aliyun-qwen"}: + return "dashscope", "https://dashscope.aliyuncs.com/compatible-mode/v1" + if normalized in {"deepseek"}: + return "deepseek", "https://api.deepseek.com/v1" + if normalized in {"xunfei", "iflytek", "xfyun"}: + return "openai", "https://spark-api-open.xf-yun.com/v1" + if normalized 
in {"vllm"}: + return "openai", "" + if normalized in {"kimi", "moonshot"}: + return "kimi", "https://api.moonshot.cn/v1" + if normalized in {"minimax"}: + return "minimax", "https://api.minimax.chat/v1" + return normalized, "" +def _read_workspace_md(bot_id: str, filename: str, default_value: str) -> str: + path = os.path.join(_workspace_root(bot_id), filename) + if not os.path.isfile(path): + return default_value + try: + with open(path, "r", encoding="utf-8") as f: + return f.read().strip() + except Exception: + return default_value + +def _read_bot_runtime_snapshot(bot: BotInstance) -> Dict[str, Any]: + config_data = _read_bot_config(bot.id) + env_params = _resolve_bot_env_params(bot.id) + template_defaults = get_agent_md_templates() + + provider_name = "" + provider_cfg: Dict[str, Any] = {} + providers_cfg = config_data.get("providers") + if isinstance(providers_cfg, dict): + for p_name, p_cfg in providers_cfg.items(): + provider_name = str(p_name or "").strip() + if isinstance(p_cfg, dict): + provider_cfg = p_cfg + break + + agents_defaults: Dict[str, Any] = {} + agents_cfg = config_data.get("agents") + if isinstance(agents_cfg, dict): + defaults = agents_cfg.get("defaults") + if isinstance(defaults, dict): + agents_defaults = defaults + + channels_cfg = config_data.get("channels") + send_progress, send_tool_hints = _read_global_delivery_flags(channels_cfg) + + llm_provider = provider_name or "" + llm_model = str(agents_defaults.get("model") or "") + api_key = str(provider_cfg.get("apiKey") or "").strip() + api_base = str(provider_cfg.get("apiBase") or "").strip() + api_base_lower = api_base.lower() + provider_alias = str(provider_cfg.get("dashboardProviderAlias") or "").strip().lower() + if llm_provider == "openai" and provider_alias in {"xunfei", "iflytek", "xfyun", "vllm"}: + llm_provider = "xunfei" if provider_alias in {"iflytek", "xfyun"} else provider_alias + elif llm_provider == "openai" and ("spark-api-open.xf-yun.com" in api_base_lower or 
"xf-yun.com" in api_base_lower): + llm_provider = "xunfei" + + soul_md = _read_workspace_md(bot.id, "SOUL.md", template_defaults.get("soul_md", "")) + resources = _read_bot_resources(bot.id, config_data=config_data) + return { + "llm_provider": llm_provider, + "llm_model": llm_model, + "api_key": api_key, + "api_base": api_base, + "temperature": _safe_float(agents_defaults.get("temperature"), 0.2), + "top_p": _safe_float(agents_defaults.get("topP"), 1.0), + "max_tokens": _safe_int(agents_defaults.get("maxTokens"), 8192), + "cpu_cores": resources["cpu_cores"], + "memory_mb": resources["memory_mb"], + "storage_gb": resources["storage_gb"], + "system_timezone": env_params.get("TZ") or _get_default_system_timezone(), + "send_progress": send_progress, + "send_tool_hints": send_tool_hints, + "soul_md": soul_md, + "agents_md": _read_workspace_md(bot.id, "AGENTS.md", template_defaults.get("agents_md", "")), + "user_md": _read_workspace_md(bot.id, "USER.md", template_defaults.get("user_md", "")), + "tools_md": _read_workspace_md(bot.id, "TOOLS.md", template_defaults.get("tools_md", "")), + "identity_md": _read_workspace_md(bot.id, "IDENTITY.md", template_defaults.get("identity_md", "")), + "system_prompt": soul_md, + } + +def _serialize_bot(bot: BotInstance) -> Dict[str, Any]: + runtime = _read_bot_runtime_snapshot(bot) + return { + "id": bot.id, + "name": bot.name, + "enabled": bool(getattr(bot, "enabled", True)), + "access_password": bot.access_password or "", + "has_access_password": bool(str(bot.access_password or "").strip()), + "avatar_model": "base", + "avatar_skin": "blue_suit", + "image_tag": bot.image_tag, + "llm_provider": runtime.get("llm_provider") or "", + "llm_model": runtime.get("llm_model") or "", + "system_prompt": runtime.get("system_prompt") or "", + "api_base": runtime.get("api_base") or "", + "temperature": _safe_float(runtime.get("temperature"), 0.2), + "top_p": _safe_float(runtime.get("top_p"), 1.0), + "max_tokens": 
_safe_int(runtime.get("max_tokens"), 8192), + "cpu_cores": _safe_float(runtime.get("cpu_cores"), 1.0), + "memory_mb": _safe_int(runtime.get("memory_mb"), 1024), + "storage_gb": _safe_int(runtime.get("storage_gb"), 10), + "system_timezone": str(runtime.get("system_timezone") or _get_default_system_timezone()), + "send_progress": bool(runtime.get("send_progress")), + "send_tool_hints": bool(runtime.get("send_tool_hints")), + "soul_md": runtime.get("soul_md") or "", + "agents_md": runtime.get("agents_md") or "", + "user_md": runtime.get("user_md") or "", + "tools_md": runtime.get("tools_md") or "", + "identity_md": runtime.get("identity_md") or "", + "workspace_dir": bot.workspace_dir, + "docker_status": bot.docker_status, + "current_state": bot.current_state, + "last_action": bot.last_action, + "created_at": bot.created_at, + "updated_at": bot.updated_at, + } + +def _serialize_bot_list_item(bot: BotInstance) -> Dict[str, Any]: + return { + "id": bot.id, + "name": bot.name, + "enabled": bool(getattr(bot, "enabled", True)), + "has_access_password": bool(str(bot.access_password or "").strip()), + "image_tag": bot.image_tag, + "docker_status": bot.docker_status, + "current_state": bot.current_state, + "last_action": bot.last_action, + "updated_at": bot.updated_at, + } + +def _sync_workspace_channels( + session: Session, + bot_id: str, + channels_override: Optional[List[Dict[str, Any]]] = None, + global_delivery_override: Optional[Dict[str, Any]] = None, + runtime_overrides: Optional[Dict[str, Any]] = None, +) -> None: + bot = session.get(BotInstance, bot_id) + if not bot: + return + snapshot = _read_bot_runtime_snapshot(bot) + _sync_workspace_channels_impl( + session, + bot_id, + snapshot, + channels_override=channels_override, + global_delivery_override=global_delivery_override, + runtime_overrides=runtime_overrides, + ) diff --git a/backend/services/bot_storage_service.py b/backend/services/bot_storage_service.py new file mode 100644 index 0000000..2b5aef5 --- 
/dev/null +++ b/backend/services/bot_storage_service.py @@ -0,0 +1,248 @@ +from __future__ import annotations + +import json +import os +import re +from typing import Any, Dict, Optional + +from core.settings import BOTS_WORKSPACE_ROOT + +_ENV_KEY_RE = re.compile(r"^[A-Z_][A-Z0-9_]{0,127}$") + +__all__ = [ + "_bot_data_root", + "_clear_bot_dashboard_direct_session", + "_clear_bot_sessions", + "_migrate_bot_resources_store", + "_normalize_env_params", + "_normalize_resource_limits", + "_read_bot_config", + "_read_bot_resources", + "_read_cron_store", + "_read_env_store", + "_safe_float", + "_safe_int", + "_workspace_root", + "_write_bot_config", + "_write_bot_resources", + "_write_cron_store", + "_write_env_store", +] + + +def _workspace_root(bot_id: str) -> str: + return os.path.abspath(os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot", "workspace")) + + +def _bot_data_root(bot_id: str) -> str: + return os.path.abspath(os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot")) + + +def _safe_float(raw: Any, default: float) -> float: + try: + return float(raw) + except Exception: + return default + + +def _safe_int(raw: Any, default: int) -> int: + try: + return int(raw) + except Exception: + return default + + +def _normalize_resource_limits(cpu_cores: Any, memory_mb: Any, storage_gb: Any) -> Dict[str, Any]: + cpu = _safe_float(cpu_cores, 1.0) + mem = _safe_int(memory_mb, 1024) + storage = _safe_int(storage_gb, 10) + if cpu < 0: + cpu = 1.0 + if mem < 0: + mem = 1024 + if storage < 0: + storage = 10 + normalized_cpu = 0.0 if cpu == 0 else min(16.0, max(0.1, cpu)) + normalized_mem = 0 if mem == 0 else min(65536, max(256, mem)) + normalized_storage = 0 if storage == 0 else min(1024, max(1, storage)) + return { + "cpu_cores": normalized_cpu, + "memory_mb": normalized_mem, + "storage_gb": normalized_storage, + } + + +def _normalize_env_params(raw: Any) -> Dict[str, str]: + if not isinstance(raw, dict): + return {} + rows: Dict[str, str] = {} + for key, value in 
raw.items(): + normalized_key = str(key or "").strip().upper() + if not normalized_key or not _ENV_KEY_RE.fullmatch(normalized_key): + continue + rows[normalized_key] = str(value or "").strip() + return rows + + +def _read_json_object(path: str) -> Dict[str, Any]: + if not os.path.isfile(path): + return {} + try: + with open(path, "r", encoding="utf-8") as file: + data = json.load(file) + return data if isinstance(data, dict) else {} + except Exception: + return {} + + +def _write_json_atomic(path: str, payload: Dict[str, Any]) -> None: + os.makedirs(os.path.dirname(path), exist_ok=True) + tmp_path = f"{path}.tmp" + with open(tmp_path, "w", encoding="utf-8") as file: + json.dump(payload, file, ensure_ascii=False, indent=2) + os.replace(tmp_path, path) + + +def _config_json_path(bot_id: str) -> str: + return os.path.join(_bot_data_root(bot_id), "config.json") + + +def _read_bot_config(bot_id: str) -> Dict[str, Any]: + return _read_json_object(_config_json_path(bot_id)) + + +def _write_bot_config(bot_id: str, config_data: Dict[str, Any]) -> None: + _write_json_atomic(_config_json_path(bot_id), config_data) + + +def _resources_json_path(bot_id: str) -> str: + return os.path.join(_bot_data_root(bot_id), "resources.json") + + +def _write_bot_resources(bot_id: str, cpu_cores: Any, memory_mb: Any, storage_gb: Any) -> None: + normalized = _normalize_resource_limits(cpu_cores, memory_mb, storage_gb) + _write_json_atomic( + _resources_json_path(bot_id), + { + "cpuCores": normalized["cpu_cores"], + "memoryMB": normalized["memory_mb"], + "storageGB": normalized["storage_gb"], + }, + ) + + +def _read_bot_resources(bot_id: str, config_data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: + cpu_raw: Any = None + memory_raw: Any = None + storage_raw: Any = None + + data = _read_json_object(_resources_json_path(bot_id)) + if data: + cpu_raw = data.get("cpuCores", data.get("cpu_cores")) + memory_raw = data.get("memoryMB", data.get("memory_mb")) + storage_raw = 
data.get("storageGB", data.get("storage_gb")) + + if cpu_raw is None or memory_raw is None or storage_raw is None: + cfg = config_data if isinstance(config_data, dict) else _read_bot_config(bot_id) + runtime_cfg = cfg.get("runtime") + if isinstance(runtime_cfg, dict): + resources_raw = runtime_cfg.get("resources") + if isinstance(resources_raw, dict): + if cpu_raw is None: + cpu_raw = resources_raw.get("cpuCores", resources_raw.get("cpu_cores")) + if memory_raw is None: + memory_raw = resources_raw.get("memoryMB", resources_raw.get("memory_mb")) + if storage_raw is None: + storage_raw = resources_raw.get("storageGB", resources_raw.get("storage_gb")) + + return _normalize_resource_limits(cpu_raw, memory_raw, storage_raw) + + +def _migrate_bot_resources_store(bot_id: str) -> None: + config_data = _read_bot_config(bot_id) + runtime_cfg = config_data.get("runtime") + resources_raw: Dict[str, Any] = {} + if isinstance(runtime_cfg, dict): + legacy_raw = runtime_cfg.get("resources") + if isinstance(legacy_raw, dict): + resources_raw = legacy_raw + + path = _resources_json_path(bot_id) + if not os.path.isfile(path): + _write_bot_resources( + bot_id, + resources_raw.get("cpuCores", resources_raw.get("cpu_cores")), + resources_raw.get("memoryMB", resources_raw.get("memory_mb")), + resources_raw.get("storageGB", resources_raw.get("storage_gb")), + ) + + if isinstance(runtime_cfg, dict) and "resources" in runtime_cfg: + runtime_cfg.pop("resources", None) + if not runtime_cfg: + config_data.pop("runtime", None) + _write_bot_config(bot_id, config_data) + + +def _env_store_path(bot_id: str) -> str: + return os.path.join(_bot_data_root(bot_id), "env.json") + + +def _read_env_store(bot_id: str) -> Dict[str, str]: + return _normalize_env_params(_read_json_object(_env_store_path(bot_id))) + + +def _write_env_store(bot_id: str, env_params: Dict[str, str]) -> None: + _write_json_atomic(_env_store_path(bot_id), _normalize_env_params(env_params)) + + +def _cron_store_path(bot_id: str) -> 
str: + return os.path.join(_bot_data_root(bot_id), "cron", "jobs.json") + + +def _read_cron_store(bot_id: str) -> Dict[str, Any]: + data = _read_json_object(_cron_store_path(bot_id)) + if not data: + return {"version": 1, "jobs": []} + jobs = data.get("jobs") + if not isinstance(jobs, list): + data["jobs"] = [] + if "version" not in data: + data["version"] = 1 + return data + + +def _write_cron_store(bot_id: str, store: Dict[str, Any]) -> None: + _write_json_atomic(_cron_store_path(bot_id), store) + + +def _sessions_root(bot_id: str) -> str: + return os.path.join(_workspace_root(bot_id), "sessions") + + +def _clear_bot_sessions(bot_id: str) -> int: + root = _sessions_root(bot_id) + if not os.path.isdir(root): + return 0 + deleted = 0 + for name in os.listdir(root): + path = os.path.join(root, name) + if not os.path.isfile(path): + continue + if not name.lower().endswith(".jsonl"): + continue + try: + os.remove(path) + deleted += 1 + except Exception: + continue + return deleted + + +def _clear_bot_dashboard_direct_session(bot_id: str) -> Dict[str, Any]: + root = _sessions_root(bot_id) + os.makedirs(root, exist_ok=True) + path = os.path.join(root, "dashboard_direct.jsonl") + existed = os.path.exists(path) + with open(path, "w", encoding="utf-8"): + pass + return {"path": path, "existed": existed} diff --git a/backend/services/cache_service.py b/backend/services/cache_service.py new file mode 100644 index 0000000..cd697eb --- /dev/null +++ b/backend/services/cache_service.py @@ -0,0 +1,27 @@ +from typing import Optional +from core.cache import cache + +def _cache_key_bots_list() -> str: + return "bot:list:v2" + +def _cache_key_bot_detail(bot_id: str) -> str: + return f"bot:detail:v2:{bot_id}" + +def _cache_key_bot_messages(bot_id: str, limit: int) -> str: + return f"bot:messages:list:v2:{bot_id}:limit:{limit}" + +def _cache_key_bot_messages_page(bot_id: str, limit: int, before_id: Optional[int]) -> str: + cursor = str(int(before_id)) if isinstance(before_id, int) and 
before_id > 0 else "latest" + return f"bot:messages:page:v2:{bot_id}:before:{cursor}:limit:{limit}" + +def _cache_key_images() -> str: + return "images:list" + +def _invalidate_bot_detail_cache(bot_id: str) -> None: + cache.delete(_cache_key_bots_list(), _cache_key_bot_detail(bot_id)) + +def _invalidate_bot_messages_cache(bot_id: str) -> None: + cache.delete_prefix(f"bot:messages:list:v2:{bot_id}:") + cache.delete_prefix(f"bot:messages:page:v2:{bot_id}:") + +def _invalidate_images_cache() -> None: + cache.delete(_cache_key_images()) diff --git a/backend/services/chat_command_service.py b/backend/services/chat_command_service.py new file mode 100644 index 0000000..3ad8495 --- /dev/null +++ b/backend/services/chat_command_service.py @@ -0,0 +1,205 @@ +import logging +import os +from typing import Any, Dict, List + +from fastapi import HTTPException +from sqlmodel import Session + +from core.docker_instance import docker_manager +from models.bot import BotInstance +from services.bot_service import _read_bot_runtime_snapshot +from services.platform_service import ( + create_usage_request, + fail_latest_usage, + record_activity_event, +) +from services.runtime_service import _persist_runtime_packet, _queue_runtime_broadcast +from services.workspace_service import _resolve_workspace_path +from core.utils import _is_video_attachment_path, _is_visual_attachment_path + +logger = logging.getLogger("dashboard.backend") + + +def _normalize_message_media_item(value: Any) -> str: + return str(value or "").strip().replace("\\", "/").lstrip("/") + + +def _normalize_message_media_list(raw: Any) -> List[str]: + if not isinstance(raw, list): + return [] + rows: List[str] = [] + for value in raw: + normalized = _normalize_message_media_item(value) + if normalized: + rows.append(normalized) + return rows + + +def _build_delivery_command(command: str, checked_attachments: List[str]) -> str: + if not checked_attachments: + return command + + attachment_block = "\n".join(f"- {path}" for path in checked_attachments) + if
all(_is_visual_attachment_path(path) for path in checked_attachments): + has_video = any(_is_video_attachment_path(path) for path in checked_attachments) + media_label = "图片/视频" if has_video else "图片" + capability_hint = ( + "1) 附件已随请求附带;图片在可用时可直接作为多模态输入理解,视频请按附件路径处理。\n" + if has_video + else "1) 附件中的图片已作为多模态输入提供,优先直接理解并回答。\n" + ) + if command: + return ( + f"{command}\n\n" + "[Attached files]\n" + f"{attachment_block}\n\n" + "【附件处理要求】\n" + f"{capability_hint}" + "2) 若当前模型或接口不支持直接理解该附件,请明确说明后再调用工具解析。\n" + "3) 除非用户明确要求,不要先调用工具读取附件文件。\n" + "4) 回复语言必须遵循 USER.md;若未指定,则与用户当前输入语言保持一致。\n" + "5) 仅基于可见内容回答;看不清或无法确认的部分请明确说明,不要猜测。" + ) + return ( + "请先处理已附带的附件列表:\n" + f"{attachment_block}\n\n" + f"请直接分析已附带的{media_label}并总结关键信息。\n" + f"{'图片在可用时可直接作为多模态输入理解,视频请按附件路径处理。' if has_video else ''}\n" + "若当前模型或接口不支持直接理解该附件,请明确说明后再调用工具解析。\n" + "回复语言必须遵循 USER.md;若未指定,则与用户当前输入语言保持一致。\n" + "仅基于可见内容回答;看不清或无法确认的部分请明确说明,不要猜测。" + ) + + command_has_paths = all(path in command for path in checked_attachments) if command else False + if command and not command_has_paths: + return ( + f"{command}\n\n" + "[Attached files]\n" + f"{attachment_block}\n\n" + "Please process the attached file(s) listed above when answering this request.\n" + "Reply language must follow USER.md. If not specified, use the same language as the user input." + ) + if not command: + return ( + "Please process the uploaded file(s) listed below:\n" + f"{attachment_block}\n\n" + "Reply language must follow USER.md. If not specified, use the same language as the user input." 
+ ) + return command + + +def send_bot_command(session: Session, bot_id: str, command: str, attachments: Any) -> Dict[str, Any]: + request_id = "" + try: + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + runtime_snapshot = _read_bot_runtime_snapshot(bot) + + normalized_attachments = _normalize_message_media_list(attachments) + text_command = str(command or "").strip() + if not text_command and not normalized_attachments: + raise HTTPException(status_code=400, detail="Command or attachments is required") + + checked_attachments: List[str] = [] + for rel_path in normalized_attachments: + _, target = _resolve_workspace_path(bot_id, rel_path) + if not os.path.isfile(target): + raise HTTPException(status_code=400, detail=f"attachment not found: {rel_path}") + checked_attachments.append(rel_path) + delivery_media = [f"/root/.nanobot/workspace/{path.lstrip('/')}" for path in checked_attachments] + + display_command = text_command if text_command else "[attachment message]" + delivery_command = _build_delivery_command(text_command, checked_attachments) + + request_id = create_usage_request( + session, + bot_id, + display_command, + attachments=checked_attachments, + channel="dashboard", + metadata={"attachment_count": len(checked_attachments)}, + provider=str(runtime_snapshot.get("llm_provider") or "").strip() or None, + model=str(runtime_snapshot.get("llm_model") or "").strip() or None, + ) + record_activity_event( + session, + bot_id, + "command_submitted", + request_id=request_id, + channel="dashboard", + detail="command submitted", + metadata={"attachment_count": len(checked_attachments), "has_text": bool(text_command)}, + ) + session.commit() + + outbound_user_packet: Dict[str, Any] | None = None + if display_command or checked_attachments: + outbound_user_packet = { + "type": "USER_COMMAND", + "channel": "dashboard", + "text": display_command, + "media": checked_attachments, + "request_id": 
request_id, + } + _persist_runtime_packet(bot_id, outbound_user_packet) + + if outbound_user_packet: + _queue_runtime_broadcast(bot_id, outbound_user_packet) + + success = docker_manager.send_command(bot_id, delivery_command, media=delivery_media) + if success: + return {"success": True} + + detail = docker_manager.get_last_delivery_error(bot_id) + fail_latest_usage(session, bot_id, detail or "command delivery failed") + record_activity_event( + session, + bot_id, + "command_failed", + request_id=request_id, + channel="dashboard", + detail=(detail or "command delivery failed")[:400], + ) + session.commit() + _queue_runtime_broadcast( + bot_id, + { + "type": "AGENT_STATE", + "channel": "dashboard", + "payload": { + "state": "ERROR", + "action_msg": detail or "command delivery failed", + }, + }, + ) + raise HTTPException( + status_code=502, + detail=f"Failed to deliver command to bot dashboard channel{': ' + detail if detail else ''}", + ) + except HTTPException: + raise + except Exception as exc: + logger.exception("send_bot_command failed for bot_id=%s", bot_id) + try: + session.rollback() + except Exception: + pass + if request_id: + try: + fail_latest_usage(session, bot_id, str(exc)) + record_activity_event( + session, + bot_id, + "command_failed", + request_id=request_id, + channel="dashboard", + detail=str(exc)[:400], + ) + session.commit() + except Exception: + try: + session.rollback() + except Exception: + pass + raise HTTPException(status_code=500, detail=f"Failed to process bot command: {exc}") from exc diff --git a/backend/services/chat_history_service.py b/backend/services/chat_history_service.py new file mode 100644 index 0000000..97993c0 --- /dev/null +++ b/backend/services/chat_history_service.py @@ -0,0 +1,335 @@ +import json +import os +from datetime import datetime, timezone +from typing import Any, Dict, List, Optional + +from fastapi import HTTPException +from sqlmodel import Session, select + +from core.cache import cache +from 
core.docker_instance import docker_manager +from core.utils import _resolve_local_day_range +from models.bot import BotInstance, BotMessage +from services.bot_storage_service import _clear_bot_dashboard_direct_session, _clear_bot_sessions, _workspace_root +from services.cache_service import ( + _cache_key_bot_messages, + _cache_key_bot_messages_page, + _invalidate_bot_detail_cache, + _invalidate_bot_messages_cache, +) +from services.platform_service import get_chat_pull_page_size, record_activity_event + + +def _get_bot_or_404(session: Session, bot_id: str) -> BotInstance: + bot = session.get(BotInstance, bot_id) + if not bot: + raise HTTPException(status_code=404, detail="Bot not found") + return bot + + +def _normalize_message_media_item(bot_id: str, value: Any) -> str: + raw = str(value or "").strip().replace("\\", "/") + if not raw: + return "" + if raw.startswith("/root/.nanobot/workspace/"): + return raw[len("/root/.nanobot/workspace/") :].lstrip("/") + root = _workspace_root(bot_id) + if os.path.isabs(raw): + try: + if os.path.commonpath([root, raw]) == root: + return os.path.relpath(raw, root).replace("\\", "/") + except Exception: + pass + return raw.lstrip("/") + + +def _normalize_message_media_list(raw: Any, bot_id: str) -> List[str]: + if not isinstance(raw, list): + return [] + rows: List[str] = [] + for value in raw: + normalized = _normalize_message_media_item(bot_id, value) + if normalized: + rows.append(normalized) + return rows + + +def _parse_message_media(bot_id: str, media_raw: Optional[str]) -> List[str]: + if not media_raw: + return [] + try: + parsed = json.loads(media_raw) + except Exception: + return [] + return _normalize_message_media_list(parsed, bot_id) + + +def serialize_bot_message_row(bot_id: str, row: BotMessage) -> Dict[str, Any]: + created_at = row.created_at + if created_at.tzinfo is None: + created_at = created_at.replace(tzinfo=timezone.utc) + return { + "id": row.id, + "bot_id": row.bot_id, + "role": row.role, + "text": 
row.text, + "media": _parse_message_media(bot_id, getattr(row, "media_json", None)), + "feedback": str(getattr(row, "feedback", "") or "").strip() or None, + "ts": int(created_at.timestamp() * 1000), + } + + +def list_bot_messages_payload(session: Session, bot_id: str, limit: int = 200) -> List[Dict[str, Any]]: + _get_bot_or_404(session, bot_id) + safe_limit = max(1, min(int(limit), 500)) + cached = cache.get_json(_cache_key_bot_messages(bot_id, safe_limit)) + if isinstance(cached, list): + return cached + rows = session.exec( + select(BotMessage) + .where(BotMessage.bot_id == bot_id) + .order_by(BotMessage.created_at.desc(), BotMessage.id.desc()) + .limit(safe_limit) + ).all() + payload = [serialize_bot_message_row(bot_id, row) for row in reversed(rows)] + cache.set_json(_cache_key_bot_messages(bot_id, safe_limit), payload, ttl=30) + return payload + + +def list_bot_messages_page_payload( + session: Session, + bot_id: str, + limit: Optional[int], + before_id: Optional[int], +) -> Dict[str, Any]: + _get_bot_or_404(session, bot_id) + configured_limit = get_chat_pull_page_size() + safe_limit = max(1, min(int(limit if limit is not None else configured_limit), 500)) + safe_before_id = int(before_id) if isinstance(before_id, int) and before_id > 0 else None + cache_key = _cache_key_bot_messages_page(bot_id, safe_limit, safe_before_id) + cached = cache.get_json(cache_key) + if isinstance(cached, dict) and isinstance(cached.get("items"), list): + return cached + + stmt = ( + select(BotMessage) + .where(BotMessage.bot_id == bot_id) + .order_by(BotMessage.created_at.desc(), BotMessage.id.desc()) + .limit(safe_limit + 1) + ) + if safe_before_id is not None: + stmt = stmt.where(BotMessage.id < safe_before_id) + + rows = session.exec(stmt).all() + has_more = len(rows) > safe_limit + if has_more: + rows = rows[:safe_limit] + ordered = list(reversed(rows)) + payload = { + "items": [serialize_bot_message_row(bot_id, row) for row in ordered], + "has_more": bool(has_more), + 
"next_before_id": rows[-1].id if rows else None, + "limit": safe_limit, + } + cache.set_json(cache_key, payload, ttl=30) + return payload + + +def list_bot_messages_by_date_payload( + session: Session, + bot_id: str, + date: str, + tz_offset_minutes: Optional[int], + limit: Optional[int], +) -> Dict[str, Any]: + _get_bot_or_404(session, bot_id) + + utc_start, utc_end = _resolve_local_day_range(date, tz_offset_minutes) + configured_limit = max(60, get_chat_pull_page_size()) + safe_limit = max(12, min(int(limit if limit is not None else configured_limit), 240)) + before_limit = max(3, min(18, safe_limit // 4)) + after_limit = max(0, safe_limit - before_limit - 1) + + exact_anchor = session.exec( + select(BotMessage) + .where(BotMessage.bot_id == bot_id, BotMessage.created_at >= utc_start, BotMessage.created_at < utc_end) + .order_by(BotMessage.created_at.asc(), BotMessage.id.asc()) + .limit(1) + ).first() + + anchor = exact_anchor + matched_exact_date = exact_anchor is not None + if anchor is None: + next_row = session.exec( + select(BotMessage) + .where(BotMessage.bot_id == bot_id, BotMessage.created_at >= utc_end) + .order_by(BotMessage.created_at.asc(), BotMessage.id.asc()) + .limit(1) + ).first() + prev_row = session.exec( + select(BotMessage) + .where(BotMessage.bot_id == bot_id, BotMessage.created_at < utc_start) + .order_by(BotMessage.created_at.desc(), BotMessage.id.desc()) + .limit(1) + ).first() + if next_row and prev_row: + gap_after = next_row.created_at - utc_end + gap_before = utc_start - prev_row.created_at + anchor = next_row if gap_after <= gap_before else prev_row + else: + anchor = next_row or prev_row + + if anchor is None or anchor.id is None: + return { + "items": [], + "anchor_id": None, + "resolved_ts": None, + "matched_exact_date": False, + "has_more_before": False, + "has_more_after": False, + } + + before_rows = session.exec( + select(BotMessage) + .where(BotMessage.bot_id == bot_id, BotMessage.id < anchor.id) + 
.order_by(BotMessage.created_at.desc(), BotMessage.id.desc()) + .limit(before_limit) + ).all() + after_rows = session.exec( + select(BotMessage) + .where(BotMessage.bot_id == bot_id, BotMessage.id > anchor.id) + .order_by(BotMessage.created_at.asc(), BotMessage.id.asc()) + .limit(after_limit) + ).all() + + ordered = list(reversed(before_rows)) + [anchor] + after_rows + first_row = ordered[0] if ordered else None + last_row = ordered[-1] if ordered else None + + has_more_before = False + if first_row is not None and first_row.id is not None: + has_more_before = ( + session.exec( + select(BotMessage.id) + .where(BotMessage.bot_id == bot_id, BotMessage.id < first_row.id) + .order_by(BotMessage.id.desc()) + .limit(1) + ).first() + is not None + ) + + has_more_after = False + if last_row is not None and last_row.id is not None: + has_more_after = ( + session.exec( + select(BotMessage.id) + .where(BotMessage.bot_id == bot_id, BotMessage.id > last_row.id) + .order_by(BotMessage.id.asc()) + .limit(1) + ).first() + is not None + ) + + return { + "items": [serialize_bot_message_row(bot_id, row) for row in ordered], + "anchor_id": anchor.id, + "resolved_ts": int(anchor.created_at.timestamp() * 1000), + "matched_exact_date": matched_exact_date, + "has_more_before": has_more_before, + "has_more_after": has_more_after, + } + + +def update_bot_message_feedback_payload( + session: Session, + bot_id: str, + message_id: int, + feedback: Optional[str], +) -> Dict[str, Any]: + _get_bot_or_404(session, bot_id) + row = session.get(BotMessage, message_id) + if not row or row.bot_id != bot_id: + raise HTTPException(status_code=404, detail="Message not found") + if row.role != "assistant": + raise HTTPException(status_code=400, detail="Only assistant messages support feedback") + + raw = str(feedback or "").strip().lower() + if raw in {"", "none", "null"}: + row.feedback = None + row.feedback_at = None + elif raw in {"up", "down"}: + row.feedback = raw + row.feedback_at = datetime.utcnow() 
+ else: + raise HTTPException(status_code=400, detail="feedback must be 'up' or 'down'") + + session.add(row) + session.commit() + _invalidate_bot_messages_cache(bot_id) + return { + "status": "updated", + "bot_id": bot_id, + "message_id": row.id, + "feedback": row.feedback, + "feedback_at": row.feedback_at.isoformat() if row.feedback_at else None, + } + + +def clear_bot_messages_payload(session: Session, bot_id: str) -> Dict[str, Any]: + bot = _get_bot_or_404(session, bot_id) + rows = session.exec(select(BotMessage).where(BotMessage.bot_id == bot_id)).all() + deleted = 0 + for row in rows: + session.delete(row) + deleted += 1 + cleared_sessions = _clear_bot_sessions(bot_id) + if str(bot.docker_status or "").upper() == "RUNNING": + try: + docker_manager.send_command(bot_id, "/new") + except Exception: + pass + bot.last_action = "" + bot.current_state = "IDLE" + bot.updated_at = datetime.utcnow() + session.add(bot) + record_activity_event( + session, + bot_id, + "history_cleared", + channel="system", + detail=f"Cleared {deleted} stored messages", + metadata={"deleted_messages": deleted, "cleared_sessions": cleared_sessions}, + ) + session.commit() + _invalidate_bot_detail_cache(bot_id) + _invalidate_bot_messages_cache(bot_id) + return {"bot_id": bot_id, "deleted": deleted, "cleared_sessions": cleared_sessions} + + +def clear_dashboard_direct_session_payload(session: Session, bot_id: str) -> Dict[str, Any]: + bot = _get_bot_or_404(session, bot_id) + result = _clear_bot_dashboard_direct_session(bot_id) + if str(bot.docker_status or "").upper() == "RUNNING": + try: + docker_manager.send_command(bot_id, "/new") + except Exception: + pass + + bot.updated_at = datetime.utcnow() + session.add(bot) + record_activity_event( + session, + bot_id, + "dashboard_session_cleared", + channel="dashboard", + detail="Cleared dashboard_direct session file", + metadata={"session_file": result["path"], "previously_existed": result["existed"]}, + ) + session.commit() + 
_invalidate_bot_detail_cache(bot_id) + return { + "bot_id": bot_id, + "cleared": True, + "session_file": result["path"], + "previously_existed": result["existed"], + } diff --git a/backend/services/platform_activity_service.py b/backend/services/platform_activity_service.py new file mode 100644 index 0000000..8bb7fdf --- /dev/null +++ b/backend/services/platform_activity_service.py @@ -0,0 +1,103 @@ +import json +from datetime import datetime, timedelta +from typing import Any, Dict, List, Optional + +from sqlalchemy import delete as sql_delete +from sqlmodel import Session, select + +from models.platform import BotActivityEvent +from schemas.platform import PlatformActivityItem +from services.platform_settings_service import get_activity_event_retention_days + +ACTIVITY_EVENT_PRUNE_INTERVAL = timedelta(minutes=10) +OPERATIONAL_ACTIVITY_EVENT_TYPES = { + "bot_created", + "bot_started", + "bot_stopped", + "bot_warning", + "bot_enabled", + "bot_disabled", + "bot_deactivated", + "command_submitted", + "command_failed", + "history_cleared", +} + +_last_activity_event_prune_at: Optional[datetime] = None + + +def _utcnow() -> datetime: + return datetime.utcnow() + + +def prune_expired_activity_events(session: Session, force: bool = False) -> int: + global _last_activity_event_prune_at + + now = _utcnow() + if not force and _last_activity_event_prune_at and now - _last_activity_event_prune_at < ACTIVITY_EVENT_PRUNE_INTERVAL: + return 0 + + retention_days = get_activity_event_retention_days(session) + cutoff = now - timedelta(days=retention_days) + result = session.exec(sql_delete(BotActivityEvent).where(BotActivityEvent.created_at < cutoff)) + _last_activity_event_prune_at = now + return int(getattr(result, "rowcount", 0) or 0) + + +def record_activity_event( + session: Session, + bot_id: str, + event_type: str, + request_id: Optional[str] = None, + channel: str = "dashboard", + detail: Optional[str] = None, + metadata: Optional[Dict[str, Any]] = None, +) -> None: + 
normalized_event_type = str(event_type or "unknown").strip().lower() or "unknown" + if normalized_event_type not in OPERATIONAL_ACTIVITY_EVENT_TYPES: + return + prune_expired_activity_events(session, force=False) + row = BotActivityEvent( + bot_id=bot_id, + request_id=request_id, + event_type=normalized_event_type, + channel=str(channel or "dashboard").strip().lower() or "dashboard", + detail=(str(detail or "").strip() or None), + metadata_json=json.dumps(metadata or {}, ensure_ascii=False) if metadata else None, + created_at=_utcnow(), + ) + session.add(row) + + +def list_activity_events( + session: Session, + bot_id: Optional[str] = None, + limit: int = 100, +) -> List[Dict[str, Any]]: + deleted = prune_expired_activity_events(session, force=False) + if deleted > 0: + session.commit() + safe_limit = max(1, min(int(limit), 500)) + stmt = select(BotActivityEvent).order_by(BotActivityEvent.created_at.desc(), BotActivityEvent.id.desc()).limit(safe_limit) + if bot_id: + stmt = stmt.where(BotActivityEvent.bot_id == bot_id) + rows = session.exec(stmt).all() + items: List[Dict[str, Any]] = [] + for row in rows: + try: + metadata = json.loads(row.metadata_json or "{}") + except Exception: + metadata = {} + items.append( + PlatformActivityItem( + id=int(row.id or 0), + bot_id=row.bot_id, + request_id=row.request_id, + event_type=row.event_type, + channel=row.channel, + detail=row.detail, + metadata=metadata if isinstance(metadata, dict) else {}, + created_at=row.created_at.isoformat() + "Z", + ).model_dump() + ) + return items diff --git a/backend/services/platform_overview_service.py b/backend/services/platform_overview_service.py new file mode 100644 index 0000000..866f442 --- /dev/null +++ b/backend/services/platform_overview_service.py @@ -0,0 +1,121 @@ +from typing import Any, Dict, List + +from sqlmodel import Session, select + +from core.utils import _calc_dir_size_bytes +from models.bot import BotInstance, NanobotImage +from services.bot_storage_service import 
_read_bot_resources, _workspace_root +from services.platform_activity_service import list_activity_events, prune_expired_activity_events +from services.platform_settings_service import get_platform_settings +from services.platform_usage_service import list_usage + + +def build_platform_overview(session: Session, docker_manager: Any) -> Dict[str, Any]: + deleted = prune_expired_activity_events(session, force=False) + if deleted > 0: + session.commit() + bots = session.exec(select(BotInstance)).all() + images = session.exec(select(NanobotImage).order_by(NanobotImage.created_at.desc())).all() + settings = get_platform_settings(session) + + running = 0 + stopped = 0 + disabled = 0 + configured_cpu_total = 0.0 + configured_memory_total = 0 + configured_storage_total = 0 + workspace_used_total = 0 + workspace_limit_total = 0 + live_cpu_percent_total = 0.0 + live_memory_used_total = 0 + live_memory_limit_total = 0 + + bot_rows: List[Dict[str, Any]] = [] + for bot in bots: + enabled = bool(getattr(bot, "enabled", True)) + runtime_status = docker_manager.get_bot_status(bot.id) if docker_manager else str(bot.docker_status or "STOPPED") + resources = _read_bot_resources(bot.id) + runtime = ( + docker_manager.get_bot_resource_snapshot(bot.id) + if docker_manager + else {"usage": {}, "limits": {}, "docker_status": runtime_status} + ) + workspace_root = _workspace_root(bot.id) + workspace_used = _calc_dir_size_bytes(workspace_root) + workspace_limit = int(resources["storage_gb"] or 0) * 1024 * 1024 * 1024 + + configured_cpu_total += float(resources["cpu_cores"] or 0) + configured_memory_total += int(resources["memory_mb"] or 0) * 1024 * 1024 + configured_storage_total += workspace_limit + workspace_used_total += workspace_used + workspace_limit_total += workspace_limit + live_cpu_percent_total += float((runtime.get("usage") or {}).get("cpu_percent") or 0.0) + live_memory_used_total += int((runtime.get("usage") or {}).get("memory_bytes") or 0) + live_memory_limit_total += 
int((runtime.get("usage") or {}).get("memory_limit_bytes") or 0) + + if not enabled: + disabled += 1 + elif runtime_status == "RUNNING": + running += 1 + else: + stopped += 1 + + bot_rows.append( + { + "id": bot.id, + "name": bot.name, + "enabled": enabled, + "docker_status": runtime_status, + "image_tag": bot.image_tag, + "llm_provider": getattr(bot, "llm_provider", None), + "llm_model": getattr(bot, "llm_model", None), + "current_state": bot.current_state, + "last_action": bot.last_action, + "resources": resources, + "workspace_usage_bytes": workspace_used, + "workspace_limit_bytes": workspace_limit if workspace_limit > 0 else None, + } + ) + + usage = list_usage(session, limit=20) + events = list_activity_events(session, limit=20) + + return { + "summary": { + "bots": { + "total": len(bots), + "running": running, + "stopped": stopped, + "disabled": disabled, + }, + "images": { + "total": len(images), + "ready": len([row for row in images if row.status == "READY"]), + "abnormal": len([row for row in images if row.status != "READY"]), + }, + "resources": { + "configured_cpu_cores": round(configured_cpu_total, 2), + "configured_memory_bytes": configured_memory_total, + "configured_storage_bytes": configured_storage_total, + "live_cpu_percent": round(live_cpu_percent_total, 2), + "live_memory_used_bytes": live_memory_used_total, + "live_memory_limit_bytes": live_memory_limit_total, + "workspace_used_bytes": workspace_used_total, + "workspace_limit_bytes": workspace_limit_total, + }, + }, + "images": [ + { + "tag": row.tag, + "version": row.version, + "status": row.status, + "source_dir": row.source_dir, + "created_at": row.created_at.isoformat() + "Z", + } + for row in images + ], + "bots": bot_rows, + "settings": settings.model_dump(), + "usage": usage, + "events": events, + } diff --git a/backend/services/platform_runtime_settings_service.py b/backend/services/platform_runtime_settings_service.py new file mode 100644 index 0000000..4def569 --- /dev/null +++ 
b/backend/services/platform_runtime_settings_service.py @@ -0,0 +1,147 @@ +from typing import Any, Dict, List + +from sqlmodel import Session, select + +from core.database import engine +from core.settings import ( + DEFAULT_STT_AUDIO_FILTER, + DEFAULT_STT_AUDIO_PREPROCESS, + DEFAULT_STT_DEFAULT_LANGUAGE, + DEFAULT_STT_FORCE_SIMPLIFIED, + DEFAULT_STT_INITIAL_PROMPT, + DEFAULT_STT_MAX_AUDIO_SECONDS, + STT_DEVICE, + STT_MODEL, +) +from models.platform import PlatformSetting +from schemas.platform import LoadingPageSettings, PlatformSettingsPayload +from services.platform_settings_core import ( + SETTING_KEYS, + SYSTEM_SETTING_DEFINITIONS, + _bootstrap_platform_setting_values, + _normalize_extension_list, + _read_setting_value, + _upsert_setting_row, +) +from services.platform_system_settings_service import ensure_default_system_settings + + +def default_platform_settings() -> PlatformSettingsPayload: + bootstrap = _bootstrap_platform_setting_values() + return PlatformSettingsPayload( + page_size=int(bootstrap["page_size"]), + chat_pull_page_size=int(bootstrap["chat_pull_page_size"]), + command_auto_unlock_seconds=int(bootstrap["command_auto_unlock_seconds"]), + upload_max_mb=int(bootstrap["upload_max_mb"]), + allowed_attachment_extensions=list(bootstrap["allowed_attachment_extensions"]), + workspace_download_extensions=list(bootstrap["workspace_download_extensions"]), + speech_enabled=bool(bootstrap["speech_enabled"]), + speech_max_audio_seconds=DEFAULT_STT_MAX_AUDIO_SECONDS, + speech_default_language=DEFAULT_STT_DEFAULT_LANGUAGE, + speech_force_simplified=DEFAULT_STT_FORCE_SIMPLIFIED, + speech_audio_preprocess=DEFAULT_STT_AUDIO_PREPROCESS, + speech_audio_filter=DEFAULT_STT_AUDIO_FILTER, + speech_initial_prompt=DEFAULT_STT_INITIAL_PROMPT, + loading_page=LoadingPageSettings(), + ) + + +def get_platform_settings(session: Session) -> PlatformSettingsPayload: + defaults = default_platform_settings() + ensure_default_system_settings(session) + rows = 
session.exec(select(PlatformSetting).where(PlatformSetting.key.in_(SETTING_KEYS))).all() + data: Dict[str, Any] = {row.key: _read_setting_value(row) for row in rows} + + merged = defaults.model_dump() + merged["page_size"] = max(1, min(100, int(data.get("page_size") or merged["page_size"]))) + merged["chat_pull_page_size"] = max(10, min(500, int(data.get("chat_pull_page_size") or merged["chat_pull_page_size"]))) + merged["command_auto_unlock_seconds"] = max( + 1, + min(600, int(data.get("command_auto_unlock_seconds") or merged["command_auto_unlock_seconds"])), + ) + merged["upload_max_mb"] = int(data.get("upload_max_mb") or merged["upload_max_mb"]) + merged["allowed_attachment_extensions"] = _normalize_extension_list( + data.get("allowed_attachment_extensions", merged["allowed_attachment_extensions"]) + ) + merged["workspace_download_extensions"] = _normalize_extension_list( + data.get("workspace_download_extensions", merged["workspace_download_extensions"]) + ) + merged["speech_enabled"] = bool(data.get("speech_enabled", merged["speech_enabled"])) + loading_page = data.get("loading_page") + if isinstance(loading_page, dict): + current = dict(merged["loading_page"]) + for key in ("title", "subtitle", "description"): + value = str(loading_page.get(key) or "").strip() + if value: + current[key] = value + merged["loading_page"] = current + return PlatformSettingsPayload.model_validate(merged) + + +def save_platform_settings(session: Session, payload: PlatformSettingsPayload) -> PlatformSettingsPayload: + normalized = PlatformSettingsPayload( + page_size=max(1, min(100, int(payload.page_size))), + chat_pull_page_size=max(10, min(500, int(payload.chat_pull_page_size))), + command_auto_unlock_seconds=max(1, min(600, int(payload.command_auto_unlock_seconds))), + upload_max_mb=payload.upload_max_mb, + allowed_attachment_extensions=_normalize_extension_list(payload.allowed_attachment_extensions), + 
workspace_download_extensions=_normalize_extension_list(payload.workspace_download_extensions), + speech_enabled=bool(payload.speech_enabled), + loading_page=LoadingPageSettings.model_validate(payload.loading_page.model_dump()), + ) + payload_by_key = normalized.model_dump() + for key in SETTING_KEYS: + definition = SYSTEM_SETTING_DEFINITIONS[key] + _upsert_setting_row( + session, + key, + name=str(definition["name"]), + category=str(definition["category"]), + description=str(definition["description"]), + value_type=str(definition["value_type"]), + value=payload_by_key[key], + is_public=bool(definition["is_public"]), + sort_order=int(definition["sort_order"]), + ) + session.commit() + return normalized + + +def get_platform_settings_snapshot() -> PlatformSettingsPayload: + with Session(engine) as session: + return get_platform_settings(session) + + +def get_upload_max_mb() -> int: + return get_platform_settings_snapshot().upload_max_mb + + +def get_allowed_attachment_extensions() -> List[str]: + return get_platform_settings_snapshot().allowed_attachment_extensions + + +def get_workspace_download_extensions() -> List[str]: + return get_platform_settings_snapshot().workspace_download_extensions + + +def get_page_size() -> int: + return get_platform_settings_snapshot().page_size + + +def get_chat_pull_page_size() -> int: + return get_platform_settings_snapshot().chat_pull_page_size + + +def get_speech_runtime_settings() -> Dict[str, Any]: + settings = get_platform_settings_snapshot() + return { + "enabled": bool(settings.speech_enabled), + "max_audio_seconds": int(DEFAULT_STT_MAX_AUDIO_SECONDS), + "default_language": str(DEFAULT_STT_DEFAULT_LANGUAGE or "zh").strip().lower() or "zh", + "force_simplified": bool(DEFAULT_STT_FORCE_SIMPLIFIED), + "audio_preprocess": bool(DEFAULT_STT_AUDIO_PREPROCESS), + "audio_filter": str(DEFAULT_STT_AUDIO_FILTER or "").strip(), + "initial_prompt": str(DEFAULT_STT_INITIAL_PROMPT or "").strip(), + "model": STT_MODEL, + "device": 
STT_DEVICE, + } diff --git a/backend/services/platform_service.py b/backend/services/platform_service.py index 3882dfb..7752003 100644 --- a/backend/services/platform_service.py +++ b/backend/services/platform_service.py @@ -1,1117 +1,36 @@ -import json -import math -import os -import re -import uuid -from collections import defaultdict -from datetime import datetime, timedelta -from typing import Any, Dict, List, Optional - -from sqlalchemy import delete as sql_delete, func -from sqlmodel import Session, select - -from core.database import engine -from core.settings import ( - BOTS_WORKSPACE_ROOT, - DEFAULT_CHAT_PULL_PAGE_SIZE, - DEFAULT_COMMAND_AUTO_UNLOCK_SECONDS, - DEFAULT_PAGE_SIZE, - DEFAULT_STT_AUDIO_FILTER, - DEFAULT_STT_AUDIO_PREPROCESS, - DEFAULT_STT_DEFAULT_LANGUAGE, - DEFAULT_STT_FORCE_SIMPLIFIED, - DEFAULT_STT_INITIAL_PROMPT, - DEFAULT_STT_MAX_AUDIO_SECONDS, - DEFAULT_UPLOAD_MAX_MB, - DEFAULT_WORKSPACE_DOWNLOAD_EXTENSIONS, - STT_DEVICE, - STT_ENABLED_DEFAULT, - STT_MODEL, +from services.platform_activity_service import ( + list_activity_events, + prune_expired_activity_events, + record_activity_event, ) -from models.bot import BotInstance, NanobotImage -from models.platform import BotActivityEvent, BotRequestUsage, PlatformSetting -from schemas.platform import ( - LoadingPageSettings, - PlatformActivityItem, - PlatformUsageAnalytics, - PlatformUsageAnalyticsSeries, - PlatformSettingsPayload, - PlatformUsageResponse, - PlatformUsageItem, - PlatformUsageSummary, - SystemSettingItem, - SystemSettingPayload, +from services.platform_overview_service import build_platform_overview +from services.platform_settings_service import ( + ACTIVITY_EVENT_RETENTION_SETTING_KEY, + DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS, + DEFAULT_ALLOWED_ATTACHMENT_EXTENSIONS, + SETTING_KEYS, + SYSTEM_SETTING_DEFINITIONS, + create_or_update_system_setting, + default_platform_settings, + delete_system_setting, + ensure_default_system_settings, + get_activity_event_retention_days, + 
get_allowed_attachment_extensions, + get_chat_pull_page_size, + get_page_size, + get_platform_settings, + get_platform_settings_snapshot, + get_speech_runtime_settings, + get_upload_max_mb, + get_workspace_download_extensions, + list_system_settings, + save_platform_settings, ) - -DEFAULT_ALLOWED_ATTACHMENT_EXTENSIONS: tuple[str, ...] = () -DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS = 7 -ACTIVITY_EVENT_RETENTION_SETTING_KEY = "activity_event_retention_days" -ACTIVITY_EVENT_PRUNE_INTERVAL = timedelta(minutes=10) -OPERATIONAL_ACTIVITY_EVENT_TYPES = { - "bot_created", - "bot_started", - "bot_stopped", - "bot_warning", - "bot_enabled", - "bot_disabled", - "bot_deactivated", - "command_submitted", - "command_failed", - "history_cleared", -} -SETTING_KEYS = ( - "page_size", - "chat_pull_page_size", - "command_auto_unlock_seconds", - "upload_max_mb", - "allowed_attachment_extensions", - "workspace_download_extensions", - "speech_enabled", +from services.platform_usage_service import ( + bind_usage_message, + create_usage_request, + estimate_tokens, + fail_latest_usage, + finalize_usage_from_packet, + list_usage, ) -PROTECTED_SETTING_KEYS = set(SETTING_KEYS) | {ACTIVITY_EVENT_RETENTION_SETTING_KEY} -DEPRECATED_SETTING_KEYS = { - "loading_page", - "speech_max_audio_seconds", - "speech_default_language", - "speech_force_simplified", - "speech_audio_preprocess", - "speech_audio_filter", - "speech_initial_prompt", -} -SYSTEM_SETTING_DEFINITIONS: Dict[str, Dict[str, Any]] = { - "page_size": { - "name": "分页大小", - "category": "ui", - "description": "平台各类列表默认每页条数。", - "value_type": "integer", - "value": DEFAULT_PAGE_SIZE, - "is_public": True, - "sort_order": 5, - }, - "chat_pull_page_size": { - "name": "对话懒加载条数", - "category": "chat", - "description": "Bot 对话区向上懒加载时每次读取的消息条数。", - "value_type": "integer", - "value": DEFAULT_CHAT_PULL_PAGE_SIZE, - "is_public": True, - "sort_order": 8, - }, - "command_auto_unlock_seconds": { - "name": "发送按钮自动恢复秒数", - "category": "chat", - "description": 
"对话发送后按钮保持停止态的最长秒数,超时后自动恢复为可发送状态。", - "value_type": "integer", - "value": DEFAULT_COMMAND_AUTO_UNLOCK_SECONDS, - "is_public": True, - "sort_order": 9, - }, - "upload_max_mb": { - "name": "上传大小限制", - "category": "upload", - "description": "单文件上传大小限制,单位 MB。", - "value_type": "integer", - "value": DEFAULT_UPLOAD_MAX_MB, - "is_public": False, - "sort_order": 10, - }, - "allowed_attachment_extensions": { - "name": "允许附件后缀", - "category": "upload", - "description": "允许上传的附件后缀列表,留空表示不限制。", - "value_type": "json", - "value": list(DEFAULT_ALLOWED_ATTACHMENT_EXTENSIONS), - "is_public": False, - "sort_order": 20, - }, - "workspace_download_extensions": { - "name": "工作区下载后缀", - "category": "workspace", - "description": "命中后缀的工作区文件默认走下载模式。", - "value_type": "json", - "value": list(DEFAULT_WORKSPACE_DOWNLOAD_EXTENSIONS), - "is_public": False, - "sort_order": 30, - }, - "speech_enabled": { - "name": "语音识别开关", - "category": "speech", - "description": "控制 Bot 语音转写功能是否启用。", - "value_type": "boolean", - "value": STT_ENABLED_DEFAULT, - "is_public": True, - "sort_order": 32, - }, - ACTIVITY_EVENT_RETENTION_SETTING_KEY: { - "name": "活动事件保留天数", - "category": "maintenance", - "description": "bot_activity_event 运维事件的保留天数,超期记录会自动清理。", - "value_type": "integer", - "value": DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS, - "is_public": False, - "sort_order": 34, - }, -} - -_last_activity_event_prune_at: Optional[datetime] = None - - -def _utcnow() -> datetime: - return datetime.utcnow() - - -def _normalize_activity_event_retention_days(raw: Any) -> int: - try: - value = int(raw) - except Exception: - value = DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS - return max(1, min(3650, value)) - - -def _normalize_extension(raw: Any) -> str: - text = str(raw or "").strip().lower() - if not text: - return "" - if text.startswith("*."): - text = text[1:] - if not text.startswith("."): - text = f".{text}" - if not re.fullmatch(r"\.[a-z0-9][a-z0-9._+-]{0,31}", text): - return "" - return text - - -def 
_normalize_extension_list(rows: Any) -> List[str]: - if not isinstance(rows, list): - return [] - normalized: List[str] = [] - for item in rows: - ext = _normalize_extension(item) - if ext and ext not in normalized: - normalized.append(ext) - return normalized - - -def _legacy_env_int(name: str, default: int, min_value: int, max_value: int) -> int: - raw = os.getenv(name) - if raw is None: - return default - try: - value = int(str(raw).strip()) - except Exception: - value = default - return max(min_value, min(max_value, value)) - - -def _legacy_env_bool(name: str, default: bool) -> bool: - raw = os.getenv(name) - if raw is None: - return default - return str(raw).strip().lower() in {"1", "true", "yes", "on"} - - -def _legacy_env_extensions(name: str, default: List[str]) -> List[str]: - raw = os.getenv(name) - if raw is None: - return list(default) - source = re.split(r"[,;\s]+", str(raw)) - normalized: List[str] = [] - for item in source: - ext = _normalize_extension(item) - if ext and ext not in normalized: - normalized.append(ext) - return normalized - - -def _bootstrap_platform_setting_values() -> Dict[str, Any]: - return { - "page_size": _legacy_env_int("PAGE_SIZE", DEFAULT_PAGE_SIZE, 1, 100), - "chat_pull_page_size": _legacy_env_int( - "CHAT_PULL_PAGE_SIZE", - DEFAULT_CHAT_PULL_PAGE_SIZE, - 10, - 500, - ), - "command_auto_unlock_seconds": _legacy_env_int( - "COMMAND_AUTO_UNLOCK_SECONDS", - DEFAULT_COMMAND_AUTO_UNLOCK_SECONDS, - 1, - 600, - ), - "upload_max_mb": _legacy_env_int("UPLOAD_MAX_MB", DEFAULT_UPLOAD_MAX_MB, 1, 2048), - "allowed_attachment_extensions": _legacy_env_extensions( - "ALLOWED_ATTACHMENT_EXTENSIONS", - list(DEFAULT_ALLOWED_ATTACHMENT_EXTENSIONS), - ), - "workspace_download_extensions": _legacy_env_extensions( - "WORKSPACE_DOWNLOAD_EXTENSIONS", - list(DEFAULT_WORKSPACE_DOWNLOAD_EXTENSIONS), - ), - "speech_enabled": _legacy_env_bool("STT_ENABLED", STT_ENABLED_DEFAULT), - } - - -def _bot_workspace_root(bot_id: str) -> str: - return 
os.path.abspath(os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot", "workspace")) - - -def _bot_data_root(bot_id: str) -> str: - return os.path.abspath(os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot")) - - -def _calc_dir_size_bytes(path: str) -> int: - total = 0 - if not os.path.isdir(path): - return 0 - for root, _, files in os.walk(path): - for name in files: - target = os.path.join(root, name) - try: - if os.path.islink(target): - continue - total += int(os.path.getsize(target)) - except OSError: - continue - return total - - -def _read_bot_resources(bot_id: str) -> Dict[str, Any]: - path = os.path.join(_bot_data_root(bot_id), "resources.json") - raw: Dict[str, Any] = {} - if os.path.isfile(path): - try: - with open(path, "r", encoding="utf-8") as f: - loaded = json.load(f) - if isinstance(loaded, dict): - raw = loaded - except Exception: - raw = {} - - def _safe_float(value: Any, default: float) -> float: - try: - return float(value) - except Exception: - return default - - def _safe_int(value: Any, default: int) -> int: - try: - return int(value) - except Exception: - return default - - cpu = _safe_float(raw.get("cpuCores", raw.get("cpu_cores", 1.0)), 1.0) - memory = _safe_int(raw.get("memoryMB", raw.get("memory_mb", 1024)), 1024) - storage = _safe_int(raw.get("storageGB", raw.get("storage_gb", 10)), 10) - cpu = 0.0 if cpu == 0 else min(16.0, max(0.1, cpu)) - memory = 0 if memory == 0 else min(65536, max(256, memory)) - storage = 0 if storage == 0 else min(1024, max(1, storage)) - return { - "cpu_cores": cpu, - "memory_mb": memory, - "storage_gb": storage, - } - - -def estimate_tokens(text: str) -> int: - content = str(text or "").strip() - if not content: - return 0 - pieces = re.findall(r"[\u4e00-\u9fff]|[A-Za-z0-9_]+|[^\s]", content) - total = 0 - for piece in pieces: - if re.fullmatch(r"[\u4e00-\u9fff]", piece): - total += 1 - elif re.fullmatch(r"[A-Za-z0-9_]+", piece): - total += max(1, math.ceil(len(piece) / 4)) - else: - total += 1 - return 
max(1, total) - - -def default_platform_settings() -> PlatformSettingsPayload: - bootstrap = _bootstrap_platform_setting_values() - return PlatformSettingsPayload( - page_size=int(bootstrap["page_size"]), - chat_pull_page_size=int(bootstrap["chat_pull_page_size"]), - command_auto_unlock_seconds=int(bootstrap["command_auto_unlock_seconds"]), - upload_max_mb=int(bootstrap["upload_max_mb"]), - allowed_attachment_extensions=list(bootstrap["allowed_attachment_extensions"]), - workspace_download_extensions=list(bootstrap["workspace_download_extensions"]), - speech_enabled=bool(bootstrap["speech_enabled"]), - speech_max_audio_seconds=DEFAULT_STT_MAX_AUDIO_SECONDS, - speech_default_language=DEFAULT_STT_DEFAULT_LANGUAGE, - speech_force_simplified=DEFAULT_STT_FORCE_SIMPLIFIED, - speech_audio_preprocess=DEFAULT_STT_AUDIO_PREPROCESS, - speech_audio_filter=DEFAULT_STT_AUDIO_FILTER, - speech_initial_prompt=DEFAULT_STT_INITIAL_PROMPT, - loading_page=LoadingPageSettings(), - ) - - -def _normalize_setting_key(raw: Any) -> str: - text = str(raw or "").strip() - return re.sub(r"[^a-zA-Z0-9_.-]+", "_", text).strip("._-").lower() - - -def _normalize_setting_value(value: Any, value_type: str) -> Any: - normalized_type = str(value_type or "json").strip().lower() or "json" - if normalized_type == "integer": - return int(value or 0) - if normalized_type == "float": - return float(value or 0) - if normalized_type == "boolean": - if isinstance(value, bool): - return value - return str(value or "").strip().lower() in {"1", "true", "yes", "on"} - if normalized_type == "string": - return str(value or "") - if normalized_type == "json": - return value - raise ValueError(f"Unsupported value_type: {normalized_type}") - - -def _read_setting_value(row: PlatformSetting) -> Any: - try: - value = json.loads(row.value_json or "null") - except Exception: - value = None - return _normalize_setting_value(value, row.value_type) - - -def _setting_item_from_row(row: PlatformSetting) -> Dict[str, Any]: - 
return SystemSettingItem( - key=row.key, - name=row.name, - category=row.category, - description=row.description, - value_type=row.value_type, - value=_read_setting_value(row), - is_public=bool(row.is_public), - sort_order=int(row.sort_order or 100), - created_at=row.created_at.isoformat() + "Z", - updated_at=row.updated_at.isoformat() + "Z", - ).model_dump() - - -def _upsert_setting_row( - session: Session, - key: str, - *, - name: str, - category: str, - description: str, - value_type: str, - value: Any, - is_public: bool, - sort_order: int, -) -> PlatformSetting: - normalized_key = _normalize_setting_key(key) - if not normalized_key: - raise ValueError("Setting key is required") - normalized_type = str(value_type or "json").strip().lower() or "json" - normalized_value = _normalize_setting_value(value, normalized_type) - now = _utcnow() - row = session.get(PlatformSetting, normalized_key) - if row is None: - row = PlatformSetting( - key=normalized_key, - name=str(name or normalized_key), - category=str(category or "general"), - description=str(description or ""), - value_type=normalized_type, - value_json=json.dumps(normalized_value, ensure_ascii=False), - is_public=bool(is_public), - sort_order=int(sort_order or 100), - created_at=now, - updated_at=now, - ) - else: - row.name = str(name or row.name or normalized_key) - row.category = str(category or row.category or "general") - row.description = str(description or row.description or "") - row.value_type = normalized_type - row.value_json = json.dumps(normalized_value, ensure_ascii=False) - row.is_public = bool(is_public) - row.sort_order = int(sort_order or row.sort_order or 100) - row.updated_at = now - session.add(row) - return row - - -def ensure_default_system_settings(session: Session) -> None: - bootstrap_values = _bootstrap_platform_setting_values() - legacy_row = session.get(PlatformSetting, "global") - if legacy_row is not None: - try: - legacy_data = json.loads(legacy_row.value_json or "{}") - except 
Exception: - legacy_data = {} - if isinstance(legacy_data, dict): - for key in SETTING_KEYS: - meta = SYSTEM_SETTING_DEFINITIONS[key] - _upsert_setting_row( - session, - key, - name=str(meta["name"]), - category=str(meta["category"]), - description=str(meta["description"]), - value_type=str(meta["value_type"]), - value=legacy_data.get(key, bootstrap_values.get(key, meta["value"])), - is_public=bool(meta["is_public"]), - sort_order=int(meta["sort_order"]), - ) - session.delete(legacy_row) - session.commit() - - dirty = False - for key in DEPRECATED_SETTING_KEYS: - legacy_row = session.get(PlatformSetting, key) - if legacy_row is not None: - session.delete(legacy_row) - dirty = True - - for key, meta in SYSTEM_SETTING_DEFINITIONS.items(): - row = session.get(PlatformSetting, key) - if row is None: - _upsert_setting_row( - session, - key, - name=str(meta["name"]), - category=str(meta["category"]), - description=str(meta["description"]), - value_type=str(meta["value_type"]), - value=bootstrap_values.get(key, meta["value"]), - is_public=bool(meta["is_public"]), - sort_order=int(meta["sort_order"]), - ) - dirty = True - continue - changed = False - for field in ("name", "category", "description", "value_type"): - value = str(meta[field]) - if not getattr(row, field): - setattr(row, field, value) - changed = True - if getattr(row, "sort_order", None) is None: - row.sort_order = int(meta["sort_order"]) - changed = True - if getattr(row, "is_public", None) is None: - row.is_public = bool(meta["is_public"]) - changed = True - if changed: - row.updated_at = _utcnow() - session.add(row) - dirty = True - if dirty: - session.commit() - - -def list_system_settings(session: Session, search: str = "") -> List[Dict[str, Any]]: - ensure_default_system_settings(session) - stmt = select(PlatformSetting).order_by(PlatformSetting.sort_order.asc(), PlatformSetting.key.asc()) - rows = session.exec(stmt).all() - keyword = str(search or "").strip().lower() - items = 
[_setting_item_from_row(row) for row in rows] - if not keyword: - return items - return [ - item - for item in items - if keyword in str(item["key"]).lower() - or keyword in str(item["name"]).lower() - or keyword in str(item["category"]).lower() - or keyword in str(item["description"]).lower() - ] - - -def create_or_update_system_setting(session: Session, payload: SystemSettingPayload) -> Dict[str, Any]: - ensure_default_system_settings(session) - normalized_key = _normalize_setting_key(payload.key) - definition = SYSTEM_SETTING_DEFINITIONS.get(normalized_key, {}) - row = _upsert_setting_row( - session, - payload.key, - name=payload.name or str(definition.get("name") or payload.key), - category=payload.category or str(definition.get("category") or "general"), - description=payload.description or str(definition.get("description") or ""), - value_type=payload.value_type or str(definition.get("value_type") or "json"), - value=payload.value if payload.value is not None else definition.get("value"), - is_public=payload.is_public, - sort_order=payload.sort_order or int(definition.get("sort_order") or 100), - ) - if normalized_key == ACTIVITY_EVENT_RETENTION_SETTING_KEY: - prune_expired_activity_events(session, force=True) - session.commit() - session.refresh(row) - return _setting_item_from_row(row) - - -def delete_system_setting(session: Session, key: str) -> None: - normalized_key = _normalize_setting_key(key) - if normalized_key in PROTECTED_SETTING_KEYS: - raise ValueError("Core platform settings cannot be deleted") - row = session.get(PlatformSetting, normalized_key) - if row is None: - raise ValueError("Setting not found") - session.delete(row) - session.commit() - - -def get_platform_settings(session: Session) -> PlatformSettingsPayload: - defaults = default_platform_settings() - ensure_default_system_settings(session) - rows = session.exec(select(PlatformSetting).where(PlatformSetting.key.in_(SETTING_KEYS))).all() - data: Dict[str, Any] = {row.key: 
_read_setting_value(row) for row in rows} - - merged = defaults.model_dump() - merged["page_size"] = max(1, min(100, int(data.get("page_size") or merged["page_size"]))) - merged["chat_pull_page_size"] = max(10, min(500, int(data.get("chat_pull_page_size") or merged["chat_pull_page_size"]))) - merged["command_auto_unlock_seconds"] = max( - 1, - min(600, int(data.get("command_auto_unlock_seconds") or merged["command_auto_unlock_seconds"])) - ) - merged["upload_max_mb"] = int(data.get("upload_max_mb") or merged["upload_max_mb"]) - merged["allowed_attachment_extensions"] = _normalize_extension_list( - data.get("allowed_attachment_extensions", merged["allowed_attachment_extensions"]) - ) - merged["workspace_download_extensions"] = _normalize_extension_list( - data.get("workspace_download_extensions", merged["workspace_download_extensions"]) - ) - merged["speech_enabled"] = bool(data.get("speech_enabled", merged["speech_enabled"])) - loading_page = data.get("loading_page") - if isinstance(loading_page, dict): - current = dict(merged["loading_page"]) - for key in ("title", "subtitle", "description"): - value = str(loading_page.get(key) or "").strip() - if value: - current[key] = value - merged["loading_page"] = current - return PlatformSettingsPayload.model_validate(merged) - - -def save_platform_settings(session: Session, payload: PlatformSettingsPayload) -> PlatformSettingsPayload: - normalized = PlatformSettingsPayload( - page_size=max(1, min(100, int(payload.page_size))), - chat_pull_page_size=max(10, min(500, int(payload.chat_pull_page_size))), - command_auto_unlock_seconds=max(1, min(600, int(payload.command_auto_unlock_seconds))), - upload_max_mb=payload.upload_max_mb, - allowed_attachment_extensions=_normalize_extension_list(payload.allowed_attachment_extensions), - workspace_download_extensions=_normalize_extension_list(payload.workspace_download_extensions), - speech_enabled=bool(payload.speech_enabled), - 
loading_page=LoadingPageSettings.model_validate(payload.loading_page.model_dump()), - ) - payload_by_key = normalized.model_dump() - for key in SETTING_KEYS: - definition = SYSTEM_SETTING_DEFINITIONS[key] - _upsert_setting_row( - session, - key, - name=str(definition["name"]), - category=str(definition["category"]), - description=str(definition["description"]), - value_type=str(definition["value_type"]), - value=payload_by_key[key], - is_public=bool(definition["is_public"]), - sort_order=int(definition["sort_order"]), - ) - session.commit() - return normalized - - -def get_platform_settings_snapshot() -> PlatformSettingsPayload: - with Session(engine) as session: - return get_platform_settings(session) - - -def get_upload_max_mb() -> int: - return get_platform_settings_snapshot().upload_max_mb - - -def get_allowed_attachment_extensions() -> List[str]: - return get_platform_settings_snapshot().allowed_attachment_extensions - - -def get_workspace_download_extensions() -> List[str]: - return get_platform_settings_snapshot().workspace_download_extensions - - -def get_page_size() -> int: - return get_platform_settings_snapshot().page_size - - -def get_chat_pull_page_size() -> int: - return get_platform_settings_snapshot().chat_pull_page_size - - -def get_speech_runtime_settings() -> Dict[str, Any]: - settings = get_platform_settings_snapshot() - return { - "enabled": bool(settings.speech_enabled), - "max_audio_seconds": int(DEFAULT_STT_MAX_AUDIO_SECONDS), - "default_language": str(DEFAULT_STT_DEFAULT_LANGUAGE or "zh").strip().lower() or "zh", - "force_simplified": bool(DEFAULT_STT_FORCE_SIMPLIFIED), - "audio_preprocess": bool(DEFAULT_STT_AUDIO_PREPROCESS), - "audio_filter": str(DEFAULT_STT_AUDIO_FILTER or "").strip(), - "initial_prompt": str(DEFAULT_STT_INITIAL_PROMPT or "").strip(), - "model": STT_MODEL, - "device": STT_DEVICE, - } - - -def get_activity_event_retention_days(session: Session) -> int: - row = session.get(PlatformSetting, 
ACTIVITY_EVENT_RETENTION_SETTING_KEY) - if row is None: - return DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS - try: - value = _read_setting_value(row) - except Exception: - value = DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS - return _normalize_activity_event_retention_days(value) - - -def create_usage_request( - session: Session, - bot_id: str, - command: str, - attachments: Optional[List[str]] = None, - channel: str = "dashboard", - metadata: Optional[Dict[str, Any]] = None, - provider: Optional[str] = None, - model: Optional[str] = None, -) -> str: - request_id = uuid.uuid4().hex - rows = [str(item).strip() for item in (attachments or []) if str(item).strip()] - input_tokens = estimate_tokens(command) - usage = BotRequestUsage( - bot_id=bot_id, - request_id=request_id, - channel=channel, - status="PENDING", - provider=(str(provider or "").strip() or None), - model=(str(model or "").strip() or None), - token_source="estimated", - input_tokens=input_tokens, - output_tokens=0, - total_tokens=input_tokens, - input_text_preview=str(command or "")[:400], - attachments_json=json.dumps(rows, ensure_ascii=False) if rows else None, - metadata_json=json.dumps(metadata or {}, ensure_ascii=False), - started_at=_utcnow(), - created_at=_utcnow(), - updated_at=_utcnow(), - ) - session.add(usage) - session.flush() - return request_id - - -def bind_usage_message( - session: Session, - bot_id: str, - request_id: str, - message_id: Optional[int], -) -> Optional[BotRequestUsage]: - if not request_id or not message_id: - return None - usage_row = _find_pending_usage_by_request_id(session, bot_id, request_id) - if not usage_row: - return None - usage_row.message_id = int(message_id) - usage_row.updated_at = _utcnow() - session.add(usage_row) - return usage_row - - -def _find_latest_pending_usage(session: Session, bot_id: str) -> Optional[BotRequestUsage]: - stmt = ( - select(BotRequestUsage) - .where(BotRequestUsage.bot_id == bot_id) - .where(BotRequestUsage.status == "PENDING") - 
.order_by(BotRequestUsage.started_at.desc(), BotRequestUsage.id.desc()) - .limit(1) - ) - return session.exec(stmt).first() - - -def _find_pending_usage_by_request_id(session: Session, bot_id: str, request_id: str) -> Optional[BotRequestUsage]: - if not request_id: - return None - stmt = ( - select(BotRequestUsage) - .where(BotRequestUsage.bot_id == bot_id) - .where(BotRequestUsage.request_id == request_id) - .where(BotRequestUsage.status == "PENDING") - .order_by(BotRequestUsage.started_at.desc(), BotRequestUsage.id.desc()) - .limit(1) - ) - return session.exec(stmt).first() - - -def finalize_usage_from_packet(session: Session, bot_id: str, packet: Dict[str, Any]) -> Optional[BotRequestUsage]: - request_id = str(packet.get("request_id") or "").strip() - usage_row = _find_pending_usage_by_request_id(session, bot_id, request_id) or _find_latest_pending_usage(session, bot_id) - if not usage_row: - return None - - raw_usage = packet.get("usage") - input_tokens: Optional[int] = None - output_tokens: Optional[int] = None - source = "estimated" - if isinstance(raw_usage, dict): - for key in ("input_tokens", "prompt_tokens", "promptTokens"): - if raw_usage.get(key) is not None: - try: - input_tokens = int(raw_usage.get(key) or 0) - except Exception: - input_tokens = None - break - for key in ("output_tokens", "completion_tokens", "completionTokens"): - if raw_usage.get(key) is not None: - try: - output_tokens = int(raw_usage.get(key) or 0) - except Exception: - output_tokens = None - break - if input_tokens is not None or output_tokens is not None: - source = "exact" - - text = str(packet.get("text") or packet.get("content") or "").strip() - provider = str(packet.get("provider") or "").strip() - model = str(packet.get("model") or "").strip() - message_id = packet.get("message_id") - if input_tokens is None: - input_tokens = usage_row.input_tokens - if output_tokens is None: - output_tokens = estimate_tokens(text) - if source == "exact": - source = "mixed" - - if provider: 
- usage_row.provider = provider[:120] - if model: - usage_row.model = model[:255] - if message_id is not None: - try: - usage_row.message_id = int(message_id) - except Exception: - pass - usage_row.output_tokens = max(0, int(output_tokens or 0)) - usage_row.input_tokens = max(0, int(input_tokens or 0)) - usage_row.total_tokens = usage_row.input_tokens + usage_row.output_tokens - usage_row.output_text_preview = text[:400] if text else usage_row.output_text_preview - usage_row.status = "COMPLETED" - usage_row.token_source = source - usage_row.completed_at = _utcnow() - usage_row.updated_at = _utcnow() - session.add(usage_row) - return usage_row - - -def fail_latest_usage(session: Session, bot_id: str, detail: str) -> Optional[BotRequestUsage]: - usage_row = _find_latest_pending_usage(session, bot_id) - if not usage_row: - return None - usage_row.status = "ERROR" - usage_row.error_text = str(detail or "")[:500] - usage_row.completed_at = _utcnow() - usage_row.updated_at = _utcnow() - session.add(usage_row) - return usage_row - - -def prune_expired_activity_events(session: Session, force: bool = False) -> int: - global _last_activity_event_prune_at - - now = _utcnow() - if not force and _last_activity_event_prune_at and now - _last_activity_event_prune_at < ACTIVITY_EVENT_PRUNE_INTERVAL: - return 0 - - retention_days = get_activity_event_retention_days(session) - cutoff = now - timedelta(days=retention_days) - result = session.exec( - sql_delete(BotActivityEvent).where(BotActivityEvent.created_at < cutoff) - ) - _last_activity_event_prune_at = now - return int(getattr(result, "rowcount", 0) or 0) - - -def record_activity_event( - session: Session, - bot_id: str, - event_type: str, - request_id: Optional[str] = None, - channel: str = "dashboard", - detail: Optional[str] = None, - metadata: Optional[Dict[str, Any]] = None, -) -> None: - normalized_event_type = str(event_type or "unknown").strip().lower() or "unknown" - if normalized_event_type not in 
OPERATIONAL_ACTIVITY_EVENT_TYPES: - return - prune_expired_activity_events(session, force=False) - row = BotActivityEvent( - bot_id=bot_id, - request_id=request_id, - event_type=normalized_event_type, - channel=str(channel or "dashboard").strip().lower() or "dashboard", - detail=(str(detail or "").strip() or None), - metadata_json=json.dumps(metadata or {}, ensure_ascii=False) if metadata else None, - created_at=_utcnow(), - ) - session.add(row) - - -def list_usage( - session: Session, - bot_id: Optional[str] = None, - limit: int = 100, - offset: int = 0, -) -> Dict[str, Any]: - safe_limit = max(1, min(int(limit), 500)) - safe_offset = max(0, int(offset or 0)) - stmt = ( - select(BotRequestUsage) - .order_by(BotRequestUsage.started_at.desc(), BotRequestUsage.id.desc()) - .offset(safe_offset) - .limit(safe_limit) - ) - summary_stmt = select( - func.count(BotRequestUsage.id), - func.coalesce(func.sum(BotRequestUsage.input_tokens), 0), - func.coalesce(func.sum(BotRequestUsage.output_tokens), 0), - func.coalesce(func.sum(BotRequestUsage.total_tokens), 0), - ) - total_stmt = select(func.count(BotRequestUsage.id)) - if bot_id: - stmt = stmt.where(BotRequestUsage.bot_id == bot_id) - summary_stmt = summary_stmt.where(BotRequestUsage.bot_id == bot_id) - total_stmt = total_stmt.where(BotRequestUsage.bot_id == bot_id) - else: - since = _utcnow() - timedelta(days=1) - summary_stmt = summary_stmt.where(BotRequestUsage.created_at >= since) - rows = session.exec(stmt).all() - count, input_sum, output_sum, total_sum = session.exec(summary_stmt).one() - total = int(session.exec(total_stmt).one() or 0) - items = [ - PlatformUsageItem( - id=int(row.id or 0), - bot_id=row.bot_id, - message_id=int(row.message_id) if row.message_id is not None else None, - request_id=row.request_id, - channel=row.channel, - status=row.status, - provider=row.provider, - model=row.model, - token_source=row.token_source, - content=row.input_text_preview or row.output_text_preview, - 
input_tokens=int(row.input_tokens or 0), - output_tokens=int(row.output_tokens or 0), - total_tokens=int(row.total_tokens or 0), - input_text_preview=row.input_text_preview, - output_text_preview=row.output_text_preview, - started_at=row.started_at.isoformat() + "Z", - completed_at=row.completed_at.isoformat() + "Z" if row.completed_at else None, - ).model_dump() - for row in rows - ] - return PlatformUsageResponse( - summary=PlatformUsageSummary( - request_count=int(count or 0), - input_tokens=int(input_sum or 0), - output_tokens=int(output_sum or 0), - total_tokens=int(total_sum or 0), - ), - items=[PlatformUsageItem.model_validate(item) for item in items], - total=total, - limit=safe_limit, - offset=safe_offset, - has_more=safe_offset + len(items) < total, - analytics=_build_usage_analytics(session, bot_id=bot_id), - ).model_dump() - - -def _build_usage_analytics( - session: Session, - bot_id: Optional[str] = None, - window_days: int = 7, -) -> PlatformUsageAnalytics: - safe_window_days = max(1, int(window_days or 0)) - today = _utcnow().date() - days = [today - timedelta(days=offset) for offset in range(safe_window_days - 1, -1, -1)] - day_keys = [day.isoformat() for day in days] - day_labels = [day.strftime("%m-%d") for day in days] - first_day = days[0] - first_started_at = datetime.combine(first_day, datetime.min.time()) - - stmt = select(BotRequestUsage.model, BotRequestUsage.started_at).where(BotRequestUsage.started_at >= first_started_at) - if bot_id: - stmt = stmt.where(BotRequestUsage.bot_id == bot_id) - - counts_by_model: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int)) - total_requests = 0 - for model_name, started_at in session.exec(stmt).all(): - if not started_at: - continue - day_key = started_at.date().isoformat() - if day_key not in day_keys: - continue - normalized_model = str(model_name or "").strip() or "Unknown" - counts_by_model[normalized_model][day_key] += 1 - total_requests += 1 - - series = [ - 
PlatformUsageAnalyticsSeries( - model=model_name, - total_requests=sum(day_counts.values()), - daily_counts=[int(day_counts.get(day_key, 0)) for day_key in day_keys], - ) - for model_name, day_counts in counts_by_model.items() - ] - series.sort(key=lambda item: (-item.total_requests, item.model.lower())) - - return PlatformUsageAnalytics( - window_days=safe_window_days, - days=day_labels, - total_requests=total_requests, - series=series, - ) - - -def list_activity_events( - session: Session, - bot_id: Optional[str] = None, - limit: int = 100, -) -> List[Dict[str, Any]]: - deleted = prune_expired_activity_events(session, force=False) - if deleted > 0: - session.commit() - safe_limit = max(1, min(int(limit), 500)) - stmt = select(BotActivityEvent).order_by(BotActivityEvent.created_at.desc(), BotActivityEvent.id.desc()).limit(safe_limit) - if bot_id: - stmt = stmt.where(BotActivityEvent.bot_id == bot_id) - rows = session.exec(stmt).all() - items: List[Dict[str, Any]] = [] - for row in rows: - try: - metadata = json.loads(row.metadata_json or "{}") - except Exception: - metadata = {} - items.append( - PlatformActivityItem( - id=int(row.id or 0), - bot_id=row.bot_id, - request_id=row.request_id, - event_type=row.event_type, - channel=row.channel, - detail=row.detail, - metadata=metadata if isinstance(metadata, dict) else {}, - created_at=row.created_at.isoformat() + "Z", - ).model_dump() - ) - return items - - -def build_platform_overview(session: Session, docker_manager: Any) -> Dict[str, Any]: - deleted = prune_expired_activity_events(session, force=False) - if deleted > 0: - session.commit() - bots = session.exec(select(BotInstance)).all() - images = session.exec(select(NanobotImage).order_by(NanobotImage.created_at.desc())).all() - settings = get_platform_settings(session) - - running = 0 - stopped = 0 - disabled = 0 - configured_cpu_total = 0.0 - configured_memory_total = 0 - configured_storage_total = 0 - workspace_used_total = 0 - workspace_limit_total = 0 - 
live_cpu_percent_total = 0.0 - live_memory_used_total = 0 - live_memory_limit_total = 0 - - bot_rows: List[Dict[str, Any]] = [] - for bot in bots: - enabled = bool(getattr(bot, "enabled", True)) - runtime_status = docker_manager.get_bot_status(bot.id) if docker_manager else str(bot.docker_status or "STOPPED") - resources = _read_bot_resources(bot.id) - runtime = docker_manager.get_bot_resource_snapshot(bot.id) if docker_manager else {"usage": {}, "limits": {}, "docker_status": runtime_status} - workspace_root = _bot_workspace_root(bot.id) - workspace_used = _calc_dir_size_bytes(workspace_root) - workspace_limit = int(resources["storage_gb"] or 0) * 1024 * 1024 * 1024 - - configured_cpu_total += float(resources["cpu_cores"] or 0) - configured_memory_total += int(resources["memory_mb"] or 0) * 1024 * 1024 - configured_storage_total += workspace_limit - workspace_used_total += workspace_used - workspace_limit_total += workspace_limit - live_cpu_percent_total += float((runtime.get("usage") or {}).get("cpu_percent") or 0.0) - live_memory_used_total += int((runtime.get("usage") or {}).get("memory_bytes") or 0) - live_memory_limit_total += int((runtime.get("usage") or {}).get("memory_limit_bytes") or 0) - - if not enabled: - disabled += 1 - elif runtime_status == "RUNNING": - running += 1 - else: - stopped += 1 - - bot_rows.append( - { - "id": bot.id, - "name": bot.name, - "enabled": enabled, - "docker_status": runtime_status, - "image_tag": bot.image_tag, - "llm_provider": getattr(bot, "llm_provider", None), - "llm_model": getattr(bot, "llm_model", None), - "current_state": bot.current_state, - "last_action": bot.last_action, - "resources": resources, - "workspace_usage_bytes": workspace_used, - "workspace_limit_bytes": workspace_limit if workspace_limit > 0 else None, - } - ) - - usage = list_usage(session, limit=20) - events = list_activity_events(session, limit=20) - - return { - "summary": { - "bots": { - "total": len(bots), - "running": running, - "stopped": 
stopped, - "disabled": disabled, - }, - "images": { - "total": len(images), - "ready": len([row for row in images if row.status == "READY"]), - "abnormal": len([row for row in images if row.status != "READY"]), - }, - "resources": { - "configured_cpu_cores": round(configured_cpu_total, 2), - "configured_memory_bytes": configured_memory_total, - "configured_storage_bytes": configured_storage_total, - "live_cpu_percent": round(live_cpu_percent_total, 2), - "live_memory_used_bytes": live_memory_used_total, - "live_memory_limit_bytes": live_memory_limit_total, - "workspace_used_bytes": workspace_used_total, - "workspace_limit_bytes": workspace_limit_total, - }, - }, - "images": [ - { - "tag": row.tag, - "version": row.version, - "status": row.status, - "source_dir": row.source_dir, - "created_at": row.created_at.isoformat() + "Z", - } - for row in images - ], - "bots": bot_rows, - "settings": settings.model_dump(), - "usage": usage, - "events": events, - } diff --git a/backend/services/platform_settings_core.py b/backend/services/platform_settings_core.py new file mode 100644 index 0000000..97d107b --- /dev/null +++ b/backend/services/platform_settings_core.py @@ -0,0 +1,300 @@ +import json +import os +import re +from datetime import datetime +from typing import Any, Dict, List + +from sqlmodel import Session + +from core.settings import ( + DEFAULT_CHAT_PULL_PAGE_SIZE, + DEFAULT_COMMAND_AUTO_UNLOCK_SECONDS, + DEFAULT_PAGE_SIZE, + DEFAULT_UPLOAD_MAX_MB, + DEFAULT_WORKSPACE_DOWNLOAD_EXTENSIONS, + STT_ENABLED_DEFAULT, +) +from models.platform import PlatformSetting +from schemas.platform import SystemSettingItem + +DEFAULT_ALLOWED_ATTACHMENT_EXTENSIONS: tuple[str, ...] 
= () +DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS = 7 +ACTIVITY_EVENT_RETENTION_SETTING_KEY = "activity_event_retention_days" +SETTING_KEYS = ( + "page_size", + "chat_pull_page_size", + "command_auto_unlock_seconds", + "upload_max_mb", + "allowed_attachment_extensions", + "workspace_download_extensions", + "speech_enabled", +) +PROTECTED_SETTING_KEYS = set(SETTING_KEYS) | {ACTIVITY_EVENT_RETENTION_SETTING_KEY} +DEPRECATED_SETTING_KEYS = { + "loading_page", + "speech_max_audio_seconds", + "speech_default_language", + "speech_force_simplified", + "speech_audio_preprocess", + "speech_audio_filter", + "speech_initial_prompt", +} +SYSTEM_SETTING_DEFINITIONS: Dict[str, Dict[str, Any]] = { + "page_size": { + "name": "分页大小", + "category": "ui", + "description": "平台各类列表默认每页条数。", + "value_type": "integer", + "value": DEFAULT_PAGE_SIZE, + "is_public": True, + "sort_order": 5, + }, + "chat_pull_page_size": { + "name": "对话懒加载条数", + "category": "chat", + "description": "Bot 对话区向上懒加载时每次读取的消息条数。", + "value_type": "integer", + "value": DEFAULT_CHAT_PULL_PAGE_SIZE, + "is_public": True, + "sort_order": 8, + }, + "command_auto_unlock_seconds": { + "name": "发送按钮自动恢复秒数", + "category": "chat", + "description": "对话发送后按钮保持停止态的最长秒数,超时后自动恢复为可发送状态。", + "value_type": "integer", + "value": DEFAULT_COMMAND_AUTO_UNLOCK_SECONDS, + "is_public": True, + "sort_order": 9, + }, + "upload_max_mb": { + "name": "上传大小限制", + "category": "upload", + "description": "单文件上传大小限制,单位 MB。", + "value_type": "integer", + "value": DEFAULT_UPLOAD_MAX_MB, + "is_public": False, + "sort_order": 10, + }, + "allowed_attachment_extensions": { + "name": "允许附件后缀", + "category": "upload", + "description": "允许上传的附件后缀列表,留空表示不限制。", + "value_type": "json", + "value": list(DEFAULT_ALLOWED_ATTACHMENT_EXTENSIONS), + "is_public": False, + "sort_order": 20, + }, + "workspace_download_extensions": { + "name": "工作区下载后缀", + "category": "workspace", + "description": "命中后缀的工作区文件默认走下载模式。", + "value_type": "json", + "value": 
list(DEFAULT_WORKSPACE_DOWNLOAD_EXTENSIONS), + "is_public": False, + "sort_order": 30, + }, + "speech_enabled": { + "name": "语音识别开关", + "category": "speech", + "description": "控制 Bot 语音转写功能是否启用。", + "value_type": "boolean", + "value": STT_ENABLED_DEFAULT, + "is_public": True, + "sort_order": 32, + }, + ACTIVITY_EVENT_RETENTION_SETTING_KEY: { + "name": "活动事件保留天数", + "category": "maintenance", + "description": "bot_activity_event 运维事件的保留天数,超期记录会自动清理。", + "value_type": "integer", + "value": DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS, + "is_public": False, + "sort_order": 34, + }, +} + + +def _utcnow() -> datetime: + return datetime.utcnow() + + +def _normalize_activity_event_retention_days(raw: Any) -> int: + try: + value = int(raw) + except Exception: + value = DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS + return max(1, min(3650, value)) + + +def _normalize_extension(raw: Any) -> str: + text = str(raw or "").strip().lower() + if not text: + return "" + if text.startswith("*."): + text = text[1:] + if not text.startswith("."): + text = f".{text}" + if not re.fullmatch(r"\.[a-z0-9][a-z0-9._+-]{0,31}", text): + return "" + return text + + +def _normalize_extension_list(rows: Any) -> List[str]: + if not isinstance(rows, list): + return [] + normalized: List[str] = [] + for item in rows: + ext = _normalize_extension(item) + if ext and ext not in normalized: + normalized.append(ext) + return normalized + + +def _legacy_env_int(name: str, default: int, min_value: int, max_value: int) -> int: + raw = os.getenv(name) + if raw is None: + return default + try: + value = int(str(raw).strip()) + except Exception: + value = default + return max(min_value, min(max_value, value)) + + +def _legacy_env_bool(name: str, default: bool) -> bool: + raw = os.getenv(name) + if raw is None: + return default + return str(raw).strip().lower() in {"1", "true", "yes", "on"} + + +def _legacy_env_extensions(name: str, default: List[str]) -> List[str]: + raw = os.getenv(name) + if raw is None: + return 
list(default) + source = re.split(r"[,;\s]+", str(raw)) + normalized: List[str] = [] + for item in source: + ext = _normalize_extension(item) + if ext and ext not in normalized: + normalized.append(ext) + return normalized + + +def _bootstrap_platform_setting_values() -> Dict[str, Any]: + return { + "page_size": _legacy_env_int("PAGE_SIZE", DEFAULT_PAGE_SIZE, 1, 100), + "chat_pull_page_size": _legacy_env_int( + "CHAT_PULL_PAGE_SIZE", + DEFAULT_CHAT_PULL_PAGE_SIZE, + 10, + 500, + ), + "command_auto_unlock_seconds": _legacy_env_int( + "COMMAND_AUTO_UNLOCK_SECONDS", + DEFAULT_COMMAND_AUTO_UNLOCK_SECONDS, + 1, + 600, + ), + "upload_max_mb": _legacy_env_int("UPLOAD_MAX_MB", DEFAULT_UPLOAD_MAX_MB, 1, 2048), + "allowed_attachment_extensions": _legacy_env_extensions( + "ALLOWED_ATTACHMENT_EXTENSIONS", + list(DEFAULT_ALLOWED_ATTACHMENT_EXTENSIONS), + ), + "workspace_download_extensions": _legacy_env_extensions( + "WORKSPACE_DOWNLOAD_EXTENSIONS", + list(DEFAULT_WORKSPACE_DOWNLOAD_EXTENSIONS), + ), + "speech_enabled": _legacy_env_bool("STT_ENABLED", STT_ENABLED_DEFAULT), + } + + +def _normalize_setting_key(raw: Any) -> str: + text = str(raw or "").strip() + return re.sub(r"[^a-zA-Z0-9_.-]+", "_", text).strip("._-").lower() + + +def _normalize_setting_value(value: Any, value_type: str) -> Any: + normalized_type = str(value_type or "json").strip().lower() or "json" + if normalized_type == "integer": + return int(value or 0) + if normalized_type == "float": + return float(value or 0) + if normalized_type == "boolean": + if isinstance(value, bool): + return value + return str(value or "").strip().lower() in {"1", "true", "yes", "on"} + if normalized_type == "string": + return str(value or "") + if normalized_type == "json": + return value + raise ValueError(f"Unsupported value_type: {normalized_type}") + + +def _read_setting_value(row: PlatformSetting) -> Any: + try: + value = json.loads(row.value_json or "null") + except Exception: + value = None + return 
_normalize_setting_value(value, row.value_type) + + +def _setting_item_from_row(row: PlatformSetting) -> Dict[str, Any]: + return SystemSettingItem( + key=row.key, + name=row.name, + category=row.category, + description=row.description, + value_type=row.value_type, + value=_read_setting_value(row), + is_public=bool(row.is_public), + sort_order=int(row.sort_order or 100), + created_at=row.created_at.isoformat() + "Z", + updated_at=row.updated_at.isoformat() + "Z", + ).model_dump() + + +def _upsert_setting_row( + session: Session, + key: str, + *, + name: str, + category: str, + description: str, + value_type: str, + value: Any, + is_public: bool, + sort_order: int, +) -> PlatformSetting: + normalized_key = _normalize_setting_key(key) + if not normalized_key: + raise ValueError("Setting key is required") + normalized_type = str(value_type or "json").strip().lower() or "json" + normalized_value = _normalize_setting_value(value, normalized_type) + now = _utcnow() + row = session.get(PlatformSetting, normalized_key) + if row is None: + row = PlatformSetting( + key=normalized_key, + name=str(name or normalized_key), + category=str(category or "general"), + description=str(description or ""), + value_type=normalized_type, + value_json=json.dumps(normalized_value, ensure_ascii=False), + is_public=bool(is_public), + sort_order=int(sort_order or 100), + created_at=now, + updated_at=now, + ) + else: + row.name = str(name or row.name or normalized_key) + row.category = str(category or row.category or "general") + row.description = str(description or row.description or "") + row.value_type = normalized_type + row.value_json = json.dumps(normalized_value, ensure_ascii=False) + row.is_public = bool(is_public) + row.sort_order = int(sort_order or row.sort_order or 100) + row.updated_at = now + session.add(row) + return row diff --git a/backend/services/platform_settings_service.py b/backend/services/platform_settings_service.py new file mode 100644 index 0000000..06587b9 --- 
/dev/null +++ b/backend/services/platform_settings_service.py @@ -0,0 +1,26 @@ +from services.platform_runtime_settings_service import ( + default_platform_settings, + get_allowed_attachment_extensions, + get_chat_pull_page_size, + get_page_size, + get_platform_settings, + get_platform_settings_snapshot, + get_speech_runtime_settings, + get_upload_max_mb, + get_workspace_download_extensions, + save_platform_settings, +) +from services.platform_settings_core import ( + ACTIVITY_EVENT_RETENTION_SETTING_KEY, + DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS, + DEFAULT_ALLOWED_ATTACHMENT_EXTENSIONS, + SETTING_KEYS, + SYSTEM_SETTING_DEFINITIONS, +) +from services.platform_system_settings_service import ( + create_or_update_system_setting, + delete_system_setting, + ensure_default_system_settings, + get_activity_event_retention_days, + list_system_settings, +) diff --git a/backend/services/platform_system_settings_service.py b/backend/services/platform_system_settings_service.py new file mode 100644 index 0000000..2515e32 --- /dev/null +++ b/backend/services/platform_system_settings_service.py @@ -0,0 +1,153 @@ +from typing import Any, Dict, List + +from sqlmodel import Session, select + +from models.platform import PlatformSetting +from schemas.platform import SystemSettingPayload +from services.platform_settings_core import ( + ACTIVITY_EVENT_RETENTION_SETTING_KEY, + DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS, + DEPRECATED_SETTING_KEYS, + PROTECTED_SETTING_KEYS, + SETTING_KEYS, + SYSTEM_SETTING_DEFINITIONS, + _bootstrap_platform_setting_values, + _normalize_activity_event_retention_days, + _normalize_setting_key, + _read_setting_value, + _setting_item_from_row, + _upsert_setting_row, + _utcnow, +) + + +def ensure_default_system_settings(session: Session) -> None: + bootstrap_values = _bootstrap_platform_setting_values() + legacy_row = session.get(PlatformSetting, "global") + if legacy_row is not None: + try: + legacy_data = _read_setting_value(legacy_row) + except Exception: + 
legacy_data = {} + if isinstance(legacy_data, dict): + for key in SETTING_KEYS: + meta = SYSTEM_SETTING_DEFINITIONS[key] + _upsert_setting_row( + session, + key, + name=str(meta["name"]), + category=str(meta["category"]), + description=str(meta["description"]), + value_type=str(meta["value_type"]), + value=legacy_data.get(key, bootstrap_values.get(key, meta["value"])), + is_public=bool(meta["is_public"]), + sort_order=int(meta["sort_order"]), + ) + session.delete(legacy_row) + session.commit() + + dirty = False + for key in DEPRECATED_SETTING_KEYS: + legacy_row = session.get(PlatformSetting, key) + if legacy_row is not None: + session.delete(legacy_row) + dirty = True + + for key, meta in SYSTEM_SETTING_DEFINITIONS.items(): + row = session.get(PlatformSetting, key) + if row is None: + _upsert_setting_row( + session, + key, + name=str(meta["name"]), + category=str(meta["category"]), + description=str(meta["description"]), + value_type=str(meta["value_type"]), + value=bootstrap_values.get(key, meta["value"]), + is_public=bool(meta["is_public"]), + sort_order=int(meta["sort_order"]), + ) + dirty = True + continue + changed = False + for field in ("name", "category", "description", "value_type"): + value = str(meta[field]) + if not getattr(row, field): + setattr(row, field, value) + changed = True + if getattr(row, "sort_order", None) is None: + row.sort_order = int(meta["sort_order"]) + changed = True + if getattr(row, "is_public", None) is None: + row.is_public = bool(meta["is_public"]) + changed = True + if changed: + row.updated_at = _utcnow() + session.add(row) + dirty = True + if dirty: + session.commit() + + +def list_system_settings(session: Session, search: str = "") -> List[Dict[str, Any]]: + ensure_default_system_settings(session) + stmt = select(PlatformSetting).order_by(PlatformSetting.sort_order.asc(), PlatformSetting.key.asc()) + rows = session.exec(stmt).all() + keyword = str(search or "").strip().lower() + items = [_setting_item_from_row(row) for row 
in rows] + if not keyword: + return items + return [ + item + for item in items + if keyword in str(item["key"]).lower() + or keyword in str(item["name"]).lower() + or keyword in str(item["category"]).lower() + or keyword in str(item["description"]).lower() + ] + + +def create_or_update_system_setting(session: Session, payload: SystemSettingPayload) -> Dict[str, Any]: + ensure_default_system_settings(session) + normalized_key = _normalize_setting_key(payload.key) + definition = SYSTEM_SETTING_DEFINITIONS.get(normalized_key, {}) + row = _upsert_setting_row( + session, + payload.key, + name=payload.name or str(definition.get("name") or payload.key), + category=payload.category or str(definition.get("category") or "general"), + description=payload.description or str(definition.get("description") or ""), + value_type=payload.value_type or str(definition.get("value_type") or "json"), + value=payload.value if payload.value is not None else definition.get("value"), + is_public=payload.is_public, + sort_order=payload.sort_order or int(definition.get("sort_order") or 100), + ) + if normalized_key == ACTIVITY_EVENT_RETENTION_SETTING_KEY: + from services.platform_activity_service import prune_expired_activity_events + + prune_expired_activity_events(session, force=True) + session.commit() + session.refresh(row) + return _setting_item_from_row(row) + + +def delete_system_setting(session: Session, key: str) -> None: + normalized_key = _normalize_setting_key(key) + if normalized_key in PROTECTED_SETTING_KEYS: + raise ValueError("Core platform settings cannot be deleted") + row = session.get(PlatformSetting, normalized_key) + if row is None: + raise ValueError("Setting not found") + session.delete(row) + session.commit() + + +def get_activity_event_retention_days(session: Session) -> int: + row = session.get(PlatformSetting, ACTIVITY_EVENT_RETENTION_SETTING_KEY) + if row is None: + return DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS + try: + value = _read_setting_value(row) + except 
Exception: + value = DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS + return _normalize_activity_event_retention_days(value) diff --git a/backend/services/platform_usage_service.py b/backend/services/platform_usage_service.py new file mode 100644 index 0000000..996f4b1 --- /dev/null +++ b/backend/services/platform_usage_service.py @@ -0,0 +1,305 @@ +import json +import math +import re +import uuid +from collections import defaultdict +from datetime import datetime, timedelta +from typing import Any, Dict, List, Optional + +from sqlalchemy import func +from sqlmodel import Session, select + +from models.platform import BotRequestUsage +from schemas.platform import ( + PlatformUsageAnalytics, + PlatformUsageAnalyticsSeries, + PlatformUsageItem, + PlatformUsageResponse, + PlatformUsageSummary, +) + + +def _utcnow() -> datetime: + return datetime.utcnow() + + +def estimate_tokens(text: str) -> int: + content = str(text or "").strip() + if not content: + return 0 + pieces = re.findall(r"[\u4e00-\u9fff]|[A-Za-z0-9_]+|[^\s]", content) + total = 0 + for piece in pieces: + if re.fullmatch(r"[\u4e00-\u9fff]", piece): + total += 1 + elif re.fullmatch(r"[A-Za-z0-9_]+", piece): + total += max(1, math.ceil(len(piece) / 4)) + else: + total += 1 + return max(1, total) + + +def create_usage_request( + session: Session, + bot_id: str, + command: str, + attachments: Optional[List[str]] = None, + channel: str = "dashboard", + metadata: Optional[Dict[str, Any]] = None, + provider: Optional[str] = None, + model: Optional[str] = None, +) -> str: + request_id = uuid.uuid4().hex + rows = [str(item).strip() for item in (attachments or []) if str(item).strip()] + input_tokens = estimate_tokens(command) + usage = BotRequestUsage( + bot_id=bot_id, + request_id=request_id, + channel=channel, + status="PENDING", + provider=(str(provider or "").strip() or None), + model=(str(model or "").strip() or None), + token_source="estimated", + input_tokens=input_tokens, + output_tokens=0, + 
def _read_first_int(raw: Dict[str, Any], keys) -> Optional[int]:
    """Return the first non-None value among *keys* in *raw*, coerced to int.

    Falsy non-None values coerce to 0 (matching the previous behavior);
    an unparsable value yields None.
    """
    for key in keys:
        value = raw.get(key)
        if value is not None:
            try:
                return int(value or 0)
            except Exception:
                return None
    return None


def finalize_usage_from_packet(session: Session, bot_id: str, packet: Dict[str, Any]) -> Optional[BotRequestUsage]:
    """Close out the pending usage row for a completed request.

    Token counts come from ``packet["usage"]`` when the provider reported
    them; missing sides fall back to the stored input estimate / an output
    estimate from the response text. ``token_source`` records provenance:
    "exact" when both sides were reported, "mixed" when only one side was
    (fix: previously an estimated input with an exact output was still
    labelled "exact"), "estimated" otherwise.

    Returns the updated row (added to the session, not committed), or
    None when no pending row matches.
    """
    request_id = str(packet.get("request_id") or "").strip()
    usage_row = _find_pending_usage_by_request_id(session, bot_id, request_id) or _find_latest_pending_usage(session, bot_id)
    if not usage_row:
        return None

    raw_usage = packet.get("usage")
    input_tokens: Optional[int] = None
    output_tokens: Optional[int] = None
    source = "estimated"
    if isinstance(raw_usage, dict):
        input_tokens = _read_first_int(raw_usage, ("input_tokens", "prompt_tokens", "promptTokens"))
        output_tokens = _read_first_int(raw_usage, ("output_tokens", "completion_tokens", "completionTokens"))
        if input_tokens is not None or output_tokens is not None:
            source = "exact"

    text = str(packet.get("text") or packet.get("content") or "").strip()
    provider = str(packet.get("provider") or "").strip()
    model = str(packet.get("model") or "").strip()
    message_id = packet.get("message_id")
    if input_tokens is None:
        # Fall back to the estimate recorded when the request was created.
        input_tokens = usage_row.input_tokens
        if source == "exact":
            source = "mixed"
    if output_tokens is None:
        output_tokens = estimate_tokens(text)
        if source == "exact":
            source = "mixed"

    if provider:
        usage_row.provider = provider[:120]
    if model:
        usage_row.model = model[:255]
    if message_id is not None:
        try:
            usage_row.message_id = int(message_id)
        except Exception:
            pass
    usage_row.input_tokens = max(0, int(input_tokens or 0))
    usage_row.output_tokens = max(0, int(output_tokens or 0))
    usage_row.total_tokens = usage_row.input_tokens + usage_row.output_tokens
    usage_row.output_text_preview = text[:400] if text else usage_row.output_text_preview
    usage_row.status = "COMPLETED"
    usage_row.token_source = source
    usage_row.completed_at = _utcnow()
    usage_row.updated_at = _utcnow()
    session.add(usage_row)
    return usage_row
def _build_usage_analytics(
    session: Session,
    bot_id: Optional[str] = None,
    window_days: int = 7,
) -> PlatformUsageAnalytics:
    """Aggregate per-model daily request counts over a trailing window.

    Args:
        session: active DB session.
        bot_id: restrict to one bot when given; otherwise platform-wide.
        window_days: number of trailing calendar days (UTC, min 1).

    Returns a PlatformUsageAnalytics with one series per model (sorted by
    descending total, then name), each holding counts aligned to the
    window's day labels.
    """
    safe_window_days = max(1, int(window_days or 0))
    today = _utcnow().date()
    days = [today - timedelta(days=offset) for offset in range(safe_window_days - 1, -1, -1)]
    day_keys = [day.isoformat() for day in days]
    day_key_set = set(day_keys)  # O(1) membership in the per-row filter below
    day_labels = [day.strftime("%m-%d") for day in days]
    first_day = days[0]
    first_started_at = datetime.combine(first_day, datetime.min.time())

    stmt = select(BotRequestUsage.model, BotRequestUsage.started_at).where(BotRequestUsage.started_at >= first_started_at)
    if bot_id:
        stmt = stmt.where(BotRequestUsage.bot_id == bot_id)

    counts_by_model: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
    total_requests = 0
    for model_name, started_at in session.exec(stmt).all():
        if not started_at:
            continue
        day_key = started_at.date().isoformat()
        # Guard against rows just outside the window (e.g. clock skew).
        if day_key not in day_key_set:
            continue
        normalized_model = str(model_name or "").strip() or "Unknown"
        counts_by_model[normalized_model][day_key] += 1
        total_requests += 1

    series = [
        PlatformUsageAnalyticsSeries(
            model=model_name,
            total_requests=sum(day_counts.values()),
            daily_counts=[int(day_counts.get(day_key, 0)) for day_key in day_keys],
        )
        for model_name, day_counts in counts_by_model.items()
    ]
    series.sort(key=lambda item: (-item.total_requests, item.model.lower()))

    return PlatformUsageAnalytics(
        window_days=safe_window_days,
        days=day_labels,
        total_requests=total_requests,
        series=series,
    )
summary_stmt = select( + func.count(BotRequestUsage.id), + func.coalesce(func.sum(BotRequestUsage.input_tokens), 0), + func.coalesce(func.sum(BotRequestUsage.output_tokens), 0), + func.coalesce(func.sum(BotRequestUsage.total_tokens), 0), + ) + total_stmt = select(func.count(BotRequestUsage.id)) + if bot_id: + stmt = stmt.where(BotRequestUsage.bot_id == bot_id) + summary_stmt = summary_stmt.where(BotRequestUsage.bot_id == bot_id) + total_stmt = total_stmt.where(BotRequestUsage.bot_id == bot_id) + else: + since = _utcnow() - timedelta(days=1) + summary_stmt = summary_stmt.where(BotRequestUsage.created_at >= since) + rows = session.exec(stmt).all() + count, input_sum, output_sum, total_sum = session.exec(summary_stmt).one() + total = int(session.exec(total_stmt).one() or 0) + items = [ + PlatformUsageItem( + id=int(row.id or 0), + bot_id=row.bot_id, + message_id=int(row.message_id) if row.message_id is not None else None, + request_id=row.request_id, + channel=row.channel, + status=row.status, + provider=row.provider, + model=row.model, + token_source=row.token_source, + content=row.input_text_preview or row.output_text_preview, + input_tokens=int(row.input_tokens or 0), + output_tokens=int(row.output_tokens or 0), + total_tokens=int(row.total_tokens or 0), + input_text_preview=row.input_text_preview, + output_text_preview=row.output_text_preview, + started_at=row.started_at.isoformat() + "Z", + completed_at=row.completed_at.isoformat() + "Z" if row.completed_at else None, + ).model_dump() + for row in rows + ] + return PlatformUsageResponse( + summary=PlatformUsageSummary( + request_count=int(count or 0), + input_tokens=int(input_sum or 0), + output_tokens=int(output_sum or 0), + total_tokens=int(total_sum or 0), + ), + items=[PlatformUsageItem.model_validate(item) for item in items], + total=total, + limit=safe_limit, + offset=safe_offset, + has_more=safe_offset + len(items) < total, + analytics=_build_usage_analytics(session, bot_id=bot_id), + ).model_dump() diff 
--git a/backend/services/runtime_service.py b/backend/services/runtime_service.py new file mode 100644 index 0000000..2d782cc --- /dev/null +++ b/backend/services/runtime_service.py @@ -0,0 +1,261 @@ +import asyncio +import json +import logging +import os +import time +from datetime import datetime +from typing import Any, Dict, List, Optional + +from sqlmodel import Session + +from core.database import engine +from core.docker_instance import docker_manager +from core.websocket_manager import manager +from models.bot import BotInstance, BotMessage +from services.bot_service import _workspace_root +from services.cache_service import _invalidate_bot_detail_cache, _invalidate_bot_messages_cache +from services.platform_service import bind_usage_message, finalize_usage_from_packet, record_activity_event +from services.topic_runtime import publish_runtime_topic_packet + +logger = logging.getLogger("dashboard.backend") + +_main_loop: Optional[asyncio.AbstractEventLoop] = None +_AGENT_LOOP_READY_MARKER = "Agent loop started" + + +def set_main_loop(loop: Optional[asyncio.AbstractEventLoop]) -> None: + global _main_loop + _main_loop = loop + + +def get_main_loop() -> Optional[asyncio.AbstractEventLoop]: + return _main_loop + + +def _queue_runtime_broadcast(bot_id: str, packet: Dict[str, Any]) -> None: + loop = get_main_loop() + if not loop or not loop.is_running(): + return + asyncio.run_coroutine_threadsafe(manager.broadcast(bot_id, packet), loop) + + +def _normalize_packet_channel(packet: Dict[str, Any]) -> str: + raw = str(packet.get("channel") or packet.get("source") or "").strip().lower() + if raw in {"dashboard", "dashboard_channel", "dashboard-channel"}: + return "dashboard" + return raw + + +def _normalize_media_item(bot_id: str, value: Any) -> str: + raw = str(value or "").strip().replace("\\", "/") + if not raw: + return "" + if raw.startswith("/root/.nanobot/workspace/"): + return raw[len("/root/.nanobot/workspace/") :].lstrip("/") + root = 
def _normalize_media_list(raw: Any, bot_id: str) -> List[str]:
    """Normalize a packet's media field into workspace-relative paths.

    Non-list input yields []; entries that normalize to an empty string
    are dropped. Order of the remaining entries is preserved.
    """
    if not isinstance(raw, list):
        return []
    normalized_entries = (_normalize_media_item(bot_id, entry) for entry in raw)
    return [path for path in normalized_entries if path]
"").strip() + media_list = _normalize_media_list(packet.get("media"), bot_id) + if text_msg or media_list: + message_row = BotMessage( + bot_id=bot_id, + role="user", + text=text_msg, + media_json=json.dumps(media_list, ensure_ascii=False) if media_list else None, + ) + session.add(message_row) + session.flush() + persisted_message_id = message_row.id + bind_usage_message( + session, + bot_id, + str(packet.get("request_id") or "").strip(), + persisted_message_id, + ) + elif packet_type == "BUS_EVENT": + is_progress = bool(packet.get("is_progress")) + detail_text = str(packet.get("content") or packet.get("text") or "").strip() + if not is_progress: + text_msg = detail_text + media_list = _normalize_media_list(packet.get("media"), bot_id) + if text_msg or media_list: + bot.current_state = "IDLE" + if text_msg: + bot.last_action = " ".join(text_msg.split())[:4000] + message_row = BotMessage( + bot_id=bot_id, + role="assistant", + text=text_msg, + media_json=json.dumps(media_list, ensure_ascii=False) if media_list else None, + ) + session.add(message_row) + session.flush() + persisted_message_id = message_row.id + finalize_usage_from_packet( + session, + bot_id, + { + "text": text_msg, + "usage": packet.get("usage"), + "request_id": packet.get("request_id"), + "provider": packet.get("provider"), + "model": packet.get("model"), + "message_id": persisted_message_id, + }, + ) + + bot.updated_at = datetime.utcnow() + session.add(bot) + session.commit() + + publish_runtime_topic_packet( + engine, + bot_id, + packet, + source_channel, + persisted_message_id, + logger, + ) + + if persisted_message_id: + packet["message_id"] = persisted_message_id + if packet_type in {"ASSISTANT_MESSAGE", "USER_COMMAND", "BUS_EVENT"}: + _invalidate_bot_messages_cache(bot_id) + _invalidate_bot_detail_cache(bot_id) + return persisted_message_id + + +def docker_callback(bot_id: str, packet: Dict[str, Any]) -> None: + packet_type = str(packet.get("type", "")).upper() + if packet_type == "RAW_LOG": 
async def _wait_for_agent_loop_ready(
    bot_id: str,
    timeout_seconds: float = 12.0,
    poll_interval_seconds: float = 0.5,
) -> bool:
    """Poll the container's recent logs until the agent-loop ready marker shows up.

    Checks the last 200 log lines (case-insensitively) every poll interval
    (min 0.1s). Returns True as soon as the marker appears, False once the
    timeout (min 1s) elapses without it.
    """
    needle = _AGENT_LOOP_READY_MARKER.lower()
    sleep_for = max(0.1, poll_interval_seconds)
    stop_at = time.monotonic() + max(1.0, timeout_seconds)
    while time.monotonic() < stop_at:
        recent_lines = docker_manager.get_recent_logs(bot_id, tail=200)
        for line in recent_lines:
            if needle in str(line or "").lower():
                return True
        await asyncio.sleep(sleep_for)
    return False
def _skill_market_root() -> str:
    """Return the absolute path of the shared skill-market package directory (<DATA_ROOT>/skills)."""
    return os.path.abspath(os.path.join(DATA_ROOT, "skills"))
"/").lstrip("/") + if not raw_name: + continue + first = raw_name.split("/", 1)[0].strip() + if _is_ignored_skill_zip_top_level(first): + continue + if _is_valid_top_level_skill_name(first) and first not in entry_names: + entry_names.append(first) + + candidates = sorted( + [ + str(member.filename or "").replace("\\", "/").lstrip("/") + for member in file_members + if str(member.filename or "").replace("\\", "/").rsplit("/", 1)[-1].lower() + in {"skill.md", "readme.md"} + ], + key=lambda value: (value.count("/"), value.lower()), + ) + for candidate in candidates: + try: + with archive.open(candidate, "r") as file: + preview = file.read(4096).decode("utf-8", errors="ignore") + description = _read_description_from_text(preview) + if description: + break + except Exception: + continue + return { + "entry_names": entry_names, + "description": description, + } + + +def _resolve_unique_skill_market_key(session: Session, preferred_key: str, exclude_id: Optional[int] = None) -> str: + base_key = _sanitize_skill_market_key(preferred_key) or "skill" + candidate = base_key + counter = 2 + while True: + stmt = select(SkillMarketItem).where(SkillMarketItem.skill_key == candidate) + rows = session.exec(stmt).all() + conflict = next((row for row in rows if exclude_id is None or row.id != exclude_id), None) + if not conflict: + return candidate + candidate = f"{base_key}-{counter}" + counter += 1 + + +def _resolve_unique_skill_market_zip_filename( + session: Session, + filename: str, + *, + exclude_filename: Optional[str] = None, + exclude_id: Optional[int] = None, +) -> str: + root = _skill_market_root() + os.makedirs(root, exist_ok=True) + safe_name = _sanitize_zip_filename(filename) + if not safe_name.lower().endswith(".zip"): + raise HTTPException(status_code=400, detail="Only .zip skill package is supported") + candidate = safe_name + stem, ext = os.path.splitext(safe_name) + counter = 2 + while True: + file_conflict = os.path.exists(os.path.join(root, candidate)) and 
candidate != str(exclude_filename or "").strip() + rows = session.exec(select(SkillMarketItem).where(SkillMarketItem.zip_filename == candidate)).all() + db_conflict = next((row for row in rows if exclude_id is None or row.id != exclude_id), None) + if not file_conflict and not db_conflict: + return candidate + candidate = f"{stem}-{counter}{ext}" + counter += 1 + + +async def _store_skill_market_zip_upload( + session: Session, + upload: UploadFile, + *, + exclude_filename: Optional[str] = None, + exclude_id: Optional[int] = None, +) -> Dict[str, Any]: + root = _skill_market_root() + os.makedirs(root, exist_ok=True) + + incoming_name = _sanitize_zip_filename(upload.filename or "") + if not incoming_name.lower().endswith(".zip"): + raise HTTPException(status_code=400, detail="Only .zip skill package is supported") + + target_filename = _resolve_unique_skill_market_zip_filename( + session, + incoming_name, + exclude_filename=exclude_filename, + exclude_id=exclude_id, + ) + max_bytes = get_platform_settings_snapshot().upload_max_mb * 1024 * 1024 + total_size = 0 + tmp_path: Optional[str] = None + try: + with tempfile.NamedTemporaryFile(prefix=".skill_market_", suffix=".zip", dir=root, delete=False) as tmp_zip: + tmp_path = tmp_zip.name + while True: + chunk = await upload.read(1024 * 1024) + if not chunk: + break + total_size += len(chunk) + if total_size > max_bytes: + raise HTTPException( + status_code=413, + detail=f"Zip package too large (max {max_bytes // (1024 * 1024)}MB)", + ) + tmp_zip.write(chunk) + if total_size == 0: + raise HTTPException(status_code=400, detail="Zip package is empty") + summary = _extract_skill_zip_summary(tmp_path) + if not summary["entry_names"]: + raise HTTPException(status_code=400, detail="Zip package has no valid skill entries") + final_path = os.path.join(root, target_filename) + os.replace(tmp_path, final_path) + tmp_path = None + return { + "zip_filename": target_filename, + "zip_size_bytes": total_size, + "entry_names": 
def _serialize_skill_market_item(
    item: SkillMarketItem,
    *,
    install_count: int = 0,
    install_row: Optional[BotSkillInstall] = None,
    workspace_installed: Optional[bool] = None,
    installed_entries: Optional[List[str]] = None,
) -> Dict[str, Any]:
    """Serialize a skill-market item to an API payload dict.

    Always includes the item's metadata, entry names, whether its zip file
    still exists on disk, and *install_count*. When *install_row* is given,
    per-bot install fields are added; *workspace_installed* and
    *installed_entries*, when provided, override the values derived from
    the install row (callers pass them after checking the bot workspace).
    """
    zip_path = os.path.join(_skill_market_root(), str(item.zip_filename or ""))
    entry_names = _parse_json_string_list(item.entry_names_json)
    payload = {
        "id": item.id,
        "skill_key": item.skill_key,
        "display_name": item.display_name or item.skill_key,
        "description": item.description or "",
        "zip_filename": item.zip_filename,
        "zip_size_bytes": int(item.zip_size_bytes or 0),
        "entry_names": entry_names,
        "entry_count": len(entry_names),
        # Flags a market item whose backing zip was removed from disk.
        "zip_exists": os.path.isfile(zip_path),
        "install_count": int(install_count or 0),
        "created_at": item.created_at.isoformat() + "Z" if item.created_at else None,
        "updated_at": item.updated_at.isoformat() + "Z" if item.updated_at else None,
    }
    if install_row is not None:
        resolved_entries = (
            installed_entries
            if installed_entries is not None
            else _parse_json_string_list(install_row.installed_entries_json)
        )
        # Default "installed" to the DB status unless the caller verified the workspace.
        resolved_installed = workspace_installed if workspace_installed is not None else install_row.status == "INSTALLED"
        payload.update(
            {
                "installed": resolved_installed,
                "install_status": install_row.status,
                "installed_at": install_row.installed_at.isoformat() + "Z" if install_row.installed_at else None,
                "installed_entries": resolved_entries,
                "install_error": install_row.last_error,
            }
        )
    return payload
def list_skill_market_items(session: Session) -> List[Dict[str, Any]]:
    """Serialize every skill-market item, ordered by display name then id,
    each annotated with its current number of successful installs."""
    stmt = select(SkillMarketItem).order_by(SkillMarketItem.display_name, SkillMarketItem.id)
    counts = _build_install_count_by_skill(session)
    serialized: List[Dict[str, Any]] = []
    for market_item in session.exec(stmt).all():
        item_id = int(market_item.id or 0)
        serialized.append(
            _serialize_skill_market_item(market_item, install_count=counts.get(item_id, 0))
        )
    return serialized
session.get(SkillMarketItem, skill_id) + if not item: + raise HTTPException(status_code=404, detail="Skill market item not found") + + old_filename = str(item.zip_filename or "").strip() + upload_meta: Optional[Dict[str, Any]] = None + if upload is not None: + upload_meta = await _store_skill_market_zip_upload( + session, + upload, + exclude_filename=old_filename or None, + exclude_id=item.id, + ) + + next_key = _resolve_unique_skill_market_key( + session, + skill_key or item.skill_key or display_name or os.path.splitext(upload_meta["zip_filename"] if upload_meta else old_filename)[0], + exclude_id=item.id, + ) + item.skill_key = next_key + item.display_name = str(display_name or item.display_name or next_key).strip() or next_key + item.description = str(description or (upload_meta["description"] if upload_meta else item.description) or "").strip() + item.updated_at = datetime.utcnow() + if upload_meta: + item.zip_filename = upload_meta["zip_filename"] + item.zip_size_bytes = int(upload_meta["zip_size_bytes"] or 0) + item.entry_names_json = json.dumps(upload_meta["entry_names"], ensure_ascii=False) + session.add(item) + session.commit() + session.refresh(item) + + if upload_meta and old_filename and old_filename != upload_meta["zip_filename"]: + old_path = os.path.join(_skill_market_root(), old_filename) + if os.path.exists(old_path): + os.remove(old_path) + + installs = session.exec(select(BotSkillInstall).where(BotSkillInstall.skill_market_item_id == skill_id)).all() + install_count = sum(1 for row in installs if row.status == "INSTALLED") + return _serialize_skill_market_item(item, install_count=install_count) + + +def delete_skill_market_item_record(session: Session, *, skill_id: int) -> Dict[str, Any]: + item = session.get(SkillMarketItem, skill_id) + if not item: + raise HTTPException(status_code=404, detail="Skill market item not found") + zip_filename = str(item.zip_filename or "").strip() + installs = 
def _record_failed_install(
    session: Session,
    install_row: Optional[BotSkillInstall],
    *,
    bot_id: str,
    skill_id: int,
    zip_filename: str,
    detail: str,
) -> None:
    """Persist a FAILED install record (creating the row if needed) and commit."""
    now = datetime.utcnow()
    if not install_row:
        install_row = BotSkillInstall(
            bot_id=bot_id,
            skill_market_item_id=skill_id,
            installed_at=now,
        )
    install_row.source_zip_filename = zip_filename
    install_row.status = "FAILED"
    # Truncate consistently (previously only the generic-exception path did).
    install_row.last_error = (detail or "Install failed")[:1000]
    install_row.updated_at = now
    session.add(install_row)
    session.commit()


def install_skill_market_item_for_bot(
    session: Session,
    *,
    bot_id: str,
    skill_id: int,
) -> Dict[str, Any]:
    """Install a skill-market zip into a bot's workspace and record the result.

    Raises HTTPException 404 when the market item or its zip file is
    missing; on install failure the BotSkillInstall row is marked FAILED
    with the error detail before the exception propagates (unexpected
    errors are wrapped in HTTPException 500).
    """
    item = session.get(SkillMarketItem, skill_id)
    if not item:
        raise HTTPException(status_code=404, detail="Skill market item not found")

    zip_path = os.path.join(_skill_market_root(), str(item.zip_filename or ""))
    if not os.path.isfile(zip_path):
        raise HTTPException(status_code=404, detail="Skill zip package not found")

    install_row = session.exec(
        select(BotSkillInstall).where(
            BotSkillInstall.bot_id == bot_id,
            BotSkillInstall.skill_market_item_id == skill_id,
        )
    ).first()
    zip_filename = str(item.zip_filename or "")

    try:
        install_result = _install_skill_zip_into_workspace(bot_id, zip_path)
        now = datetime.utcnow()
        if not install_row:
            install_row = BotSkillInstall(
                bot_id=bot_id,
                skill_market_item_id=skill_id,
            )
        install_row.installed_entries_json = json.dumps(install_result["installed"], ensure_ascii=False)
        install_row.source_zip_filename = zip_filename
        install_row.status = "INSTALLED"
        install_row.last_error = None
        install_row.installed_at = now
        install_row.updated_at = now
        session.add(install_row)
        session.commit()
        session.refresh(install_row)
        return {
            "status": "installed",
            "bot_id": bot_id,
            "skill_market_item_id": skill_id,
            "installed": install_result["installed"],
            "skills": install_result["skills"],
            "market_item": _serialize_skill_market_item(item, install_count=0, install_row=install_row),
        }
    except HTTPException as exc:
        _record_failed_install(
            session,
            install_row,
            bot_id=bot_id,
            skill_id=skill_id,
            zip_filename=zip_filename,
            detail=str(exc.detail or "Install failed"),
        )
        raise
    except Exception as exc:
        _record_failed_install(
            session,
            install_row,
            bot_id=bot_id,
            skill_id=skill_id,
            zip_filename=zip_filename,
            detail=str(exc) or "Install failed",
        )
        raise HTTPException(status_code=500, detail="Skill install failed unexpectedly") from exc
"""Skill package management for a bot workspace: list, install-from-zip, delete."""

import os
import shutil
import tempfile
import zipfile
from datetime import datetime
from typing import Any, Dict, List, Optional

from fastapi import HTTPException, UploadFile

from core.utils import (
    _is_ignored_skill_zip_top_level,
    _is_valid_top_level_skill_name,
)
from services.bot_storage_service import _workspace_root
from services.platform_service import get_platform_settings_snapshot


def _skills_root(bot_id: str) -> str:
    """Return the absolute path of the bot's ``skills`` directory."""
    return os.path.join(_workspace_root(bot_id), "skills")


def _read_skill_description(entry_path: str) -> str:
    """Best-effort one-line description for a skill entry.

    For a directory, probes SKILL.md / README.md (both cases); for a lone
    ``*.md`` file, reads the file itself. Returns the first non-empty line
    that is not a markdown heading, truncated to 240 chars; "" on any failure.
    """
    candidates: List[str] = []
    if os.path.isdir(entry_path):
        candidates = [
            os.path.join(entry_path, "SKILL.md"),
            os.path.join(entry_path, "skill.md"),
            os.path.join(entry_path, "README.md"),
            os.path.join(entry_path, "readme.md"),
        ]
    elif entry_path.lower().endswith(".md"):
        candidates = [entry_path]

    for candidate in candidates:
        if not os.path.isfile(candidate):
            continue
        try:
            with open(candidate, "r", encoding="utf-8") as f:
                for line in f:
                    text = line.strip()
                    if text and not text.startswith("#"):
                        return text[:240]
        except Exception:
            # Unreadable/undecodable candidate: try the next one.
            continue
    return ""


def _list_workspace_skills(bot_id: str) -> List[Dict[str, Any]]:
    """List installed skill entries (directories first, then files, by name)."""
    root = _skills_root(bot_id)
    os.makedirs(root, exist_ok=True)
    rows: List[Dict[str, Any]] = []
    names = sorted(os.listdir(root), key=lambda n: (not os.path.isdir(os.path.join(root, n)), n.lower()))
    for name in names:
        # Hidden entries (including the staging tmp dir) and invalid names are skipped.
        if not name or name.startswith("."):
            continue
        if not _is_valid_top_level_skill_name(name):
            continue
        abs_path = os.path.join(root, name)
        if not os.path.exists(abs_path):
            continue
        stat = os.stat(abs_path)
        rows.append(
            {
                "id": name,
                "name": name,
                "type": "dir" if os.path.isdir(abs_path) else "file",
                "path": f"skills/{name}",
                "size": stat.st_size if os.path.isfile(abs_path) else None,
                "mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z",
                "description": _read_skill_description(abs_path),
            }
        )
    return rows


def _install_skill_zip_into_workspace(bot_id: str, zip_path: str) -> Dict[str, Any]:
    """Validate and extract a skill zip into ``skills/``.

    Extraction is staged into a hidden temp dir inside ``skills/`` and only
    the validated top-level entries are moved into place, so a bad archive
    never leaves partial state. Raises HTTPException(400) for an invalid
    archive, empty archive, bad entry names, path traversal, or name conflicts.
    """
    try:
        archive = zipfile.ZipFile(zip_path)
    except Exception as exc:
        raise HTTPException(status_code=400, detail="Invalid zip file") from exc

    skills_root = _skills_root(bot_id)
    os.makedirs(skills_root, exist_ok=True)

    installed: List[str] = []
    with archive:
        members = archive.infolist()
        file_members = [m for m in members if not m.is_dir()]
        if not file_members:
            raise HTTPException(status_code=400, detail="Zip package has no files")

        # Collect and validate the distinct top-level names to be installed.
        top_names: List[str] = []
        for member in file_members:
            raw_name = str(member.filename or "").replace("\\", "/").lstrip("/")
            if not raw_name:
                continue
            first = raw_name.split("/", 1)[0].strip()
            if _is_ignored_skill_zip_top_level(first):
                continue
            if not _is_valid_top_level_skill_name(first):
                raise HTTPException(status_code=400, detail=f"Invalid skill entry name in zip: {first}")
            if first not in top_names:
                top_names.append(first)

        if not top_names:
            raise HTTPException(status_code=400, detail="Zip package has no valid skill entries")

        conflicts = [name for name in top_names if os.path.exists(os.path.join(skills_root, name))]
        if conflicts:
            raise HTTPException(status_code=400, detail=f"Skill already exists: {', '.join(conflicts)}")

        with tempfile.TemporaryDirectory(prefix=".skill_upload_", dir=skills_root) as tmp_dir:
            tmp_root = os.path.abspath(tmp_dir)
            for member in members:
                raw_name = str(member.filename or "").replace("\\", "/").lstrip("/")
                if not raw_name:
                    continue
                target = os.path.abspath(os.path.join(tmp_root, raw_name))
                # Zip-slip guard: every entry must resolve inside the staging dir.
                if os.path.commonpath([tmp_root, target]) != tmp_root:
                    raise HTTPException(status_code=400, detail=f"Unsafe zip entry path: {raw_name}")
                if member.is_dir():
                    os.makedirs(target, exist_ok=True)
                    continue
                os.makedirs(os.path.dirname(target), exist_ok=True)
                with archive.open(member, "r") as source, open(target, "wb") as dest:
                    shutil.copyfileobj(source, dest)

            for name in top_names:
                src = os.path.join(tmp_root, name)
                dst = os.path.join(skills_root, name)
                if not os.path.exists(src):
                    continue
                shutil.move(src, dst)
                installed.append(name)

    if not installed:
        raise HTTPException(status_code=400, detail="No skill entries installed from zip")

    return {
        "installed": installed,
        "skills": _list_workspace_skills(bot_id),
    }


def list_bot_skills(bot_id: str) -> List[Dict[str, Any]]:
    """Public wrapper: list the bot's installed workspace skills."""
    return _list_workspace_skills(bot_id)


async def upload_bot_skill_zip_to_workspace(bot_id: str, *, upload: UploadFile) -> Dict[str, Any]:
    """Stream an uploaded ``.zip`` to a temp file, then install it into the workspace.

    Raises 400 for a non-zip filename or empty payload and 413 when the
    platform upload limit is exceeded.

    Fix: the temporary zip is now removed on *every* exit path. Previously a
    validation failure (bad extension, oversized or empty payload) raised out
    of the write phase before the cleanup code ran, leaking the temp file.
    """
    tmp_zip_path: Optional[str] = None
    try:
        try:
            with tempfile.NamedTemporaryFile(prefix=".skill_upload_", suffix=".zip", delete=False) as tmp_zip:
                tmp_zip_path = tmp_zip.name
                filename = str(upload.filename or "").strip()
                if not filename.lower().endswith(".zip"):
                    raise HTTPException(status_code=400, detail="Only .zip skill package is supported")
                max_bytes = get_platform_settings_snapshot().upload_max_mb * 1024 * 1024
                total_size = 0
                while True:
                    chunk = await upload.read(1024 * 1024)
                    if not chunk:
                        break
                    total_size += len(chunk)
                    if total_size > max_bytes:
                        raise HTTPException(
                            status_code=413,
                            detail=f"Zip package too large (max {max_bytes // (1024 * 1024)}MB)",
                        )
                    tmp_zip.write(chunk)
                if total_size == 0:
                    raise HTTPException(status_code=400, detail="Zip package is empty")
        finally:
            # Release the upload stream before extraction, as before.
            await upload.close()

        install_result = _install_skill_zip_into_workspace(bot_id, tmp_zip_path)
    finally:
        # Cleanup runs for success, validation failures, and install failures alike.
        if tmp_zip_path and os.path.exists(tmp_zip_path):
            os.remove(tmp_zip_path)

    return {
        "status": "installed",
        "bot_id": bot_id,
        "installed": install_result["installed"],
        "skills": install_result["skills"],
    }


def delete_workspace_skill_entry(bot_id: str, *, skill_name: str) -> Dict[str, Any]:
    """Delete a top-level skill entry (file or directory) from ``skills/``.

    Raises 400 for an invalid name or a path escaping the skills root,
    404 when the entry does not exist.
    """
    name = str(skill_name or "").strip()
    if not _is_valid_top_level_skill_name(name):
        raise HTTPException(status_code=400, detail="Invalid skill name")
    root = _skills_root(bot_id)
    target = os.path.abspath(os.path.join(root, name))
    if os.path.commonpath([os.path.abspath(root), target]) != os.path.abspath(root):
        raise HTTPException(status_code=400, detail="Invalid skill path")
    if not os.path.exists(target):
        raise HTTPException(status_code=404, detail="Skill not found in workspace")
    if os.path.isdir(target):
        shutil.rmtree(target, ignore_errors=False)
    else:
        os.remove(target)
    return {"status": "deleted", "bot_id": bot_id, "skill": name}
"""Speech-to-text for bot audio uploads via the shared Whisper service."""

import asyncio
import os
import tempfile
from typing import Any, Dict, Optional

from fastapi import HTTPException, UploadFile
from sqlmodel import Session

from core.settings import DATA_ROOT
from core.speech_service import (
    SpeechDisabledError,
    SpeechDurationError,
    SpeechServiceError,
    WhisperSpeechService,
)
from models.bot import BotInstance
from services.platform_service import get_speech_runtime_settings


async def transcribe_bot_speech_upload(
    session: Session,
    bot_id: str,
    upload: UploadFile,
    language: Optional[str],
    speech_service: WhisperSpeechService,
    logger: Any,
) -> Dict[str, Any]:
    """Transcribe an uploaded audio file for the given bot.

    Flow: validate bot + runtime settings, spool the upload to a temp file
    under DATA_ROOT, run the (blocking) Whisper transcription in a worker
    thread, and map service errors onto HTTP errors.

    Raises:
        HTTPException 404: bot does not exist.
        HTTPException 400: speech disabled, missing/empty audio, no speech
            detected, or a speech-service error.
        HTTPException 413: audio exceeds the configured max duration.
        HTTPException 500: any other unexpected failure.
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    speech_settings = get_speech_runtime_settings()
    if not speech_settings["enabled"]:
        raise HTTPException(status_code=400, detail="Speech recognition is disabled")
    if not upload:
        raise HTTPException(status_code=400, detail="no audio file uploaded")

    # Derive a log-safe name and a bounded suffix for the temp file.
    original_name = str(upload.filename or "audio.webm").strip() or "audio.webm"
    safe_name = os.path.basename(original_name).replace("\\", "_").replace("/", "_")
    ext = os.path.splitext(safe_name)[1].strip().lower() or ".webm"
    if len(ext) > 12:
        # Implausibly long "extension": fall back to the default container.
        ext = ".webm"

    tmp_path = ""
    try:
        # Spool the upload in 1 MiB chunks; delete=False so the path outlives
        # the context manager (removed in the finally block below).
        with tempfile.NamedTemporaryFile(delete=False, suffix=ext, prefix=".speech_", dir=DATA_ROOT) as tmp:
            tmp_path = tmp.name
            while True:
                chunk = await upload.read(1024 * 1024)
                if not chunk:
                    break
                tmp.write(chunk)

        if not tmp_path or not os.path.exists(tmp_path) or os.path.getsize(tmp_path) <= 0:
            raise HTTPException(status_code=400, detail="audio payload is empty")

        resolved_language = str(language or "").strip() or speech_settings["default_language"]
        # transcribe_file is blocking; run it off the event loop.
        result = await asyncio.to_thread(speech_service.transcribe_file, tmp_path, resolved_language)
        text = str(result.get("text") or "").strip()
        if not text:
            raise HTTPException(status_code=400, detail="No speech detected")
        return {
            "bot_id": bot_id,
            "text": text,
            "duration_seconds": result.get("duration_seconds"),
            "max_audio_seconds": speech_settings["max_audio_seconds"],
            "model": speech_settings["model"],
            "device": speech_settings["device"],
            "language": result.get("language") or resolved_language,
        }
    except SpeechDisabledError as exc:
        logger.warning("speech transcribe disabled bot_id=%s file=%s language=%s detail=%s", bot_id, safe_name, language, exc)
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    except SpeechDurationError as exc:
        logger.warning(
            "speech transcribe too long bot_id=%s file=%s language=%s max_seconds=%s",
            bot_id,
            safe_name,
            language,
            speech_settings["max_audio_seconds"],
        )
        raise HTTPException(status_code=413, detail=f"Audio duration exceeds {speech_settings['max_audio_seconds']} seconds") from exc
    except SpeechServiceError as exc:
        logger.exception("speech transcribe failed bot_id=%s file=%s language=%s", bot_id, safe_name, language)
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    except HTTPException:
        # Already a proper HTTP error (e.g. the 400s raised above): pass through.
        raise
    except Exception as exc:
        logger.exception("speech transcribe unexpected error bot_id=%s file=%s language=%s", bot_id, safe_name, language)
        raise HTTPException(status_code=500, detail=f"speech transcription failed: {exc}") from exc
    finally:
        # Best-effort cleanup: close the upload and remove the spooled file.
        try:
            await upload.close()
        except Exception:
            pass
        if tmp_path and os.path.exists(tmp_path):
            try:
                os.remove(tmp_path)
            except Exception:
                pass
presets if isinstance(row, dict)]} + + +def update_agent_md_templates(raw: Dict[str, Any]) -> Dict[str, str]: + payload = {key: _normalize_md_text(raw.get(key)) for key in TEMPLATE_KEYS} + _write_json_atomic(str(AGENT_MD_TEMPLATES_FILE), payload) + return payload + + +def update_topic_presets(raw: Dict[str, Any]) -> Dict[str, Any]: + presets = raw.get("presets") if isinstance(raw, dict) else None + if presets is None: + payload: Dict[str, List[Dict[str, Any]]] = {"presets": []} + elif isinstance(presets, list): + payload = {"presets": [dict(row) for row in presets if isinstance(row, dict)]} + else: + raise ValueError("topic_presets.presets must be an array") + _write_json_atomic(str(TOPIC_PRESETS_TEMPLATES_FILE), payload) + return payload + + +def get_agent_template_value(key: str) -> str: + return get_agent_md_templates().get(key, "") + diff --git a/backend/services/workspace_service.py b/backend/services/workspace_service.py new file mode 100644 index 0000000..487b8c2 --- /dev/null +++ b/backend/services/workspace_service.py @@ -0,0 +1,446 @@ +import mimetypes +import os +import re +from datetime import datetime +from typing import Any, Dict, Generator, List, Optional +from urllib.parse import quote + +from fastapi import HTTPException, Request, UploadFile +from fastapi.responses import FileResponse, RedirectResponse, Response, StreamingResponse + +from core.utils import _workspace_stat_ctime_iso +from services.bot_storage_service import _workspace_root +from services.platform_service import get_platform_settings_snapshot + +TEXT_PREVIEW_EXTENSIONS = { + "", + ".md", + ".txt", + ".log", + ".json", + ".yaml", + ".yml", + ".cfg", + ".ini", + ".csv", + ".tsv", + ".toml", + ".py", + ".sh", +} + +MARKDOWN_EXTENSIONS = {".md", ".markdown"} + +def _resolve_workspace_path(bot_id: str, rel_path: Optional[str] = None) -> tuple[str, str]: + root = _workspace_root(bot_id) + rel = (rel_path or "").strip().replace("\\", "/") + target = os.path.abspath(os.path.join(root, rel)) 
"""Workspace file services: tree listing, text preview/edit, ranged serving, uploads."""

import mimetypes
import os
import re
from datetime import datetime
from typing import Any, Dict, Generator, List, Optional
from urllib.parse import quote

from fastapi import HTTPException, Request, UploadFile
from fastapi.responses import FileResponse, RedirectResponse, Response, StreamingResponse

from core.utils import _workspace_stat_ctime_iso
from services.bot_storage_service import _workspace_root
from services.platform_service import get_platform_settings_snapshot

# Extensions eligible for the text-preview endpoint ("" = extensionless files).
TEXT_PREVIEW_EXTENSIONS = {
    "",
    ".md",
    ".txt",
    ".log",
    ".json",
    ".yaml",
    ".yml",
    ".cfg",
    ".ini",
    ".csv",
    ".tsv",
    ".toml",
    ".py",
    ".sh",
}

# Extensions treated as editable markdown.
MARKDOWN_EXTENSIONS = {".md", ".markdown"}


def _resolve_workspace_path(bot_id: str, rel_path: Optional[str] = None) -> tuple[str, str]:
    """Resolve *rel_path* inside the bot workspace.

    Returns ``(root, target)``; raises HTTPException(400) when the resolved
    target escapes the workspace root (path-traversal guard).
    """
    root = _workspace_root(bot_id)
    rel = (rel_path or "").strip().replace("\\", "/")
    target = os.path.abspath(os.path.join(root, rel))
    if os.path.commonpath([root, target]) != root:
        raise HTTPException(status_code=400, detail="invalid workspace path")
    return root, target


def _write_text_atomic(target: str, content: str) -> None:
    """Write UTF-8 text via a sibling ``.tmp`` file and atomic os.replace."""
    os.makedirs(os.path.dirname(target), exist_ok=True)
    tmp = f"{target}.tmp"
    with open(tmp, "w", encoding="utf-8") as fh:
        fh.write(content)
    os.replace(tmp, target)


def _build_workspace_tree(path: str, root: str, depth: int) -> List[Dict[str, Any]]:
    """Recursive tree nodes under *path*, dirs first; recursion stops at depth 0."""
    rows: List[Dict[str, Any]] = []
    try:
        names = sorted(os.listdir(path), key=lambda v: (not os.path.isdir(os.path.join(path, v)), v.lower()))
    except FileNotFoundError:
        return rows

    for name in names:
        if name in {".DS_Store"}:
            continue
        abs_path = os.path.join(path, name)
        rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
        stat = os.stat(abs_path)
        base: Dict[str, Any] = {
            "name": name,
            "path": rel_path,
            "ctime": _workspace_stat_ctime_iso(stat),
            "mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z",
        }
        if os.path.isdir(abs_path):
            node = {**base, "type": "dir"}
            if depth > 0:
                node["children"] = _build_workspace_tree(abs_path, root, depth - 1)
            rows.append(node)
            continue
        rows.append(
            {
                **base,
                "type": "file",
                "size": stat.st_size,
                "ext": os.path.splitext(name)[1].lower(),
            }
        )
    return rows


def _list_workspace_dir(path: str, root: str) -> List[Dict[str, Any]]:
    """Flat listing of one directory, dirs first then files, case-insensitive."""
    rows: List[Dict[str, Any]] = []
    names = sorted(os.listdir(path), key=lambda v: (not os.path.isdir(os.path.join(path, v)), v.lower()))
    for name in names:
        if name in {".DS_Store"}:
            continue
        abs_path = os.path.join(path, name)
        rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
        stat = os.stat(abs_path)
        rows.append(
            {
                "name": name,
                "path": rel_path,
                "type": "dir" if os.path.isdir(abs_path) else "file",
                "size": stat.st_size if os.path.isfile(abs_path) else None,
                "ext": os.path.splitext(name)[1].lower() if os.path.isfile(abs_path) else "",
                "ctime": _workspace_stat_ctime_iso(stat),
                "mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z",
            }
        )
    return rows


def _list_workspace_dir_recursive(path: str, root: str) -> List[Dict[str, Any]]:
    """Flat recursive listing of *path*, finally sorted dirs-first by rel path."""
    rows: List[Dict[str, Any]] = []
    for walk_root, dirnames, filenames in os.walk(path):
        # In-place sort of dirnames controls the walk order deterministically.
        dirnames.sort(key=lambda v: v.lower())
        filenames.sort(key=lambda v: v.lower())

        for name in dirnames:
            if name in {".DS_Store"}:
                continue
            abs_path = os.path.join(walk_root, name)
            rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
            stat = os.stat(abs_path)
            rows.append(
                {
                    "name": name,
                    "path": rel_path,
                    "type": "dir",
                    "size": None,
                    "ext": "",
                    "ctime": _workspace_stat_ctime_iso(stat),
                    "mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z",
                }
            )

        for name in filenames:
            if name in {".DS_Store"}:
                continue
            abs_path = os.path.join(walk_root, name)
            rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
            stat = os.stat(abs_path)
            rows.append(
                {
                    "name": name,
                    "path": rel_path,
                    "type": "file",
                    "size": stat.st_size,
                    "ext": os.path.splitext(name)[1].lower(),
                    "ctime": _workspace_stat_ctime_iso(stat),
                    "mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z",
                }
            )

    rows.sort(key=lambda v: (v.get("type") != "dir", str(v.get("path", "")).lower()))
    return rows


def _stream_file_range(target: str, start: int, end: int, chunk_size: int = 1024 * 1024) -> Generator[bytes, None, None]:
    """Yield bytes [start, end] (inclusive) of *target* in chunk_size pieces."""
    with open(target, "rb") as fh:
        fh.seek(start)
        remaining = end - start + 1
        while remaining > 0:
            chunk = fh.read(min(chunk_size, remaining))
            if not chunk:
                break
            remaining -= len(chunk)
            yield chunk


def _build_ranged_workspace_response(target: str, media_type: str, range_header: str) -> Response:
    """Build a 206 partial-content response for a single ``bytes=a-b`` range.

    Supports open-ended (``a-``) and suffix (``-n``) forms; raises
    HTTPException(416) for unparseable or unsatisfiable ranges.
    NOTE(review): the regex only reads the first range of a multi-range
    header (``bytes=a-b,c-d``); extra ranges are silently ignored — confirm
    that is acceptable for the clients involved.
    """
    file_size = os.path.getsize(target)
    range_match = re.match(r"bytes=(\d*)-(\d*)", range_header.strip())
    if not range_match:
        raise HTTPException(status_code=416, detail="Invalid range")

    start_raw, end_raw = range_match.groups()
    if start_raw == "" and end_raw == "":
        raise HTTPException(status_code=416, detail="Invalid range")

    if start_raw == "":
        # Suffix form "-n": serve the last n bytes.
        length = int(end_raw)
        if length <= 0:
            raise HTTPException(status_code=416, detail="Invalid range")
        start = max(file_size - length, 0)
        end = file_size - 1
    else:
        start = int(start_raw)
        end = int(end_raw) if end_raw else file_size - 1

    if start >= file_size or start < 0:
        raise HTTPException(status_code=416, detail="Requested range not satisfiable")
    end = min(end, file_size - 1)
    if end < start:
        raise HTTPException(status_code=416, detail="Requested range not satisfiable")

    content_length = end - start + 1
    headers = {
        "Accept-Ranges": "bytes",
        "Content-Range": f"bytes {start}-{end}/{file_size}",
        "Content-Length": str(content_length),
    }
    return StreamingResponse(
        _stream_file_range(target, start, end),
        status_code=206,
        media_type=media_type or "application/octet-stream",
        headers=headers,
    )


def _build_workspace_raw_url(bot_id: str, path: str, public: bool) -> str:
    """URL-encoded raw-file URL for *path*; "" when the path is empty."""
    normalized = "/".join(part for part in str(path or "").strip().split("/") if part)
    if not normalized:
        return ""
    prefix = "/public" if public else "/api"
    return f"{prefix}/bots/{quote(bot_id, safe='')}/workspace/raw/{quote(normalized, safe='/')}"


def _serve_workspace_file(
    *,
    bot_id: str,
    path: str,
    download: bool,
    request: Request,
    public: bool = False,
    redirect_html_to_raw: bool = False,
) -> Response:
    """Serve a workspace file: optional HTML→raw redirect, Range support, download.

    Raises HTTPException(404) when *path* is not an existing file.
    """
    _root, target = _resolve_workspace_path(bot_id, path)
    if not os.path.isfile(target):
        raise HTTPException(status_code=404, detail="File not found")

    media_type, _ = mimetypes.guess_type(target)
    # Optionally bounce HTML views to the raw endpoint so they render in-browser.
    if redirect_html_to_raw and not download and str(media_type or "").startswith("text/html"):
        raw_url = _build_workspace_raw_url(bot_id, path, public=public)
        if raw_url:
            return RedirectResponse(url=raw_url, status_code=307)

    range_header = request.headers.get("range", "") if request else ""
    if range_header and not download:
        return _build_ranged_workspace_response(target, media_type or "application/octet-stream", range_header)

    common_headers = {"Accept-Ranges": "bytes"}
    if download:
        # filename= triggers a Content-Disposition attachment header.
        return FileResponse(
            target,
            media_type=media_type or "application/octet-stream",
            filename=os.path.basename(target),
            headers=common_headers,
        )
    return FileResponse(target, media_type=media_type or "application/octet-stream", headers=common_headers)


def get_workspace_tree_data(
    bot_id: str,
    *,
    path: Optional[str] = None,
    recursive: bool = False,
) -> Dict[str, Any]:
    """Directory listing payload for the workspace browser.

    Returns an empty listing when the workspace root does not exist yet;
    raises HTTPException(400) when *path* is not a directory.
    """
    root = _workspace_root(bot_id)
    if not os.path.isdir(root):
        return {"bot_id": bot_id, "root": root, "cwd": "", "parent": None, "entries": []}

    _, target = _resolve_workspace_path(bot_id, path)
    if not os.path.isdir(target):
        raise HTTPException(status_code=400, detail="workspace path is not a directory")

    cwd = os.path.relpath(target, root).replace("\\", "/")
    if cwd == ".":
        cwd = ""

    parent = None
    if cwd:
        parent = os.path.dirname(cwd).replace("\\", "/")
        if parent == ".":
            parent = ""

    return {
        "bot_id": bot_id,
        "root": root,
        "cwd": cwd,
        "parent": parent,
        "entries": _list_workspace_dir_recursive(target, root) if recursive else _list_workspace_dir(target, root),
    }


def read_workspace_text_file(
    bot_id: str,
    *,
    path: str,
    max_bytes: int = 200000,
) -> Dict[str, Any]:
    """Read a previewable text file, capped to a clamped byte budget.

    Raises 404 when missing, 400 for unsupported extensions or binary
    content (NUL byte detected in the sampled bytes).
    """
    root, target = _resolve_workspace_path(bot_id, path)
    if not os.path.isfile(target):
        raise HTTPException(status_code=404, detail="workspace file not found")

    ext = os.path.splitext(target)[1].lower()
    if ext not in TEXT_PREVIEW_EXTENSIONS:
        raise HTTPException(status_code=400, detail=f"unsupported file type: {ext or '(none)'}")

    # Clamp the caller's budget to [4 KiB, 1 MB]; read one extra byte to
    # detect truncation.
    safe_max = max(4096, min(int(max_bytes), 1000000))
    with open(target, "rb") as file:
        raw = file.read(safe_max + 1)

    if b"\x00" in raw:
        raise HTTPException(status_code=400, detail="binary file is not previewable")

    truncated = len(raw) > safe_max
    body = raw[:safe_max] if truncated else raw
    rel_path = os.path.relpath(target, root).replace("\\", "/")

    return {
        "bot_id": bot_id,
        "path": rel_path,
        "size": os.path.getsize(target),
        "is_markdown": ext in MARKDOWN_EXTENSIONS,
        "truncated": truncated,
        "content": body.decode("utf-8", errors="replace"),
    }


def update_workspace_markdown_file(
    bot_id: str,
    *,
    path: str,
    content: str,
) -> Dict[str, Any]:
    """Overwrite an existing markdown file atomically.

    Raises 404 when missing, 400 for non-markdown targets or NUL bytes in
    the content, 413 when the encoded content exceeds 2 MB.
    """
    root, target = _resolve_workspace_path(bot_id, path)
    if not os.path.isfile(target):
        raise HTTPException(status_code=404, detail="workspace file not found")

    ext = os.path.splitext(target)[1].lower()
    if ext not in MARKDOWN_EXTENSIONS:
        raise HTTPException(
            status_code=400,
            detail=f"editing is only supported for markdown files: {ext or '(none)'}",
        )

    normalized_content = str(content or "")
    encoded = normalized_content.encode("utf-8")
    if len(encoded) > 2_000_000:
        raise HTTPException(status_code=413, detail="markdown file too large to save")
    if "\x00" in normalized_content:
        raise HTTPException(status_code=400, detail="markdown content contains invalid null bytes")

    _write_text_atomic(target, normalized_content)
    rel_path = os.path.relpath(target, root).replace("\\", "/")
    return {
        "bot_id": bot_id,
        "path": rel_path,
        "size": os.path.getsize(target),
        "is_markdown": True,
        "truncated": False,
        "content": normalized_content,
    }


def serve_workspace_file(
    *,
    bot_id: str,
    path: str,
    download: bool,
    request: Request,
    public: bool = False,
    redirect_html_to_raw: bool = False,
) -> Response:
    """Public wrapper around :func:`_serve_workspace_file`."""
    return _serve_workspace_file(
        bot_id=bot_id,
        path=path,
        download=download,
        request=request,
        public=public,
        redirect_html_to_raw=redirect_html_to_raw,
    )


def _sanitize_upload_filename(original_name: str) -> str:
    """Strip path components and replace unsafe characters; never returns ""."""
    name = os.path.basename(original_name).replace("\\", "_").replace("/", "_")
    name = re.sub(r"[^\w.\-()+@ ]+", "_", name)
    return name or "upload.bin"


async def upload_workspace_files_to_workspace(
    bot_id: str,
    *,
    files: List[UploadFile],
    path: Optional[str] = None,
) -> Dict[str, Any]:
    """Stream uploaded files into the workspace (default subdir: ``uploads``).

    Enforces the platform extension allow-list and size limit per file,
    de-duplicates colliding names with a timestamp suffix, and removes the
    partial file when a write fails mid-stream.

    Raises: 400 (no files / disallowed extension / bad target path),
    413 (file over limit), 500 (write failure).
    """
    if not files:
        raise HTTPException(status_code=400, detail="no files uploaded")

    platform_settings = get_platform_settings_snapshot()
    max_bytes = platform_settings.upload_max_mb * 1024 * 1024
    allowed_extensions = set(platform_settings.allowed_attachment_extensions)

    root, upload_dir = _resolve_workspace_path(bot_id, path or "uploads")
    os.makedirs(upload_dir, exist_ok=True)
    # Re-verify the created directory still resolves inside the workspace.
    safe_dir_real = os.path.abspath(upload_dir)
    if os.path.commonpath([root, safe_dir_real]) != root:
        raise HTTPException(status_code=400, detail="invalid upload target path")

    rows: List[Dict[str, Any]] = []
    for upload in files:
        original = (upload.filename or "upload.bin").strip() or "upload.bin"
        name = _sanitize_upload_filename(original)
        ext = str(os.path.splitext(name)[1] or "").strip().lower()
        # Empty allow-list means "accept any extension".
        if allowed_extensions and ext not in allowed_extensions:
            raise HTTPException(
                status_code=400,
                detail=f"File '{name}' extension is not allowed. Allowed: {', '.join(sorted(allowed_extensions))}",
            )

        abs_path = os.path.join(safe_dir_real, name)
        if os.path.exists(abs_path):
            # Name collision: append a UTC-epoch suffix before the extension.
            base, file_ext = os.path.splitext(name)
            name = f"{base}-{int(datetime.utcnow().timestamp())}{file_ext}"
            abs_path = os.path.join(safe_dir_real, name)

        total_size = 0
        try:
            with open(abs_path, "wb") as file:
                while True:
                    chunk = await upload.read(1024 * 1024)
                    if not chunk:
                        break
                    total_size += len(chunk)
                    if total_size > max_bytes:
                        raise HTTPException(
                            status_code=413,
                            detail=f"File '{name}' too large (max {max_bytes // (1024 * 1024)}MB)",
                        )
                    file.write(chunk)
        except HTTPException:
            # Remove the partial file before re-raising the size error.
            if os.path.exists(abs_path):
                os.remove(abs_path)
            raise
        except OSError as exc:
            if os.path.exists(abs_path):
                os.remove(abs_path)
            raise HTTPException(
                status_code=500,
                detail=f"Failed to write file '{name}': {exc.strerror or str(exc)}",
            )
        except Exception:
            if os.path.exists(abs_path):
                os.remove(abs_path)
            raise HTTPException(status_code=500, detail=f"Failed to upload file '{name}'")
        finally:
            await upload.close()

        rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
        rows.append({"name": name, "path": rel_path, "size": total_size})

    return {"bot_id": bot_id, "files": rows}
-_ALLOWED_MSG_KEYS = frozenset({"role", "content", "tool_calls", "tool_call_id", "name", "reasoning_content"}) -_ANTHROPIC_EXTRA_KEYS = frozenset({"thinking_blocks"}) -_ALNUM = string.ascii_letters + string.digits - -def _short_tool_id() -> str: - """Generate a 9-char alphanumeric ID compatible with all providers (incl. Mistral).""" - return "".join(secrets.choice(_ALNUM) for _ in range(9)) - - -class LiteLLMProvider(LLMProvider): - """ - LLM provider using LiteLLM for multi-provider support. - - Supports OpenRouter, Anthropic, OpenAI, Gemini, MiniMax, and many other providers through - a unified interface. Provider-specific logic is driven by the registry - (see providers/registry.py) — no if-elif chains needed here. - """ - - def __init__( - self, - api_key: str | None = None, - api_base: str | None = None, - default_model: str = "anthropic/claude-opus-4-5", - extra_headers: dict[str, str] | None = None, - provider_name: str | None = None, - ): - super().__init__(api_key, api_base) - self.default_model = default_model - self.extra_headers = extra_headers or {} - - # Detect gateway / local deployment. - # provider_name (from config key) is the primary signal; - # api_key / api_base are fallback for auto-detection. 
- self._gateway = find_gateway(provider_name, api_key, api_base) - - # Configure environment variables - if api_key: - self._setup_env(api_key, api_base, default_model) - - if api_base: - litellm.api_base = api_base - - # Disable LiteLLM logging noise - litellm.suppress_debug_info = True - # Drop unsupported parameters for providers (e.g., gpt-5 rejects some params) - litellm.drop_params = True - - self._langsmith_enabled = bool(os.getenv("LANGSMITH_API_KEY")) - - def _setup_env(self, api_key: str, api_base: str | None, model: str) -> None: - """Set environment variables based on detected provider.""" - spec = self._gateway or find_by_model(model) - if not spec: - return - if not spec.env_key: - # OAuth/provider-only specs (for example: openai_codex) - return - - # Gateway/local overrides existing env; standard provider doesn't - if self._gateway: - os.environ[spec.env_key] = api_key - else: - os.environ.setdefault(spec.env_key, api_key) - - # Resolve env_extras placeholders: - # {api_key} → user's API key - # {api_base} → user's api_base, falling back to spec.default_api_base - effective_base = api_base or spec.default_api_base - for env_name, env_val in spec.env_extras: - resolved = env_val.replace("{api_key}", api_key) - resolved = resolved.replace("{api_base}", effective_base) - os.environ.setdefault(env_name, resolved) - - def _resolve_model(self, model: str) -> str: - """Resolve model name by applying provider/gateway prefixes.""" - if self._gateway: - prefix = self._gateway.litellm_prefix - if self._gateway.strip_model_prefix: - model = model.split("/")[-1] - if prefix: - model = f"{prefix}/{model}" - return model - - # Standard mode: auto-prefix for known providers - spec = find_by_model(model) - if spec and spec.litellm_prefix: - model = self._canonicalize_explicit_prefix(model, spec.name, spec.litellm_prefix) - if not any(model.startswith(s) for s in spec.skip_prefixes): - model = f"{spec.litellm_prefix}/{model}" - - return model - - @staticmethod - def 
_canonicalize_explicit_prefix(model: str, spec_name: str, canonical_prefix: str) -> str: - """Normalize explicit provider prefixes like `github-copilot/...`.""" - if "/" not in model: - return model - prefix, remainder = model.split("/", 1) - if prefix.lower().replace("-", "_") != spec_name: - return model - return f"{canonical_prefix}/{remainder}" - - def _supports_cache_control(self, model: str) -> bool: - """Return True when the provider supports cache_control on content blocks.""" - if self._gateway is not None: - return self._gateway.supports_prompt_caching - spec = find_by_model(model) - return spec is not None and spec.supports_prompt_caching - - def _apply_cache_control( - self, - messages: list[dict[str, Any]], - tools: list[dict[str, Any]] | None, - ) -> tuple[list[dict[str, Any]], list[dict[str, Any]] | None]: - """Return copies of messages and tools with cache_control injected.""" - new_messages = [] - for msg in messages: - if msg.get("role") == "system": - content = msg["content"] - if isinstance(content, str): - new_content = [{"type": "text", "text": content, "cache_control": {"type": "ephemeral"}}] - else: - new_content = list(content) - new_content[-1] = {**new_content[-1], "cache_control": {"type": "ephemeral"}} - new_messages.append({**msg, "content": new_content}) - else: - new_messages.append(msg) - - new_tools = tools - if tools: - new_tools = list(tools) - new_tools[-1] = {**new_tools[-1], "cache_control": {"type": "ephemeral"}} - - return new_messages, new_tools - - def _apply_model_overrides(self, model: str, kwargs: dict[str, Any]) -> None: - """Apply model-specific parameter overrides from the registry.""" - model_lower = model.lower() - spec = find_by_model(model) - if spec: - for pattern, overrides in spec.model_overrides: - if pattern in model_lower: - kwargs.update(overrides) - return - - @staticmethod - def _extra_msg_keys(original_model: str, resolved_model: str) -> frozenset[str]: - """Return provider-specific extra keys to 
preserve in request messages.""" - spec = find_by_model(original_model) or find_by_model(resolved_model) - if (spec and spec.name == "anthropic") or "claude" in original_model.lower() or resolved_model.startswith("anthropic/"): - return _ANTHROPIC_EXTRA_KEYS - return frozenset() - - @staticmethod - def _normalize_tool_call_id(tool_call_id: Any) -> Any: - """Normalize tool_call_id to a provider-safe 9-char alphanumeric form.""" - if not isinstance(tool_call_id, str): - return tool_call_id - if len(tool_call_id) == 9 and tool_call_id.isalnum(): - return tool_call_id - return hashlib.sha1(tool_call_id.encode()).hexdigest()[:9] - - @staticmethod - def _sanitize_messages(messages: list[dict[str, Any]], extra_keys: frozenset[str] = frozenset()) -> list[dict[str, Any]]: - """Strip non-standard keys and ensure assistant messages have a content key.""" - allowed = _ALLOWED_MSG_KEYS | extra_keys - sanitized = LLMProvider._sanitize_request_messages(messages, allowed) - id_map: dict[str, str] = {} - - def map_id(value: Any) -> Any: - if not isinstance(value, str): - return value - return id_map.setdefault(value, LiteLLMProvider._normalize_tool_call_id(value)) - - for clean in sanitized: - # Keep assistant tool_calls[].id and tool tool_call_id in sync after - # shortening, otherwise strict providers reject the broken linkage. 
- if isinstance(clean.get("tool_calls"), list): - normalized_tool_calls = [] - for tc in clean["tool_calls"]: - if not isinstance(tc, dict): - normalized_tool_calls.append(tc) - continue - tc_clean = dict(tc) - tc_clean["id"] = map_id(tc_clean.get("id")) - normalized_tool_calls.append(tc_clean) - clean["tool_calls"] = normalized_tool_calls - - if "tool_call_id" in clean and clean["tool_call_id"]: - clean["tool_call_id"] = map_id(clean["tool_call_id"]) - return sanitized - - async def chat( - self, - messages: list[dict[str, Any]], - tools: list[dict[str, Any]] | None = None, - model: str | None = None, - max_tokens: int = 4096, - temperature: float = 0.7, - reasoning_effort: str | None = None, - tool_choice: str | dict[str, Any] | None = None, - ) -> LLMResponse: - """ - Send a chat completion request via LiteLLM. - - Args: - messages: List of message dicts with 'role' and 'content'. - tools: Optional list of tool definitions in OpenAI format. - model: Model identifier (e.g., 'anthropic/claude-sonnet-4-5'). - max_tokens: Maximum tokens in response. - temperature: Sampling temperature. - - Returns: - LLMResponse with content and/or tool calls. - """ - original_model = model or self.default_model - model = self._resolve_model(original_model) - extra_msg_keys = self._extra_msg_keys(original_model, model) - - if self._supports_cache_control(original_model): - messages, tools = self._apply_cache_control(messages, tools) - - # Clamp max_tokens to at least 1 — negative or zero values cause - # LiteLLM to reject the request with "max_tokens must be at least 1". - max_tokens = max(1, max_tokens) - - kwargs: dict[str, Any] = { - "model": model, - "messages": self._sanitize_messages(self._sanitize_empty_content(messages), extra_keys=extra_msg_keys), - "max_tokens": max_tokens, - "temperature": temperature, - } - - if self._gateway: - kwargs.update(self._gateway.litellm_kwargs) - - # Apply model-specific overrides (e.g. 
kimi-k2.5 temperature) - self._apply_model_overrides(model, kwargs) - - if self._langsmith_enabled: - kwargs.setdefault("callbacks", []).append("langsmith") - - # Pass api_key directly — more reliable than env vars alone - if self.api_key: - kwargs["api_key"] = self.api_key - - # Pass api_base for custom endpoints - if self.api_base: - kwargs["api_base"] = self.api_base - - # Pass extra headers (e.g. APP-Code for AiHubMix) - if self.extra_headers: - kwargs["extra_headers"] = self.extra_headers - - if reasoning_effort: - kwargs["reasoning_effort"] = reasoning_effort - kwargs["drop_params"] = True - - if tools: - kwargs["tools"] = tools - kwargs["tool_choice"] = tool_choice or "auto" - - try: - response = await acompletion(**kwargs) - return self._parse_response(response) - except Exception as e: - # Return error as content for graceful handling - return LLMResponse( - content=f"Error calling LLM: {str(e)}", - finish_reason="error", - ) - - def _parse_response(self, response: Any) -> LLMResponse: - """Parse LiteLLM response into our standard format.""" - choice = response.choices[0] - message = choice.message - content = message.content - finish_reason = choice.finish_reason - - # Some providers (e.g. GitHub Copilot) split content and tool_calls - # across multiple choices. Merge them so tool_calls are not lost. 
- raw_tool_calls = [] - for ch in response.choices: - msg = ch.message - if hasattr(msg, "tool_calls") and msg.tool_calls: - raw_tool_calls.extend(msg.tool_calls) - if ch.finish_reason in ("tool_calls", "stop"): - finish_reason = ch.finish_reason - if not content and msg.content: - content = msg.content - - if len(response.choices) > 1: - logger.debug("LiteLLM response has {} choices, merged {} tool_calls", - len(response.choices), len(raw_tool_calls)) - - tool_calls = [] - for tc in raw_tool_calls: - # Parse arguments from JSON string if needed - args = tc.function.arguments - if isinstance(args, str): - args = json_repair.loads(args) - - provider_specific_fields = getattr(tc, "provider_specific_fields", None) or None - function_provider_specific_fields = ( - getattr(tc.function, "provider_specific_fields", None) or None - ) - - tool_calls.append(ToolCallRequest( - id=_short_tool_id(), - name=tc.function.name, - arguments=args, - provider_specific_fields=provider_specific_fields, - function_provider_specific_fields=function_provider_specific_fields, - )) - - usage = {} - if hasattr(response, "usage") and response.usage: - usage = { - "prompt_tokens": response.usage.prompt_tokens, - "completion_tokens": response.usage.completion_tokens, - "total_tokens": response.usage.total_tokens, - } - - reasoning_content = getattr(message, "reasoning_content", None) or None - thinking_blocks = getattr(message, "thinking_blocks", None) or None - - return LLMResponse( - content=content, - tool_calls=tool_calls, - finish_reason=finish_reason or "stop", - usage=usage, - reasoning_content=reasoning_content, - thinking_blocks=thinking_blocks, - ) - - def get_default_model(self) -> str: - """Get the default model.""" - return self.default_model diff --git a/design/code-structure-standards.md b/design/code-structure-standards.md new file mode 100644 index 0000000..5472953 --- /dev/null +++ b/design/code-structure-standards.md @@ -0,0 +1,304 @@ +# Dashboard Nanobot 代码结构规范(强制执行) + 
+本文档定义后续前端、后端、`dashboard-edge` 的结构边界与拆分规则。 + +目标不是“尽可能多拆文件”,而是: + +- 保持装配层足够薄 +- 保持业务边界清晰 +- 避免再次出现单文件多职责膨胀 +- 让后续迭代继续走低风险、小步验证路线 + +本文档自落地起作为**后续开发强制规范**执行。 + +--- + +## 1. 总原则 + +### 1.1 先分层,再分文件 + +- 优先先把“页面装配 / 业务编排 / 基础设施 / 纯视图”分开,再决定是否继续拆文件。 +- 不允许为了“看起来模块化”而把强耦合逻辑拆成大量碎文件。 +- 允许保留中等体量的“单主题控制器”文件,但不允许继续把多个主题堆进一个文件。 + +### 1.2 低风险重构优先 + +- 结构重构优先做“搬运与收口”,不顺手修改业务行为。 +- 同一轮改动里,默认**不要**同时做: + - 大规模结构调整 + - 新功能 + - 行为修复 +- 如果确实需要行为修复,只允许修复拆分直接引入的问题。 + +### 1.3 装配层必须薄 + +- 页面层、路由层、应用启动层都只负责装配。 +- 装配层可以做依赖注入、状态接线、事件转发。 +- 装配层不允许承载复杂业务判断、持久化细节、长流程编排。 + +### 1.4 新文件必须按主题命名 + +- 文件名必须直接表达职责。 +- 禁止模糊命名,例如: + - `helpers2.py` + - `misc.ts` + - `commonPage.tsx` + - `temp_service.py` + +--- + +## 2. 前端结构规范 + +### 2.1 目录分层 + +前端统一按以下层次组织: + +- `frontend/src/app` + - 应用壳、全局路由视图、全局初始化 +- `frontend/src/modules/` + - 领域模块入口 +- `frontend/src/modules//components` + - 纯视图组件、弹层、区块组件 +- `frontend/src/modules//hooks` + - 领域内控制器 hook、状态编排 hook +- `frontend/src/modules//api` + - 仅该领域使用的 API 请求封装 +- `frontend/src/modules//shared` + - 领域内共享的纯函数、常量、类型桥接 +- `frontend/src/components` + - 跨模块通用 UI 组件 +- `frontend/src/utils` + - 真正跨领域的通用工具 + +### 2.2 页面文件职责 + +页面文件如: + +- `frontend/src/modules/platform/PlatformDashboardPage.tsx` +- `frontend/src/modules/platform/NodeWorkspacePage.tsx` +- `frontend/src/modules/platform/NodeHomePage.tsx` + +必须遵守: + +- 只做页面装配 +- 只组织已有区块、弹层、控制器 hook +- 不直接承载长段 API 请求、副作用、数据清洗逻辑 + +页面文件目标体量: + +- 目标:`< 500` 行 +- 可接受上限:`800` 行 +- 超过 `800` 行必须优先拆出页面控制器 hook 或区块装配组件 + +### 2.3 控制器 hook 规范 + +控制器 hook 用于承载: + +- 页面状态 +- 副作用 +- API 调用编排 +- 事件处理 +- 派生数据 + +典型命名: + +- `useNodeHomePage` +- `useNodeWorkspacePage` +- `usePlatformDashboardPage` + +规则: + +- 一个 hook 只服务一个明确页面或一个明确子流程 +- hook 不直接产出大量 JSX +- hook 内部允许组合更小的子 hook,但不要为了拆分而拆分 + +控制器 hook 目标体量: + +- 目标:`< 800` 行 +- 可接受上限:`1000` 行 +- 超过 `1000` 行时,必须再按主题拆成子 hook 或把重复逻辑提到 `shared`/`api` + +### 2.4 视图组件规范 + +组件分为两类: + +- 区块组件:例如列表区、详情区、摘要卡片区 +- 弹层组件:例如 Drawer、Modal、Sheet + +规则: + +- 视图组件默认不直接请求接口 +- 视图组件只接收已经整理好的 props +- 
纯视图组件内部不保留与页面强耦合的业务缓存 + +### 2.5 前端复用原则 + +- 优先提炼“稳定复用的模式”,不要提炼“碰巧重复一次的代码” +- 三处以上重复,优先考虑抽取 +- 同域复用优先放 `modules//shared` +- 跨域复用优先放 `src/components` 或 `src/utils` + +### 2.6 前端禁止事项 + +- 禁止再次把页面做成“一个文件管状态、接口、弹层、列表、详情、搜索、分页” +- 禁止把样式、业务逻辑、视图结构三者重新耦合回单文件 +- 禁止创建无明确职责的超通用组件 +- 禁止为减少行数而做不可读的过度抽象 + +--- + +## 3. 后端结构规范 + +### 3.1 目录分层 + +后端统一按以下边界组织: + +- `backend/main.py` + - 仅启动入口 +- `backend/app_factory.py` + - 应用实例创建 +- `backend/bootstrap` + - 依赖装配、应用初始化、生命周期拼装 +- `backend/api` + - FastAPI 路由层 +- `backend/services` + - 业务用例与领域服务 +- `backend/core` + - 数据库、缓存、配置、基础设施适配 +- `backend/models` + - ORM 模型 +- `backend/schemas` + - 请求/响应 DTO +- `backend/providers` + - runtime/workspace/provision 适配层 + +### 3.2 启动与装配层规范 + +以下文件必须保持装配层属性: + +- `backend/main.py` +- `backend/app_factory.py` +- `backend/bootstrap/app_runtime.py` + +规则: + +- 只做依赖创建、注入、路由注册、生命周期绑定 +- 不写业务 SQL +- 不写领域规则判断 +- 不写长流程编排 + +### 3.3 Router 规范 + +`backend/api/*.py` 只允许承担: + +- HTTP 参数接收 +- schema 校验 +- 调用 service +- 把领域异常转换成 HTTP 异常 + +Router 不允许承担: + +- 多步业务编排 +- 大量数据聚合 +- 数据库表间拼装 +- 本地文件系统读写细节 + +Router 文件体量规则: + +- 目标:`< 300` 行 +- 可接受上限:`400` 行 +- 超过 `400` 行必须拆成子 router,并由装配层统一 `include_router` + +### 3.4 Service 规范 + +Service 必须按业务主题拆分。 + +允许的 service 类型: + +- `*_settings_service.py` +- `*_usage_service.py` +- `*_activity_service.py` +- `*_analytics_service.py` +- `*_overview_service.py` +- `*_query_service.py` +- `*_command_service.py` +- `*_lifecycle_service.py` + +Service 文件规则: + +- 一个文件只负责一个主题 +- 同一文件内允许有私有 helper,但 helper 只能服务当前主题 +- 如果一个主题明显包含“读模型 + 写模型 + 统计 + 配置”,应继续拆为多个 service + +Service 体量规则: + +- 目标:`< 350` 行 +- 可接受上限:`500` 行 +- 超过 `500` 行必须继续拆 + +### 3.6 Schema 规范 + +- `schemas` 只定义 DTO +- 不允许在 schema 中直接读数据库、读文件、发网络请求 +- schema 字段演进必须保持前后端契约可追踪 + +### 3.7 Core 规范 + +`core` 只允许放: + +- 数据库与 Session 管理 +- 缓存 +- 配置 +- 基础设施适配器 + +不允许把领域业务塞回 `core` 来“躲避 service 变大”。 + +### 3.8 Provider 规范 + +`providers` 只处理运行时/工作区/部署目标差异。 + +不允许把平台业务逻辑塞进 provider。 + +--- + +## 4. 
本项目后续开发的执行规则 + +### 4.1 每轮改动的默认顺序 + +1. 先审计职责边界 +2. 先做装配层变薄 +3. 再提炼稳定复用块 +4. 最后再考虑继续细拆 + +### 4.2 校验规则 + +- 前端结构改动后,默认执行 `frontend` 构建校验 +- 后端结构改动后,默认至少执行 `python3 -m py_compile` +- 如果改动触达运行时或边界协议,再考虑追加更高层验证 + +### 4.3 文档同步规则 + +以下情况必须同步设计文档: + +- 新增一层目录边界 +- 新增一个领域的标准拆法 +- 改变页面/服务的职责划分 +- 把兼容层正式降级为装配/导出层 + +### 4.4 禁止事项 + +- 禁止回到“大文件集中堆功能”的开发方式 +- 禁止为了图省事把新逻辑加回兼容层 +- 禁止在没有明确复用收益时过度抽象 +- 禁止在一次改动里同时重写 UI、重写数据流、重写接口协议 + +--- + +## 5. 当前执行基线(2026-03) + +当前结构治理目标分两层: + +- 第一层:主入口、页面入口、路由入口必须变薄 +- 第二层:领域内部的 service / hook / overlays / sections 必须按主题稳定收口 + +后续所有新增功能与重构,均以本文档为准执行。 diff --git a/fix_services.py b/fix_services.py new file mode 100644 index 0000000..cbe3f88 --- /dev/null +++ b/fix_services.py @@ -0,0 +1,22 @@ +import os + +def fix_sf(path): + with open(path, 'r') as f: + lines = f.readlines() + new_lines = [] + for line in lines: + if "from services.bot_service import _skills_root" in line: continue + if "from services.bot_service import _workspace_root" in line: continue + new_lines.append(line) + + # Add corrected imports at the top + if path.endswith("skill_service.py"): + new_lines.insert(20, "from services.bot_service import _skills_root, _workspace_root\n") + elif path.endswith("workspace_service.py"): + new_lines.insert(20, "from services.bot_service import _workspace_root\n") + + with open(path, 'w') as f: + f.writelines(new_lines) + +# Wait, if _skills_root is defined in skill_service.py, it shouldn't be imported from bot_service. +# Let's check where it IS defined. 
diff --git a/frontend/src/App.css b/frontend/src/App.css index 922fba2..a30f8a3 100644 --- a/frontend/src/App.css +++ b/frontend/src/App.css @@ -65,6 +65,16 @@ body { overflow: hidden; } +.app-layout { + min-height: calc(100vh - 36px); +} + +.app-layout.has-nav-rail { + display: grid; + grid-template-columns: 86px minmax(0, 1fr); + gap: 14px; +} + .app-frame { height: calc(100vh - 36px); display: grid; @@ -72,16 +82,102 @@ body { gap: 14px; } -.app-frame.app-frame-no-header { - grid-template-rows: 1fr; - gap: 0; -} - .app-shell-compact .app-frame { height: calc(100dvh - 36px); min-height: calc(100dvh - 36px); } +.app-nav-rail { + height: calc(100vh - 36px); + position: sticky; + top: 18px; + border: 1px solid var(--line); + border-radius: 18px; + background: color-mix(in oklab, var(--panel) 90%, transparent); + box-shadow: var(--shadow); + backdrop-filter: blur(6px); + display: flex; + flex-direction: column; + align-items: center; + gap: 18px; + padding: 16px 10px; +} + +.app-nav-rail-brand { + width: 52px; + height: 52px; + border-radius: 16px; + border: 1px solid color-mix(in oklab, var(--line) 72%, transparent); + background: color-mix(in oklab, var(--panel-soft) 80%, transparent); + display: inline-flex; + align-items: center; + justify-content: center; +} + +.app-nav-rail-brand-icon { + width: 26px; + height: 26px; + object-fit: contain; +} + +.app-nav-rail-groups { + width: 100%; + flex: 1 1 auto; + display: flex; + flex-direction: column; + gap: 18px; +} + +.app-nav-group { + display: flex; + flex-direction: column; + align-items: center; + gap: 10px; +} + +.app-nav-group-label { + font-size: 10px; + font-weight: 800; + letter-spacing: 0.16em; + text-transform: uppercase; + color: var(--muted); +} + +.app-nav-group-items { + width: 100%; + display: grid; + gap: 8px; + justify-items: center; +} + +.app-nav-rail-item { + width: 48px; + height: 48px; + border-radius: 14px; + border: 1px solid transparent; + background: transparent; + color: var(--icon-muted); 
+ display: inline-flex; + align-items: center; + justify-content: center; + cursor: pointer; + transition: transform 0.18s ease, border-color 0.18s ease, background 0.18s ease, color 0.18s ease; +} + +.app-nav-rail-item:hover { + transform: translateY(-1px); + border-color: color-mix(in oklab, var(--brand) 46%, var(--line) 54%); + background: color-mix(in oklab, var(--brand-soft) 18%, transparent); + color: var(--icon); +} + +.app-nav-rail-item.is-active { + border-color: color-mix(in oklab, var(--brand) 68%, var(--line) 32%); + background: color-mix(in oklab, var(--brand) 20%, transparent); + color: var(--icon); + box-shadow: inset 0 0 0 1px color-mix(in oklab, var(--brand) 24%, transparent); +} + .app-header { background: var(--panel); border: 1px solid var(--line); @@ -137,10 +233,8 @@ body { } .app-title-main { - display: inline-flex; + display: flex; align-items: center; - gap: 8px; - flex-wrap: wrap; min-width: 0; } @@ -157,20 +251,28 @@ body { .app-title h1 { margin: 0; - font-size: 20px; - font-weight: 800; - color: var(--title); } -.app-header-top-bot-mobile .app-title-main { - flex-direction: column; - align-items: flex-start; - gap: 0; +.app-route-heading { + font-size: 15px; + font-weight: 800; + color: var(--title); + letter-spacing: 0.12em; + text-transform: uppercase; +} + +.app-route-heading.is-entity { + font-size: 18px; + letter-spacing: 0; + text-transform: none; } .app-header-top-bot-mobile .app-title h1 { font-size: 18px; line-height: 1.15; + color: var(--title); + letter-spacing: 0; + text-transform: none; } .app-title p { @@ -196,6 +298,25 @@ body { color: var(--brand); } +.app-route-title-row { + display: inline-flex; + align-items: center; + gap: 12px; +} + +.app-route-title-icon { + width: 34px; + height: 34px; + border-radius: 10px; + border: 1px solid color-mix(in oklab, var(--line) 72%, transparent); + background: color-mix(in oklab, var(--panel-soft) 78%, transparent); + color: var(--icon); + display: inline-flex; + align-items: 
center; + justify-content: center; + flex: 0 0 auto; +} + .global-switches { display: flex; gap: 8px; @@ -281,12 +402,13 @@ body { .main-stage { min-height: 0; height: 100%; + overflow: auto; } .app-shell-compact .main-stage { min-height: 0; height: 100%; - overflow: hidden; + overflow: auto; } .app-login-shell { @@ -376,22 +498,6 @@ body { height: 100%; } -.grid-ops { - display: grid; - grid-template-columns: 320px 1fr 360px; - gap: 12px; - height: 100%; -} - -.grid-ops.grid-ops-forced { - grid-template-columns: minmax(0, 1fr) 360px; -} - -.grid-ops.grid-ops-compact { - grid-template-columns: minmax(0, 1fr); - grid-template-rows: minmax(0, 1fr); -} - .stack { display: flex; flex-direction: column; @@ -635,15 +741,6 @@ body { overflow: auto; } -.wizard-image-list { - display: grid; - gap: 10px; -} - -.wizard-image-list .card { - margin: 0; -} - .table { width: 100%; border-collapse: collapse; @@ -682,337 +779,9 @@ body { font-family: 'SF Mono', Menlo, Consolas, monospace; } -.wizard-steps { - display: grid; - grid-template-columns: repeat(3, minmax(0, 1fr)); - gap: 8px; - margin-bottom: 12px; -} - -.wizard-shell { - background: - linear-gradient(160deg, color-mix(in oklab, var(--panel) 88%, var(--brand-soft) 12%) 0%, var(--panel) 100%); - min-height: 760px; -} - -.wizard-head { - padding: 2px 2px 4px; -} - -.wizard-steps-enhanced .wizard-step { - border-radius: 12px; - border-color: color-mix(in oklab, var(--line) 72%, transparent); - background: color-mix(in oklab, var(--panel-soft) 82%, transparent); - font-weight: 700; -} - -.wizard-steps-4 { - grid-template-columns: repeat(4, minmax(0, 1fr)); -} - -.wizard-step { - border: 1px solid var(--line); - border-radius: 10px; - padding: 8px; - font-size: 12px; - color: var(--muted); - background: color-mix(in oklab, var(--panel-soft) 80%, black 20%); -} - -.wizard-step.active { - color: var(--text); - border-color: color-mix(in oklab, var(--brand) 65%, var(--line) 35%); - background: color-mix(in oklab, 
var(--brand-soft) 56%, var(--panel-soft) 44%); - box-shadow: inset 0 0 0 1px color-mix(in oklab, var(--brand) 45%, transparent); -} - -.summary-grid { - display: grid; - grid-template-columns: repeat(2, minmax(0, 1fr)); - gap: 8px; -} - -.log-view { - background: color-mix(in oklab, var(--panel-soft) 82%, black 18%); - border: 1px solid var(--line); - border-radius: 10px; - padding: 8px; - height: 100%; - overflow: auto; - font-size: 12px; - font-family: 'SF Mono', Menlo, Consolas, monospace; -} - -.log-line { - padding: 5px 0; - border-bottom: 1px solid color-mix(in oklab, var(--line) 66%, transparent); - color: var(--text); -} - -.ops-chat-panel { - min-width: 0; -} - -.ops-header-band { - border: 1px solid var(--line); - border-radius: 12px; - padding: 10px; - background: color-mix(in oklab, var(--panel-soft) 88%, var(--brand-soft) 12%); -} - -.ops-log-view { - min-height: 420px; -} - -.bot-card { - margin-bottom: 8px; - cursor: pointer; -} - -.bot-card.selected { - border-color: var(--brand); - box-shadow: inset 0 0 0 1px color-mix(in oklab, var(--brand) 45%, transparent); -} - -.bot-name { - font-weight: 700; -} - -.bot-id, -.bot-meta { - color: color-mix(in oklab, var(--text) 70%, var(--muted) 30%); - font-size: 12px; - font-weight: 600; -} - -.telemetry-card { - display: grid; - gap: 7px; - font-size: 13px; -} - -.section-mini-title { - margin: 0; - font-size: 13px; - color: var(--subtitle); - font-weight: 700; -} - -.chat-tabs { - display: flex; - gap: 8px; -} - -.chat-view { - min-height: 420px; - max-height: 62vh; - overflow: auto; - border: 1px solid var(--line); - border-radius: 12px; - padding: 10px; - background: var(--panel-soft); -} - -.chat-bubble { - margin-bottom: 8px; +.btn-sm { padding: 8px 10px; - border-radius: 10px; - line-height: 1.45; - white-space: pre-wrap; - color: var(--text); - font-size: 14px; - font-weight: 600; -} - -.chat-bubble.assistant { - border: 1px solid color-mix(in oklab, var(--brand) 45%, var(--line) 55%); - background: 
color-mix(in oklab, var(--brand-soft) 36%, var(--panel-soft) 64%); -} - -.chat-bubble.user { - border: 1px solid color-mix(in oklab, var(--ok) 55%, var(--line) 45%); - background: color-mix(in oklab, var(--ok) 18%, var(--panel-soft) 82%); -} - -.chat-bubble.system { - border: 1px dashed color-mix(in oklab, var(--warn) 50%, var(--line) 50%); - color: var(--text); - background: color-mix(in oklab, var(--warn) 18%, var(--panel-soft) 82%); font-size: 12px; - font-weight: 700; -} - -.telemetry-strong { - color: var(--text); -} - -.telemetry-strong .mono { - color: var(--text); -} - -.event-list { - display: grid; - gap: 8px; - max-height: 220px; - overflow: auto; -} - -.event-item { - display: grid; - grid-template-columns: 92px 1fr; - gap: 8px; - align-items: center; - padding: 6px 8px; - border: 1px solid var(--line); - border-radius: 8px; - background: color-mix(in oklab, var(--panel-soft) 90%, var(--panel) 10%); -} - -.event-state { - font-size: 11px; - font-weight: 700; -} - -.event-thinking .event-state { - color: #6ea5ff; -} - -.event-tool_call .event-state { - color: #67d3b1; -} - -.event-success .event-state { - color: #53cf95; -} - -.event-error .event-state { - color: #ef6666; -} - -.dialog-status-strip { - display: grid; - grid-template-columns: auto auto 1fr; - gap: 8px; - align-items: center; -} - -.state-chip { - display: inline-flex; - align-items: center; - gap: 6px; - border: 1px solid var(--line); - border-radius: 999px; - padding: 4px 10px; - font-size: 12px; - font-weight: 800; - color: var(--text); -} - -.state-running { - border-color: color-mix(in oklab, var(--ok) 60%, var(--line) 40%); - background: color-mix(in oklab, var(--ok) 22%, transparent); -} - -.state-active { - border-color: color-mix(in oklab, var(--brand) 65%, var(--line) 35%); - background: color-mix(in oklab, var(--brand) 18%, transparent); -} - -.state-last-action { - font-size: 13px; - color: var(--text); - font-weight: 700; -} - -.dialog-midstate-strip { - display: flex; - gap: 
8px; - overflow: auto; -} - -.midstate-pill { - min-width: 240px; - border: 1px solid var(--line); - border-radius: 10px; - padding: 6px 8px; - display: grid; - gap: 4px; - color: var(--text); - background: var(--panel-soft); -} - -.midstate-pill .mono { - font-size: 11px; - font-weight: 800; -} - -.midstate-pill.state-thinking .mono { - color: #6ea5ff; -} - -.midstate-pill.state-tool_call .mono { - color: #67d3b1; -} - -.midstate-pill.state-success .mono { - color: #53cf95; -} - -.midstate-pill.state-error .mono { - color: #ef6666; -} - -.chat-meta-row { - display: flex; - align-items: center; - justify-content: space-between; - margin-bottom: 4px; -} - -.chat-role { - font-size: 11px; - font-weight: 800; - color: color-mix(in oklab, var(--text) 85%, var(--muted) 15%); -} - -.chat-time { - font-size: 10px; - color: var(--muted); -} - -.agent-tabs { - display: flex; - gap: 8px; - flex-wrap: wrap; -} - -.agent-tabs-vertical { - display: flex; - flex-direction: column; - gap: 8px; - min-width: 150px; -} - -.wizard-agent-layout { - display: grid; - grid-template-columns: 170px 1fr; - gap: 10px; - min-height: 420px; -} - -.agent-tab { - border: 1px solid var(--line); - background: var(--panel-soft); - color: var(--text); - border-radius: 8px; - padding: 6px 10px; - cursor: pointer; - font-size: 12px; -} - -.agent-tab.active { - border-color: var(--brand); - background: color-mix(in oklab, var(--brand-soft) 54%, var(--panel-soft) 46%); } .modal-mask { @@ -1105,21 +874,6 @@ body { gap: 6px; } -.wizard-step2-grid { - gap: 10px; -} - -.wizard-step2-card { - gap: 8px; - border-radius: 14px; - padding: 12px; - background: color-mix(in oklab, var(--panel-soft) 86%, var(--panel) 14%); -} - -.wizard-note-card { - border-style: dashed; -} - .token-input-row { align-items: start; } @@ -1132,40 +886,6 @@ body { width: 100%; } -.wizard-channel-list { - display: grid; - gap: 8px; -} - -.wizard-channel-card { - min-width: 0; - display: grid; - gap: 6px; -} - -.wizard-channel-compact 
{ - padding: 10px; - border-radius: 10px; -} - -.wizard-dashboard-switches { - display: flex; - flex-wrap: wrap; - gap: 14px; - align-items: center; -} - -.wizard-channel-summary { - display: grid; - gap: 8px; -} - -.wizard-icon-btn { - display: inline-flex; - align-items: center; - gap: 6px; -} - .icon-btn { width: 34px; height: 34px; @@ -1245,10 +965,7 @@ body { .app-shell[data-theme='light'] .panel-desc, .app-shell[data-theme='light'] .field-label, .app-shell[data-theme='light'] .kicker, -.app-shell[data-theme='light'] .sub, -.app-shell[data-theme='light'] .chat-time, -.app-shell[data-theme='light'] .bot-id, -.app-shell[data-theme='light'] .bot-meta { +.app-shell[data-theme='light'] .sub { color: var(--subtitle); } @@ -1329,1415 +1046,11 @@ body { color: var(--text); } -.platform-grid { - display: grid; - grid-template-columns: 320px minmax(0, 1fr); - gap: 18px; - min-height: 0; -} - -.platform-grid.is-compact { - grid-template-columns: 1fr; -} - -.platform-main { - display: flex; - flex-direction: column; - gap: 18px; - min-width: 0; -} - -.platform-bot-list-panel { - min-height: 0; - display: grid; - grid-template-rows: auto auto minmax(0, 1fr) auto; - gap: 10px; -} - -.platform-list-actions { - display: flex; - gap: 8px; -} - -.platform-loading-card { - background: linear-gradient(135deg, rgba(55, 162, 255, 0.16), rgba(23, 43, 88, 0.28)); - border: 1px solid rgba(99, 170, 255, 0.22); -} - -.platform-loading-title { - font-size: 18px; - font-weight: 800; +.app-route-crumb.is-current { + cursor: default; color: var(--title); } -.platform-loading-subtitle { - margin-top: 6px; - font-size: 13px; - color: var(--text); -} - -.platform-loading-description { - margin-top: 8px; - font-size: 12px; - color: var(--muted); -} - -.platform-bot-list-scroll { - display: flex; - flex-direction: column; - gap: 10px; - min-height: 0; - overflow: auto; - padding-right: 2px; -} - -.platform-bot-card { - display: flex; - flex-direction: column; - gap: 10px; - padding: 14px; - 
border-radius: 16px; - border: 1px solid rgba(255, 255, 255, 0.08); - background: rgba(8, 13, 22, 0.72); - cursor: pointer; - transition: transform 0.18s ease, border-color 0.18s ease, box-shadow 0.18s ease; -} - -.platform-bot-card:hover, -.platform-bot-card.is-selected { - transform: translateY(-1px); - border-color: rgba(97, 174, 255, 0.45); - box-shadow: 0 16px 40px rgba(8, 25, 60, 0.18); -} - -.platform-bot-name { - font-size: 15px; - font-weight: 700; - color: var(--title); -} - -.platform-bot-id { - margin-top: 4px; - font-size: 11px; - color: var(--muted); -} - -.platform-bot-meta { - display: flex; - flex-direction: column; - gap: 6px; - font-size: 12px; - color: var(--muted); -} - -.platform-bot-actions { - display: flex; - align-items: center; - justify-content: space-between; - gap: 12px; -} - -.platform-enable-switch { - display: inline-flex; - align-items: center; - gap: 8px; - font-size: 12px; - color: var(--muted); -} - -.platform-bot-actions-main { - display: flex; - gap: 8px; -} - -.platform-summary-grid { - display: grid; - grid-template-columns: repeat(6, minmax(0, 1fr)); - gap: 14px; -} - -.platform-summary-card { - display: flex; - flex-direction: column; - gap: 8px; -} - -.platform-summary-icon { - width: 42px; - height: 42px; - display: inline-flex; - align-items: center; - justify-content: center; - border-radius: 14px; - color: #fff; -} - -.platform-summary-icon.icon-bot { - background: linear-gradient(145deg, #5474ff 0%, #2f59d7 100%); -} - -.platform-summary-icon.icon-image { - background: linear-gradient(145deg, #28b7a1 0%, #17967d 100%); -} - -.platform-summary-icon.icon-token { - background: linear-gradient(145deg, #f0877d 0%, #d95a7d 100%); -} - -.platform-summary-icon.icon-resource { - background: linear-gradient(145deg, #ed7f9a 0%, #f5a65d 46%, #ffe56c 100%); -} - -.platform-resource-card { - grid-column: span 3; - gap: 14px; -} - -.platform-summary-label { - font-size: 11px; - text-transform: uppercase; - letter-spacing: 0.1em; - 
color: var(--muted); -} - -.platform-summary-value { - font-size: 28px; - font-weight: 800; - color: var(--title); -} - -.platform-summary-meta { - font-size: 12px; - color: var(--muted); -} - -.platform-resource-head { - display: flex; - align-items: center; - gap: 12px; -} - -.platform-resource-subtitle { - margin-top: 4px; - font-size: 12px; - color: var(--muted); -} - -.platform-resource-meters { - display: flex; - flex-direction: column; - gap: 12px; -} - -.platform-resource-meter { - display: grid; - grid-template-columns: 28px minmax(0, 1fr) 80px; - align-items: center; - gap: 12px; -} - -.platform-resource-meter-label { - display: flex; - align-items: center; - justify-content: center; - color: var(--title); -} - -.platform-resource-meter-track { - position: relative; - height: 14px; - overflow: hidden; - border-radius: 999px; - background: rgba(225, 232, 245, 0.58); -} - -.platform-resource-meter-fill { - height: 100%; - border-radius: inherit; - background: linear-gradient(90deg, #7f90ff 0%, #6c5ac3 100%); -} - -.platform-resource-meter-fill.is-memory { - background: linear-gradient(90deg, #6f72e0 0%, #5a43ad 100%); -} - -.platform-resource-meter-fill.is-storage { - background: linear-gradient(90deg, #7f90ff 0%, #6b49b9 100%); -} - -.platform-resource-meter-value { - font-size: 18px; - font-weight: 700; - color: var(--title); - text-align: right; -} - -.platform-resource-footnote { - font-size: 12px; - color: var(--muted); -} - -.platform-main-grid { - display: grid; - grid-template-columns: minmax(0, 1.2fr) minmax(320px, 0.8fr); - gap: 18px; -} - -.platform-monitor-grid { - display: grid; - grid-template-columns: minmax(0, 1fr); - gap: 12px; -} - -.platform-monitor-card { - min-height: 132px; -} - -.platform-monitor-title { - display: inline-flex; - align-items: center; - gap: 8px; - font-size: 12px; - font-weight: 700; - color: var(--title); -} - -.platform-monitor-main { - margin-top: 14px; - font-size: 20px; - font-weight: 800; - color: var(--title); 
-} - -.platform-monitor-meta { - margin-top: 8px; - font-size: 12px; - color: var(--muted); -} - -.platform-selected-bot-card { - min-height: 240px; - border-radius: 18px; - border-color: color-mix(in oklab, var(--line) 74%, var(--brand-soft) 26%); - background: - linear-gradient(180deg, color-mix(in oklab, var(--panel-soft) 82%, transparent), color-mix(in oklab, var(--panel) 90%, transparent)), - var(--panel); - box-shadow: inset 0 0 0 1px color-mix(in oklab, var(--line) 72%, transparent); -} - -.platform-selected-bot-head { - display: flex; - align-items: flex-start; - justify-content: space-between; - gap: 16px; -} - -.platform-selected-bot-headline { - display: flex; - align-items: flex-start; - justify-content: space-between; - gap: 16px; - flex-wrap: wrap; - min-width: 0; - flex: 1 1 auto; -} - -.platform-selected-bot-title-block { - display: flex; - flex-direction: column; - gap: 8px; - min-width: 0; -} - -.platform-selected-bot-statuses { - display: inline-flex; - align-items: center; - flex-wrap: wrap; - gap: 8px; -} - -.platform-selected-bot-name-row { - display: inline-flex; - align-items: center; - gap: 10px; - min-width: 0; -} - -.platform-selected-bot-actions { - display: inline-flex; - flex-wrap: wrap; - justify-content: flex-end; - gap: 10px; - padding: 4px; - border-radius: 999px; - border: 1px solid color-mix(in oklab, var(--line) 76%, transparent); - background: color-mix(in oklab, var(--panel-soft) 72%, transparent); -} - -.platform-more-menu-anchor { - position: relative; -} - -.platform-selected-bot-action-btn { - display: inline-flex; - align-items: center; - gap: 8px; - min-height: 36px; - padding: 0 14px; - border-radius: 999px; -} - -.platform-selected-bot-name { - font-size: 26px; - font-weight: 800; - color: var(--title); -} - -.platform-selected-bot-id { - font-size: 14px; - color: var(--muted); -} - -.platform-selected-bot-grid { - display: grid; - grid-template-columns: repeat(2, minmax(0, 1fr)); - gap: 12px 14px; - margin-top: 18px; 
-} - -.platform-selected-bot-info { - display: flex; - flex-direction: column; - gap: 6px; - min-width: 0; - padding: 12px 14px; - border-radius: 14px; - border: 1px solid color-mix(in oklab, var(--line) 72%, var(--brand-soft) 28%); - background: color-mix(in oklab, var(--panel-soft) 78%, transparent); -} - -.platform-selected-bot-info-label { - font-size: 12px; - color: var(--muted); -} - -.platform-selected-bot-info-value { - font-size: 14px; - color: var(--title); - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; -} - -.platform-selected-bot-last-row { - margin-top: 18px; - display: flex; - align-items: flex-end; - justify-content: space-between; - gap: 16px; -} - -.platform-selected-bot-last-copy { - min-width: 0; - flex: 1 1 auto; -} - -.platform-selected-bot-last-label { - display: block; - margin-bottom: 6px; - font-size: 12px; - font-weight: 700; - color: var(--title); -} - -.platform-selected-bot-last-preview { - display: block; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; - color: var(--muted); -} - -.platform-selected-bot-last-body { - margin-top: 10px; - padding: 14px; - border-radius: 14px; - border: 1px solid color-mix(in oklab, var(--line) 72%, transparent); - background: color-mix(in oklab, var(--panel-soft) 76%, transparent); - color: var(--muted); - line-height: 1.6; - white-space: pre-wrap; - word-break: break-word; -} - -.platform-last-action-btn { - flex-shrink: 0; - align-self: flex-end; - margin-bottom: 2px; -} - -.platform-compact-sheet-mask { - align-items: flex-end; - padding: 0; - background: rgba(8, 13, 22, 0.28); - backdrop-filter: blur(10px); - animation: platform-sheet-mask-in 240ms ease-out both; -} - -.platform-compact-sheet-mask.is-closing { - animation: platform-sheet-mask-out 220ms ease-in both; -} - -.platform-compact-sheet-card { - position: relative; - width: 100%; - max-height: min(84vh, 760px); - overflow: hidden; - border-radius: 28px 28px 0 0; - border: 1px solid color-mix(in 
oklab, var(--line) 78%, var(--brand-soft) 22%); - border-bottom: 0; - background: - linear-gradient(180deg, color-mix(in oklab, var(--panel) 92%, var(--brand-soft) 8%) 0%, var(--panel) 100%); - box-shadow: - 0 -24px 60px rgba(8, 17, 34, 0.28), - 0 0 0 1px rgba(255, 255, 255, 0.04) inset; - animation: platform-sheet-up 240ms cubic-bezier(0.18, 0.84, 0.26, 1) both; -} - -.platform-compact-sheet-card.is-closing { - animation: platform-sheet-down 220ms ease-in both; -} - -.platform-compact-sheet-handle { - width: 58px; - height: 6px; - border-radius: 999px; - margin: 12px auto 8px; - background: color-mix(in oklab, var(--line) 70%, var(--title) 30%); -} - -.platform-compact-sheet-body { - display: flex; - flex-direction: column; - gap: 0; - max-height: calc(min(84vh, 760px) - 34px); - overflow-y: auto; - padding: 8px 12px 14px; -} - -.platform-compact-sheet-close { - position: absolute; - top: 14px; - right: 14px; - z-index: 2; - background: color-mix(in oklab, var(--panel-soft) 82%, #ffffff 18%); -} - -.platform-compact-overview { - display: flex; - flex-direction: column; - gap: 12px; -} - -.platform-compact-overview-head h2 { - margin: 0; - font-size: 18px; -} - -.platform-selected-bot-card-compact { - border-radius: 22px; - border-color: color-mix(in oklab, var(--line) 72%, var(--brand-soft) 28%); - background: - linear-gradient(180deg, color-mix(in oklab, var(--panel-soft) 82%, transparent), color-mix(in oklab, var(--panel) 92%, transparent)), - var(--panel); - box-shadow: - inset 0 0 0 1px color-mix(in oklab, var(--line) 76%, transparent), - 0 10px 28px rgba(8, 17, 34, 0.12); -} - -@keyframes platform-sheet-up { - from { - transform: translateY(48px) scale(0.985); - opacity: 0; - } - to { - transform: translateY(0); - opacity: 1; - } -} - -@keyframes platform-sheet-down { - from { - transform: translateY(0) scale(1); - opacity: 1; - } - to { - transform: translateY(42px) scale(0.988); - opacity: 0; - } -} - -@keyframes platform-sheet-mask-in { - from { - opacity: 
0; - } - to { - opacity: 1; - } -} - -@keyframes platform-sheet-mask-out { - from { - opacity: 1; - } - to { - opacity: 0; - } -} - -.platform-image-list, -.platform-activity-list { - display: flex; - flex-direction: column; - gap: 10px; -} - -.platform-image-row, -.platform-activity-row { - display: flex; - align-items: flex-start; - justify-content: space-between; - gap: 12px; - padding: 12px 14px; - border-radius: 14px; - background: rgba(255, 255, 255, 0.03); - border: 1px solid rgba(255, 255, 255, 0.05); -} - -.platform-image-meta, -.platform-activity-detail, -.platform-activity-type { - font-size: 12px; - color: var(--muted); -} - -.platform-activity-row strong { - margin-right: 8px; -} - -.platform-activity-empty { - padding: 12px 14px; - border-radius: 14px; - border: 1px dashed rgba(97, 174, 255, 0.18); - color: var(--muted); - font-size: 12px; -} - -.platform-entry-grid { - display: grid; - grid-template-columns: repeat(3, minmax(0, 1fr)); - gap: 12px; -} - -.platform-entry-card { - display: flex; - flex-direction: column; - gap: 10px; - align-items: flex-start; - padding: 16px; - border-radius: 16px; - border: 1px solid rgba(97, 174, 255, 0.18); - background: linear-gradient(180deg, rgba(14, 22, 38, 0.84), rgba(8, 12, 21, 0.92)); - color: var(--text); - text-align: left; - transition: transform 0.18s ease, border-color 0.18s ease; -} - -.platform-entry-card:hover { - transform: translateY(-1px); - border-color: rgba(97, 174, 255, 0.34); -} - -.platform-entry-card strong { - color: var(--title); -} - -.platform-entry-card span { - font-size: 12px; - color: var(--muted); -} - -.platform-settings-shell { - max-width: min(1400px, 96vw); -} - -.platform-settings-info-card { - display: flex; - gap: 16px; - padding: 18px 20px; - border-radius: 18px; - border: 1px solid rgba(97, 174, 255, 0.28); - background: color-mix(in oklab, var(--panel) 68%, #dff0ff 32%); - color: var(--text); -} - -.platform-settings-info-icon { - width: 36px; - height: 36px; - min-width: 
36px; - min-height: 36px; - flex: 0 0 36px; - aspect-ratio: 1 / 1; - border-radius: 999px; - display: inline-flex; - align-items: center; - justify-content: center; - background: #4273f2; - color: #fff; - font-weight: 800; -} - -.platform-settings-toolbar { - display: flex; - align-items: center; - justify-content: space-between; - gap: 16px; -} - -.platform-settings-search { - flex: 1 1 auto; - max-width: 460px; -} - -.platform-settings-table-wrap { - border: 1px solid var(--line); - border-radius: 0; - overflow: hidden; - max-height: 56vh; - overflow-y: auto; -} - -.platform-settings-table th, -.platform-settings-table td { - vertical-align: top; -} - -.platform-setting-public { - margin-top: 6px; - font-size: 12px; - color: #7bcf57; -} - -.platform-setting-value { - display: inline-block; - max-width: 320px; - white-space: pre-wrap; - word-break: break-word; -} - -.platform-settings-actions { - display: flex; - gap: 8px; -} - -.platform-setting-editor { - width: min(640px, 92vw); -} - -.platform-settings-pager { - display: flex; - align-items: center; - justify-content: space-between; - gap: 16px; - font-size: 12px; - color: var(--muted); -} - -.platform-template-shell { - max-width: min(1400px, 96vw); -} - -.platform-template-layout { - display: grid; - grid-template-columns: 280px minmax(0, 1fr); - gap: 16px; - min-height: 60vh; -} - -.platform-template-tabs { - display: flex; - flex-direction: column; - gap: 10px; - overflow-y: auto; - padding-right: 4px; -} - -.platform-template-tab { - display: flex; - flex-direction: column; - gap: 6px; - align-items: flex-start; - padding: 14px 16px; - border-radius: 16px; - border: 1px solid rgba(97, 174, 255, 0.16); - background: rgba(255, 255, 255, 0.03); - color: var(--text); - text-align: left; -} - -.platform-template-tab strong { - color: var(--title); -} - -.platform-template-tab span, -.platform-template-hint { - font-size: 12px; - color: var(--muted); - line-height: 1.6; -} - -.platform-template-tab.is-active { 
- border-color: rgba(97, 174, 255, 0.42); - background: rgba(97, 174, 255, 0.08); -} - -.platform-template-editor { - min-width: 0; - display: flex; - flex-direction: column; - gap: 12px; -} - -.platform-template-header { - display: flex; - align-items: flex-start; - justify-content: space-between; - gap: 16px; -} - -.platform-template-textarea { - min-height: 0; - flex: 1 1 auto; -} - -.skill-market-modal-shell { - max-width: min(1480px, 96vw); - display: flex; - flex-direction: column; - min-height: min(920px, calc(100dvh - 48px)); -} - -.skill-market-browser-shell { - max-width: min(1400px, 96vw); - width: min(1400px, 96vw); - display: flex; - flex-direction: column; - min-height: min(920px, calc(100dvh - 48px)); -} - -.skill-market-page-shell { - width: 100%; - margin: 0; - padding: 18px; - border-radius: 22px; - gap: 18px; - display: flex; - flex-direction: column; - min-height: calc(100dvh - 126px); -} - -.skill-market-page-info-card { - display: flex; - align-items: center; - justify-content: space-between; - gap: 18px; - padding: 22px 24px; - border-radius: 22px; -} - -.skill-market-page-info-main { - display: flex; - align-items: flex-start; - gap: 16px; - min-width: 0; - flex: 1 1 auto; -} - -.skill-market-page-info-copy { - min-width: 0; - display: grid; - gap: 6px; -} - -.skill-market-page-info-copy strong { - display: block; - color: var(--title); - font-size: 17px; - line-height: 1.35; -} - -.skill-market-page-info-copy div { - color: var(--subtitle); - font-size: 13px; - line-height: 1.7; -} - -.skill-market-admin-toolbar { - display: flex; - align-items: center; - justify-content: space-between; - gap: 16px; - flex-wrap: wrap; -} - -.skill-market-search { - flex: 1 1 auto; - max-width: 560px; -} - -.skill-market-admin-actions { - display: flex; - gap: 10px; - flex-wrap: wrap; - align-items: center; -} - -.skill-market-create-btn { - display: inline-flex; - align-items: center; - justify-content: center; - gap: 8px; - white-space: nowrap; -} - 
-.skill-market-create-btn svg { - flex: 0 0 auto; -} - -.skill-market-page-workspace { - position: relative; - min-height: 0; - flex: 1 1 auto; - padding-top: 3px; - padding-right: 4px; - overflow: auto; -} - -.skill-market-card-grid { - display: grid; - grid-template-columns: repeat(2, minmax(0, 1fr)); - gap: 12px; - align-content: start; - min-height: 0; - padding-right: 4px; -} - -.skill-market-list-shell { - grid-template-columns: repeat(3, minmax(0, 1fr)); -} - -.skill-market-browser-grid { - display: grid; - grid-template-columns: repeat(3, minmax(0, 1fr)); - gap: 12px; - min-height: 0; - flex: 1 1 auto; - align-content: start; - grid-auto-rows: 1fr; - padding-top: 3px; -} - -.skill-market-card, -.skill-market-empty-card { - min-height: 188px; -} - -.skill-market-card { - display: flex; - flex-direction: column; - gap: 10px; - padding: 14px; - border-radius: 18px; - border: 1px solid color-mix(in oklab, var(--line) 72%, #f0b36a 28%); - background: - radial-gradient(circle at top right, color-mix(in oklab, var(--brand-soft) 36%, transparent), transparent 38%), - linear-gradient(180deg, color-mix(in oklab, var(--panel) 88%, #ffffff 12%), color-mix(in oklab, var(--panel) 96%, #f4eadf 4%)); - box-shadow: 0 14px 30px rgba(13, 24, 45, 0.12); - transition: transform 0.18s ease, border-color 0.18s ease, box-shadow 0.18s ease; -} - -.skill-market-card:hover, -.skill-market-card.is-active { - transform: translateY(-1px); - border-color: color-mix(in oklab, var(--brand) 44%, var(--line) 56%); - box-shadow: 0 18px 34px rgba(13, 24, 45, 0.16); -} - -.skill-market-card-top, -.skill-market-editor-head { - display: flex; - align-items: flex-start; - justify-content: space-between; - gap: 12px; -} - -.skill-market-card-title-wrap { - min-width: 0; -} - -.skill-market-card-title-wrap h4 { - margin: 0; - font-size: 17px; - line-height: 1.25; - color: var(--title); - word-break: break-word; -} - -.skill-market-card-key { - margin-top: 5px; - color: var(--muted); - font-size: 
11px; - word-break: break-word; -} - -.skill-market-card-actions { - display: flex; - gap: 8px; -} - -.skill-market-card-desc { - margin: 0; - color: var(--subtitle); - font-size: 13px; - line-height: 1.55; - min-height: 60px; - display: -webkit-box; - -webkit-line-clamp: 3; - -webkit-box-orient: vertical; - overflow: hidden; -} - -.skill-market-card-meta { - display: grid; - gap: 6px; - color: var(--muted); - font-size: 11px; -} - -.skill-market-card-meta span, -.skill-market-card-footer { - display: flex; - align-items: center; - gap: 8px; -} - -.skill-market-card-footer { - margin-top: auto; - justify-content: space-between; - gap: 12px; - padding-top: 10px; - border-top: 1px solid color-mix(in oklab, var(--line) 78%, transparent); - color: var(--muted); - font-size: 11px; -} - -.skill-market-card-status.is-ok { - color: #d98c1f; -} - -.skill-market-card-status.is-missing { - color: var(--err); -} - -.skill-market-browser-card { - min-height: 312px; - padding-bottom: 16px; -} - -.skill-market-browser-badge { - font-size: 11px; - padding: 6px 10px; - border-radius: 16px; -} - -.skill-market-browser-desc { - min-height: 80px; - -webkit-line-clamp: 4; -} - -.skill-market-browser-meta { - margin-top: auto; - gap: 8px; - font-size: 12px; -} - -.skill-market-browser-footer { - align-items: flex-end; -} - -.skill-market-install-btn { - min-height: 38px; - padding-inline: 14px; - border-radius: 16px; - box-shadow: 0 10px 24px rgba(43, 87, 199, 0.24); -} - -.skill-market-empty-card { - border: 1px dashed color-mix(in oklab, var(--line) 78%, var(--brand) 22%); - border-radius: 22px; - background: color-mix(in oklab, var(--panel) 92%, var(--brand-soft) 8%); -} - -.skill-market-editor { - display: flex; - flex-direction: column; - gap: 14px; - min-width: 0; - min-height: 0; -} - -.skill-market-editor-textarea { - min-height: 180px; -} - -.skill-market-upload-card { - display: grid; - gap: 10px; - padding: 14px; - border-radius: 14px; - border: 1px solid color-mix(in oklab, 
var(--line) 78%, var(--brand) 22%); - background: color-mix(in oklab, var(--panel) 92%, var(--brand-soft) 8%); -} - -.skill-market-upload-card.has-file { - border-color: color-mix(in oklab, var(--brand) 50%, var(--line) 50%); -} - -.skill-market-upload-foot { - color: var(--muted); - font-size: 12px; - line-height: 1.55; -} - -.skill-market-file-picker { - position: relative; - display: flex; - align-items: center; - justify-content: space-between; - gap: 14px; - min-height: 58px; - padding: 12px 14px; - border-radius: 12px; - border: 1px dashed color-mix(in oklab, var(--line) 60%, var(--brand) 40%); - background: color-mix(in oklab, var(--panel) 82%, #ffffff 18%); - color: var(--text); - cursor: pointer; - transition: border-color 0.18s ease, background 0.18s ease; -} - -.skill-market-file-picker:hover { - border-color: color-mix(in oklab, var(--brand) 58%, var(--line) 42%); - background: color-mix(in oklab, var(--panel) 74%, var(--brand-soft) 26%); -} - -.skill-market-file-picker input { - position: absolute; - inset: 0; - opacity: 0; - cursor: pointer; -} - -.skill-market-file-picker-copy { - min-width: 0; - display: grid; - gap: 0; -} - -.skill-market-file-picker-title { - color: var(--title); - font-size: 13px; - font-weight: 700; - line-height: 1.4; - word-break: break-word; -} - -.skill-market-file-picker-action { - flex: 0 0 auto; - display: inline-flex; - align-items: center; - justify-content: center; - min-height: 30px; - padding: 0 12px; - border-radius: 999px; - background: color-mix(in oklab, var(--brand) 14%, transparent); - color: var(--icon); - font-size: 12px; - font-weight: 700; -} - -.skill-market-browser-toolbar, -.skill-market-pager, -.row-actions-inline { - display: flex; - align-items: center; - justify-content: space-between; - gap: 14px; -} - -.skill-market-pager { - margin-top: 16px; - font-size: 12px; - color: var(--muted); -} - -.row-actions-inline { - justify-content: flex-end; - flex-wrap: wrap; -} - -.skill-market-page-size-hint { - 
white-space: nowrap; -} - -.skill-market-drawer-mask { - position: fixed; - inset: 0; - background: rgba(12, 18, 31, 0.26); - opacity: 0; - pointer-events: none; - transition: opacity 0.22s ease; - border-radius: 0; -} - -.skill-market-drawer-mask.is-open { - opacity: 1; - pointer-events: auto; -} - -.skill-market-drawer { - position: fixed; - top: 94px; - right: 18px; - bottom: 18px; - width: min(460px, calc(100vw - 36px)); - transform: translateX(calc(100% + 20px)); - transition: transform 0.22s ease; - z-index: 41; -} - -.skill-market-drawer.is-open { - transform: translateX(0); -} - -.skill-market-drawer .skill-market-editor { - height: 100%; - padding: 22px; - border-radius: 0; - box-shadow: 0 18px 42px rgba(13, 24, 45, 0.24); - overflow: auto; -} - -.app-shell[data-theme='light'] .skill-market-file-picker { - background: color-mix(in oklab, var(--panel) 80%, #f7fbff 20%); -} - -.app-shell[data-theme='light'] .skill-market-drawer-mask { - background: rgba(111, 138, 179, 0.16); -} - -.app-shell[data-theme='light'] .platform-entry-card { - border-color: #b7c7e6; - background: linear-gradient(180deg, #f7fbff 0%, #edf4ff 100%); - color: #173057; -} - -.app-shell[data-theme='light'] .platform-entry-card strong { - color: #173057; -} - -.app-shell[data-theme='light'] .platform-entry-card span { - color: #49648f; -} - -.app-shell[data-theme='light'] .platform-template-tab { - background: #f6f9ff; - border-color: #d4e1f7; -} - -.app-shell[data-theme='light'] .platform-template-tab.is-active { - background: #e9f1ff; - border-color: #8db4ff; -} - -.app-shell[data-theme='light'] .platform-selected-bot-last-body, -.app-shell[data-theme='light'] .platform-selected-bot-info, -.app-shell[data-theme='light'] .platform-image-row, -.app-shell[data-theme='light'] .platform-activity-row, -.app-shell[data-theme='light'] .platform-usage-row { - background: #f6f9ff; - border-color: #d4e1f7; -} - -.app-shell[data-theme='light'] .platform-resource-meter-track { - background: #e9eef9; 
-} - -.platform-usage-summary { - display: inline-flex; - flex-wrap: wrap; - gap: 16px; - font-size: 12px; - color: var(--muted); -} - -.platform-model-analytics-head { - display: flex; - align-items: flex-start; - justify-content: space-between; - gap: 16px; -} - -.platform-model-analytics-subtitle { - margin-top: 6px; - font-size: 13px; - color: var(--muted); -} - -.platform-model-analytics-total { - flex: 0 0 auto; - display: grid; - gap: 6px; - justify-items: end; - text-align: right; -} - -.platform-model-analytics-total strong { - font-size: clamp(28px, 4vw, 48px); - line-height: 1; -} - -.platform-model-analytics-total span { - font-size: 13px; - color: var(--muted); -} - -.platform-model-analytics-legend { - display: flex; - flex-wrap: wrap; - gap: 10px; -} - -.platform-model-analytics-chip { - display: inline-flex; - align-items: center; - gap: 10px; - padding: 10px 14px; - border-radius: 999px; - background: color-mix(in oklab, var(--panel-soft) 82%, transparent); - border: 1px solid color-mix(in oklab, var(--line) 76%, transparent); - font-size: 13px; -} - -.platform-model-analytics-chip strong { - font-size: 13px; -} - -.platform-model-analytics-chip span:last-child { - color: var(--muted); -} - -.platform-model-analytics-chip-dot { - width: 12px; - height: 12px; - border-radius: 999px; - flex: 0 0 auto; -} - -.platform-model-chart { - width: 100%; - overflow-x: auto; -} - -.platform-model-chart svg { - display: block; - width: 100%; - min-width: 720px; - height: auto; -} - -.platform-model-chart-grid { - stroke: color-mix(in oklab, var(--brand-soft) 34%, var(--line) 66%); - stroke-width: 1; -} - -.platform-model-chart-axis-label { - fill: var(--muted); - font-size: 12px; -} - -.platform-usage-table { - display: flex; - flex-direction: column; - gap: 6px; -} - -.platform-usage-head, -.platform-usage-row { - display: grid; - grid-template-columns: minmax(150px, 1.3fr) minmax(220px, 2fr) minmax(180px, 1.2fr) 90px 90px 90px minmax(130px, 1fr); - gap: 12px; 
- align-items: start; -} - -.platform-usage-head { - padding: 0 12px; - font-size: 11px; - text-transform: uppercase; - letter-spacing: 0.1em; - color: var(--muted); -} - -.platform-usage-row { - padding: 12px; - border-radius: 14px; - background: rgba(255, 255, 255, 0.03); - border: 1px solid rgba(255, 255, 255, 0.05); - font-size: 12px; -} - -.platform-usage-meta { - margin-top: 4px; - color: var(--muted); -} - -.platform-usage-content-cell, -.platform-usage-model { - min-width: 0; -} - -.platform-usage-preview { - color: var(--muted); - display: -webkit-box; - -webkit-line-clamp: 3; - -webkit-box-orient: vertical; - overflow: hidden; -} - -.platform-usage-pager { - display: flex; - align-items: center; - justify-content: space-between; - gap: 16px; - font-size: 12px; - color: var(--muted); -} - -.platform-usage-pager-actions { - display: inline-flex; - align-items: center; - gap: 8px; -} - .pager-icon-btn { width: 36px; height: 36px; @@ -2752,8 +1065,10 @@ body { color: var(--subtitle); } -.platform-last-action-modal { - width: min(760px, 92vw); +.platform-usage-pager-actions { + display: inline-flex; + align-items: center; + gap: 8px; } .platform-modal { diff --git a/frontend/src/App.h5.css b/frontend/src/App.h5.css index 5c96841..3984b11 100644 --- a/frontend/src/App.h5.css +++ b/frontend/src/App.h5.css @@ -113,6 +113,33 @@ background: var(--panel); } +.app-nav-drawer-hero { + min-height: 220px; +} + +.app-nav-drawer-groups { + overflow-y: auto; + padding: 12px 0 16px; +} + +.app-nav-drawer-group { + display: grid; + gap: 8px; +} + +.app-nav-drawer-group + .app-nav-drawer-group { + margin-top: 8px; +} + +.app-nav-drawer-group-title { + padding: 0 24px; + font-size: 11px; + font-weight: 800; + letter-spacing: 0.16em; + text-transform: uppercase; + color: var(--muted); +} + .app-bot-panel-drawer-item { width: 100%; border: 0; @@ -150,62 +177,19 @@ .grid-ops.grid-ops-compact { grid-template-columns: minmax(0, 1fr) minmax(260px, 360px); } - - 
.platform-summary-grid { - grid-template-columns: repeat(3, minmax(0, 1fr)); - } - - .platform-resource-card { - grid-column: span 3; - } } @media (max-width: 1160px) { .grid-2, .grid-ops, - .wizard-steps, - .wizard-steps-4, - .factory-kpi-grid, - .summary-grid, - .wizard-agent-layout { + .factory-kpi-grid { grid-template-columns: 1fr; } - .platform-grid, - .platform-main-grid, - .platform-monitor-grid, - .platform-entry-grid, - .platform-summary-grid { + .app-layout.has-nav-rail { grid-template-columns: 1fr; } - .platform-resource-card { - grid-column: auto; - } - - .platform-template-layout { - grid-template-columns: 1fr; - } - - .skill-market-admin-layout, - .skill-market-card-grid, - .skill-market-browser-grid { - grid-template-columns: 1fr; - } - - .skill-market-list-shell { - grid-template-columns: repeat(2, minmax(0, 1fr)); - } - - .platform-template-tabs { - max-height: 220px; - } - - .platform-usage-head, - .platform-usage-row { - grid-template-columns: minmax(140px, 1.1fr) minmax(200px, 1.8fr) minmax(160px, 1fr) 70px 70px 70px 100px; - } - .app-frame { height: auto; min-height: calc(100vh - 36px); @@ -241,23 +225,13 @@ justify-content: flex-end; } - .wizard-shell { - min-height: 640px; + .app-route-trail { + row-gap: 4px; } + } @media (max-width: 980px) { - .app-shell-compact .platform-grid.is-compact { - height: 100%; - min-height: 0; - grid-template-rows: minmax(0, 1fr); - } - - .app-shell-compact .platform-bot-list-panel { - height: 100%; - min-height: 0; - } - .grid-ops.grid-ops-compact { grid-template-columns: 1fr; grid-template-rows: minmax(0, 1fr); @@ -270,101 +244,6 @@ min-height: 0; } - .platform-bot-list-panel { - min-height: calc(100dvh - 170px); - } - - .platform-bot-actions, - .platform-image-row, - .platform-activity-row { - flex-direction: column; - align-items: flex-start; - } - - .platform-selected-bot-headline { - flex-direction: column; - align-items: flex-start; - gap: 8px; - } - - .platform-selected-bot-head { - flex-direction: 
column; - align-items: stretch; - } - - .platform-selected-bot-title-block, - .platform-selected-bot-statuses { - width: 100%; - } - - .platform-selected-bot-actions { - justify-content: flex-start; - } - - .platform-selected-bot-grid { - grid-template-columns: 1fr; - } - - .platform-resource-meter { - grid-template-columns: 24px minmax(0, 1fr) 64px; - } - - .platform-usage-head { - display: none; - } - - .platform-model-analytics-head { - flex-direction: column; - align-items: stretch; - } - - .platform-model-analytics-total { - justify-items: start; - text-align: left; - } - - .platform-usage-row { - grid-template-columns: 1fr; - } - - .platform-selected-bot-last-row, - .platform-settings-pager, - .platform-usage-pager, - .platform-template-header, - .skill-market-admin-toolbar, - .skill-market-browser-toolbar, - .skill-market-pager, - .skill-market-page-info-card, - .skill-market-page-info-main, - .skill-market-editor-head, - .skill-market-card-top, - .skill-market-card-footer, - .row-actions-inline { - flex-direction: column; - align-items: stretch; - } - - .platform-compact-sheet-card { - max-height: 90dvh; - } - - .platform-compact-sheet-body { - max-height: calc(90dvh - 60px); - padding: 0 10px 12px; - } - - .skill-market-list-shell { - grid-template-columns: 1fr; - } - - .skill-market-drawer { - position: fixed; - top: 84px; - right: 12px; - bottom: 12px; - width: min(460px, calc(100vw - 24px)); - } - .app-route-crumb { width: 100%; text-align: left; diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index e59e0b9..5f0771d 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -1,28 +1,31 @@ import { useEffect, useState, type ReactElement } from 'react'; import axios from 'axios'; -import { Activity, ChevronDown, ChevronUp, Menu, MessageSquareText, MoonStar, SunMedium, X } from 'lucide-react'; -import { useAppStore } from './store/appStore'; -import { useBotsSync } from './hooks/useBotsSync'; -import { APP_ENDPOINTS } from './config/env'; 
-import { pickLocale } from './i18n'; -import { appZhCn } from './i18n/app.zh-cn'; -import { appEn } from './i18n/app.en'; -import { LucentTooltip } from './components/lucent/LucentTooltip'; +import { Activity, Bot, Boxes, FileText, Hammer, LayoutDashboard, Menu, MessageSquareText, MoonStar, Settings2, SunMedium, X } from 'lucide-react'; + import { PasswordInput } from './components/PasswordInput'; +import { LucentTooltip } from './components/lucent/LucentTooltip'; +import { APP_ENDPOINTS } from './config/env'; +import { useBotsSync } from './hooks/useBotsSync'; +import { appEn } from './i18n/app.en'; +import { appZhCn } from './i18n/app.zh-cn'; +import { pickLocale } from './i18n'; +import { BotHomePage } from './modules/bot-home/BotHomePage'; +import { PlatformAdminDashboardPage } from './modules/platform/PlatformAdminDashboardPage'; +import { PlatformBotManagementPage } from './modules/platform/PlatformBotManagementPage'; +import { PlatformImageManagementPage } from './modules/platform/PlatformImageManagementPage'; +import { PlatformSettingsPage } from './modules/platform/components/PlatformSettingsModal'; +import { SkillMarketManagerPage } from './modules/platform/components/SkillMarketManagerModal'; +import { TemplateManagerPage } from './modules/platform/components/TemplateManagerModal'; +import { useAppStore } from './store/appStore'; import { clearBotAccessPassword, getBotAccessPassword, setBotAccessPassword } from './utils/botAccess'; import { clearPanelAccessPassword, getPanelAccessPassword, setPanelAccessPassword } from './utils/panelAccess'; -import { BotHomePage } from './modules/bot-home/BotHomePage'; -import { PlatformDashboardPage } from './modules/platform/PlatformDashboardPage'; -import { SkillMarketManagerPage } from './modules/platform/components/SkillMarketManagerModal'; -import { readCompactModeFromUrl, useAppRoute } from './utils/appRoute'; +import { getAppRouteMeta, navigateToRoute, readCompactModeFromUrl, useAppRoute, type AppRoute } from 
'./utils/appRoute'; +import './components/ui/SharedUi.css'; import './App.css'; import './App.h5.css'; +import './modules/platform/PlatformDashboardPage.css'; -const defaultLoadingPage = { - title: 'Dashboard Nanobot', - subtitle: '平台正在准备管理面板', - description: '请稍候,正在加载 Bot 平台数据。', -}; +const defaultLoadingTitle = 'Dashboard Nanobot'; type CompactBotPanelTab = 'chat' | 'runtime'; @@ -30,11 +33,12 @@ function AuthenticatedApp() { const route = useAppRoute(); const { theme, setTheme, locale, setLocale, activeBots } = useAppStore(); const t = pickLocale(locale, { 'zh-cn': appZhCn, en: appEn }); + const isZh = locale === 'zh'; const [viewportCompact, setViewportCompact] = useState(() => { if (typeof window === 'undefined' || typeof window.matchMedia !== 'function') return false; return window.matchMedia('(max-width: 980px)').matches; }); - const [headerCollapsed, setHeaderCollapsed] = useState(false); + const [appNavDrawerOpen, setAppNavDrawerOpen] = useState(false); const [botPanelDrawerOpen, setBotPanelDrawerOpen] = useState(false); const [botCompactPanelTab, setBotCompactPanelTab] = useState('chat'); const [singleBotPassword, setSingleBotPassword] = useState(''); @@ -57,44 +61,26 @@ function AuthenticatedApp() { return () => media.removeEventListener('change', apply); }, []); - useEffect(() => { - setHeaderCollapsed(readCompactModeFromUrl() || viewportCompact); - }, [viewportCompact, route.kind, forcedBotId]); - const compactMode = readCompactModeFromUrl() || viewportCompact; - const isCompactShell = compactMode; - const hideHeader = route.kind === 'dashboard' && compactMode; - const showBotPanelDrawerEntry = route.kind === 'bot' && compactMode; - const allowHeaderCollapse = isCompactShell && !showBotPanelDrawerEntry; const forcedBot = forcedBotId ? 
activeBots[forcedBotId] : undefined; const forcedBotName = String(forcedBot?.name || '').trim(); const forcedBotIdLabel = String(forcedBotId || '').trim(); - const botHeaderTitle = forcedBotName || defaultLoadingPage.title; - const botHeaderSubtitle = forcedBotIdLabel || defaultLoadingPage.title; - const botDocumentTitle = [forcedBotName, forcedBotIdLabel].filter(Boolean).join(' ') || defaultLoadingPage.title; + const botDocumentTitle = [forcedBotName, forcedBotIdLabel].filter(Boolean).join(' ') || defaultLoadingTitle; const shouldPromptSingleBotPassword = Boolean( route.kind === 'bot' && forcedBotId && forcedBot?.has_access_password && !singleBotUnlocked, ); - const headerTitle = - showBotPanelDrawerEntry - ? (botCompactPanelTab === 'runtime' ? t.botPanels.runtime : t.botPanels.chat) - : route.kind === 'bot' - ? botHeaderTitle - : route.kind === 'dashboard-skills' - ? (locale === 'zh' ? '技能市场管理' : 'Skill Marketplace') - : t.title; + const routeMeta = getAppRouteMeta(route, { isZh, botName: forcedBotName || undefined }); + const showNavRail = route.kind !== 'bot' && !compactMode; + const showAppNavDrawerEntry = route.kind !== 'bot' && compactMode; + const showBotPanelDrawerEntry = route.kind === 'bot' && compactMode; + const useCompactSimpleHeader = showBotPanelDrawerEntry || showAppNavDrawerEntry; + const headerTitle = showBotPanelDrawerEntry + ? (botCompactPanelTab === 'runtime' ? t.botPanels.runtime : t.botPanels.chat) + : routeMeta.title; useEffect(() => { - if (route.kind === 'dashboard') { - document.title = t.title; - return; - } - if (route.kind === 'dashboard-skills') { - document.title = `${t.title} - ${locale === 'zh' ? '技能市场' : 'Skill Marketplace'}`; - return; - } - document.title = `${t.title} - ${botDocumentTitle}`; - }, [botDocumentTitle, locale, route.kind, t.title]); + document.title = `${t.title} - ${route.kind === 'bot' ? 
botDocumentTitle : routeMeta.title}`; + }, [botDocumentTitle, route.kind, routeMeta.title, t.title]); useEffect(() => { setSingleBotUnlocked(false); @@ -109,6 +95,10 @@ function AuthenticatedApp() { } }, [forcedBotId, showBotPanelDrawerEntry]); + useEffect(() => { + if (!showAppNavDrawerEntry) setAppNavDrawerOpen(false); + }, [route.kind, showAppNavDrawerEntry]); + useEffect(() => { if (route.kind !== 'bot' || !forcedBotId || !forcedBot?.has_access_password || singleBotUnlocked) return; const stored = getBotAccessPassword(forcedBotId); @@ -155,81 +145,151 @@ function AuthenticatedApp() { } }; - const navigateToDashboard = () => { - if (typeof window === 'undefined') return; - window.history.pushState({}, '', '/dashboard'); - window.dispatchEvent(new PopStateEvent('popstate')); - }; - const botPanelLabels = t.botPanels; - const drawerBotName = String(forcedBot?.name || '').trim() || defaultLoadingPage.title; + const drawerBotName = String(forcedBot?.name || '').trim() || defaultLoadingTitle; const drawerBotId = String(forcedBotId || '').trim() || '-'; const nextTheme = theme === 'dark' ? 'light' : 'dark'; const nextLocale = locale === 'zh' ? 'en' : 'zh'; + const navGroups: Array<{ + key: 'admin' | 'system'; + label: string; + items: Array<{ + kind: Exclude; + label: string; + icon: typeof LayoutDashboard; + }>; + }> = [ + { + key: 'admin', + label: 'Admin', + items: [ + { kind: 'admin-dashboard', label: 'Dashboard', icon: LayoutDashboard }, + { kind: 'admin-bots', label: isZh ? 'Bot 管理' : 'Bot Management', icon: Bot }, + ], + }, + { + key: 'system', + label: 'System', + items: [ + { kind: 'system-skills', label: isZh ? '技能市场' : 'Skill Marketplace', icon: Hammer }, + { kind: 'system-templates', label: isZh ? '模版管理' : 'Template Management', icon: FileText }, + { kind: 'system-settings', label: isZh ? '参数管理' : 'Parameter Management', icon: Settings2 }, + { kind: 'system-images', label: isZh ? 
'镜像管理' : 'Image Management', icon: Boxes }, + ], + }, + ]; + const currentNavItem = navGroups + .flatMap((group) => group.items) + .find((item) => item.kind === routeMeta.navKey); + const HeaderIcon = currentNavItem?.icon || Bot; + const desktopHeaderText = route.kind === 'bot' + ? [forcedBotName || routeMeta.title, forcedBotIdLabel].filter(Boolean).join(' / ') || routeMeta.title + : routeMeta.headerTrail; + + const renderRoutePage = () => { + switch (route.kind) { + case 'admin-dashboard': + return ; + case 'admin-bots': + return ; + case 'system-skills': + return ; + case 'system-templates': + return ; + case 'system-settings': + return ; + case 'system-images': + return ; + case 'bot': + return ( + + ); + default: + return ; + } + }; return (
-
- {!hideHeader ? ( -
{ - if (allowHeaderCollapse && headerCollapsed) setHeaderCollapsed(false); - }} - > -
+
+ {showNavRail ? ( + + ) : null} + +
+
+
- {showBotPanelDrawerEntry ? ( + {useCompactSimpleHeader ? ( ) : null} - {!showBotPanelDrawerEntry ? ( - Nanobot - ) : null}
-

{headerTitle}

- {!showBotPanelDrawerEntry && route.kind === 'dashboard-skills' ? ( - - ) : !showBotPanelDrawerEntry ? ( -
- {route.kind === 'dashboard' - ? (locale === 'zh' ? '平台总览' : 'Platform overview') - : route.kind === 'bot' - ? botHeaderSubtitle - : (locale === 'zh' ? 'Bot 首页' : 'Bot Home')} + {!useCompactSimpleHeader ? ( +
+ +

+ {desktopHeaderText} +

- ) : null} - {allowHeaderCollapse ? ( - - ) : null} + ) : ( +

{headerTitle}

+ )}
- {showBotPanelDrawerEntry ? ( + {useCompactSimpleHeader ? (
- ) : !headerCollapsed ? ( + ) : (
@@ -278,28 +338,72 @@ function AuthenticatedApp() {
- ) : null} + )}
- ) : null} -
- {route.kind === 'dashboard' ? ( - - ) : route.kind === 'dashboard-skills' ? ( - - ) : ( - - )} -
+
+ {renderRoutePage()} +
+
+ {showAppNavDrawerEntry && appNavDrawerOpen ? ( +
setAppNavDrawerOpen(false)}> + +
+ ) : null} + {showBotPanelDrawerEntry && botPanelDrawerOpen ? (
setBotPanelDrawerOpen(false)}>