Compare commits

...

38 Commits
dev ... main

Author SHA1 Message Date
mula.liu ecf223f945 v0.1.5 2026-04-28 16:25:46 +08:00
mula.liu 852e60435b v0.1.5 2026-04-26 15:38:14 +08:00
mula.liu 924f5452d2 v0.1.5 2026-04-26 15:27:02 +08:00
mula.liu b36d1b6bcb v0.1.5 2026-04-26 14:49:33 +08:00
mula.liu 54a880f376 v0.1.5 2026-04-25 13:55:24 +08:00
mula.liu aeaaa4fde5 v0.1.5 2026-04-24 16:57:29 +08:00
mula.liu ad2af1e71f v0.1.5 2026-04-24 11:07:52 +08:00
mula.liu 02a4000416 v0.1.4-p6 2026-04-17 13:53:47 +08:00
mula.liu 4c99826863 v0.1.4-p5 2026-04-14 10:41:09 +08:00
mula.liu fb461a7f5b v0.1.4-p5 2026-04-14 10:04:12 +08:00
mula.liu a6ec2368f4 v0.1.4-p5 2026-04-13 21:25:25 +08:00
mula.liu 3622117d85 fix git bugs. 2026-04-13 21:03:07 +08:00
mula.liu 9f98d3f68d fix git bugs. 2026-04-13 20:39:35 +08:00
mula.liu a774c398e8 Merge remote-tracking branch 'origin/main' 2026-04-13 20:10:24 +08:00
mula.liu e743ae7db5 fix git bugs. 2026-04-13 20:07:07 +08:00
AlanPaine 9331362ff4 Fix data issues 2026-04-13 19:55:43 +08:00
AlanPaine 725acd57c1 feat: add build.sh for dynamic docker image building and update deploy scripts 2026-04-13 19:35:41 +08:00
mula.liu f904d97a3d fix git bugs. 2026-04-13 19:28:36 +08:00
mula.liu 7971182478 Deployment files for both modes 2026-04-13 18:10:25 +08:00
mula.liu e8932bec17 Complete deployment files 2026-04-13 16:36:58 +08:00
mula.liu 4d7aa42a18 v0.1.4-p5 2026-04-13 15:54:26 +08:00
mula.liu 9ad37d0aa6 Added full-deployment script 2026-04-13 15:47:23 +08:00
mula.liu ae34bfc6a0 v0.1.4-p5 2026-04-05 00:29:37 +08:00
mula.liu ca1f941e4c v0.1.4-p5 2026-04-03 23:00:08 +08:00
mula.liu 95e3fd6c38 v0.1.4-p4 2026-04-02 22:11:28 +08:00
mula.liu 08b35d632b v0.1.4-p4 2026-04-02 20:42:56 +08:00
mula.liu 3ca7eff38b v0.1.4-p4 2026-04-02 20:27:06 +08:00
mula.liu 5058599de4 v0.1.4-p4 2026-04-02 13:00:15 +08:00
mula.liu 12ce92c6d9 v0.1.4-p4 2026-04-02 12:40:17 +08:00
mula.liu 9699b4e7c9 v0.1.4-p4 2026-04-02 12:14:08 +08:00
mula.liu 0167a9bc8a v0.1.4-p4 2026-04-01 17:20:17 +08:00
mula.liu ad59bc3794 v0.1.4-p4 2026-04-01 15:19:14 +08:00
mula.liu d196e57804 v0.1.4-p4 2026-03-31 14:56:31 +08:00
mula.liu c8d11ff654 v0.1.4-p4 2026-03-31 14:07:27 +08:00
mula.liu 1d20c8edb4 v0.1.4-p4 2026-03-31 14:04:34 +08:00
mula.liu 41212a7ac9 v0.1.4-p4 2026-03-31 12:31:47 +08:00
mula.liu d0e6171120 v0.1.4-p4 2026-03-28 10:17:11 +08:00
mula.liu 1ef72df0b1 v0.1.4-p4 2026-03-27 02:09:25 +08:00
290 changed files with 46887 additions and 25996 deletions


@@ -6,8 +6,15 @@ frontend/node_modules
frontend/dist
backend/venv
data
workspace
data/*
!data/templates/
!data/templates/**
!data/skills/
!data/skills/**
!data/model/
data/model/*
!data/model/README.md
/workspace
**/__pycache__
**/*.pyc

.env.full.example 100644

@@ -0,0 +1,84 @@
# Public exposed port (only nginx is exposed)
NGINX_PORT=8080
# Project data is always mounted from the repository root `./data`.
# Only workspace root still needs an absolute host path.
HOST_BOTS_WORKSPACE_ROOT=/opt/dashboard-nanobot/workspace/bots
# Fixed Docker bridge subnet for the compose network.
# Change this if it conflicts with your host LAN / VPN / intranet routing.
DOCKER_NETWORK_NAME=dashboard-nanobot-network
DOCKER_NETWORK_SUBNET=172.20.0.0/16
# Optional custom image tags
BACKEND_IMAGE_TAG=latest
FRONTEND_IMAGE_TAG=latest
# Optional base images / mirrors
PYTHON_BASE_IMAGE=python:3.12-slim
NODE_BASE_IMAGE=node:22-alpine
NGINX_BASE_IMAGE=nginx:alpine
POSTGRES_IMAGE=postgres:16-alpine
REDIS_IMAGE=redis:7-alpine
# Python package index mirror (recommended in CN)
PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple
PIP_TRUSTED_HOST=pypi.tuna.tsinghua.edu.cn
# Frontend package registry mirror (used by yarn, recommended in CN)
NPM_REGISTRY=https://registry.npmmirror.com
# Container timezone
TZ=Asia/Shanghai
# PostgreSQL bootstrap account.
# These values are used by the postgres container itself.
POSTGRES_SUPERUSER=postgres
POSTGRES_SUPERPASSWORD=change_me_pg_super_password
POSTGRES_BOOTSTRAP_DB=postgres
# Dashboard application database account.
# deploy-full.sh will call scripts/init-full-db.sh to create/update these idempotently.
POSTGRES_APP_DB=nanobot
POSTGRES_APP_USER=nanobot
POSTGRES_APP_PASSWORD=change_me_nanobot_password
DATABASE_POOL_SIZE=20
DATABASE_MAX_OVERFLOW=40
DATABASE_POOL_TIMEOUT=30
DATABASE_POOL_RECYCLE=1800
# Redis cache (managed by docker-compose.full.yml)
REDIS_ENABLED=true
REDIS_DB=8
REDIS_PREFIX=nanobot
REDIS_DEFAULT_TTL=60
# Default timezone injected into newly created bot runtime env (`TZ`).
# If unset, backend falls back to `TZ` and then `Asia/Shanghai`.
DEFAULT_BOT_SYSTEM_TIMEZONE=Asia/Shanghai
# Panel access protection (deployment secret, not stored in sys_setting)
PANEL_ACCESS_PASSWORD=change_me_panel_password
# Browser credential requests must use an explicit CORS allowlist (deployment security setting).
# If frontend and backend are served under the same origin via nginx `/api` proxy,
# this can usually stay unset. Otherwise set the real dashboard origin(s).
# Example:
# CORS_ALLOWED_ORIGINS=https://dashboard.example.com
# Nginx upload entry limit (MB).
# The backend business limit is stored in `sys_setting.upload_max_mb`;
# for full deployment this value is also used as the initial DB seed.
UPLOAD_MAX_MB=200
# Local speech-to-text (Whisper via whisper.cpp model file)
STT_ENABLED=true
STT_MODEL=ggml-small-q8_0.bin
STT_MODEL_DIR=/app/data/model
STT_DEVICE=cpu
STT_MAX_AUDIO_SECONDS=20
STT_DEFAULT_LANGUAGE=zh
STT_FORCE_SIMPLIFIED=true
STT_AUDIO_PREPROCESS=true
STT_AUDIO_FILTER=highpass=f=120,lowpass=f=7600,afftdn=nf=-20
STT_INITIAL_PROMPT=以下内容可能包含简体中文和英文术语。请优先输出简体中文,英文单词、缩写、品牌名和数字保持原文,不要翻译。
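A minimal sketch of how the `POSTGRES_APP_*` values above map onto the backend's `DATABASE_URL`, using the `postgresql+psycopg` URL scheme shown elsewhere in this compare; the in-network host name `postgres` is an assumption about the compose service name:

import os

user = os.environ.get("POSTGRES_APP_USER", "nanobot")
password = os.environ["POSTGRES_APP_PASSWORD"]
db = os.environ.get("POSTGRES_APP_DB", "nanobot")
host = "postgres"  # assumed compose service name on the internal network

# Same URL shape as the DATABASE_URL examples in .env.prod.example.
database_url = f"postgresql+psycopg://{user}:{password}@{host}:5432/{db}"
print(database_url)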


@@ -1,10 +1,14 @@
# Public exposed port (only nginx is exposed)
NGINX_PORT=8080
NGINX_PORT=8082
# REQUIRED absolute host paths.
# They must exist and be writable by docker daemon.
HOST_DATA_ROOT=/opt/dashboard-nanobot/data
HOST_BOTS_WORKSPACE_ROOT=/opt/dashboard-nanobot/workspace/bots
# Project data is always mounted from the repository root `./data`.
# Only workspace root still needs an absolute host path.
HOST_BOTS_WORKSPACE_ROOT=/dep/dashboard-nanobot/workspace/bots
# Fixed Docker bridge subnet for the compose network.
# Change this if it conflicts with your host LAN / VPN / intranet routing.
DOCKER_NETWORK_NAME=dashboard-nanobot-network
DOCKER_NETWORK_SUBNET=172.20.0.0/16
# Optional custom image tags
BACKEND_IMAGE_TAG=latest
@@ -23,43 +27,45 @@ PIP_TRUSTED_HOST=pypi.tuna.tsinghua.edu.cn
# Frontend package registry mirror (used by yarn, recommended in CN)
NPM_REGISTRY=https://registry.npmmirror.com
# Database (choose one: SQLite / PostgreSQL / MySQL)
# SQLite example:
# DATABASE_URL=sqlite:///${HOST_DATA_ROOT}/nanobot_dashboard.db
# PostgreSQL example:
# DATABASE_URL=postgresql+psycopg://user:password@127.0.0.1:5432/nanobot_dashboard
# MySQL example:
# DATABASE_URL=mysql+pymysql://user:password@127.0.0.1:3306/nanobot_dashboard
DATABASE_URL=postgresql+psycopg://postgres:change_me@127.0.0.1:5432/dashboard
DATABASE_URL=postgresql+psycopg://postgres:postgres@10.100.52.43:5433/nanobot
DATABASE_POOL_SIZE=20
DATABASE_MAX_OVERFLOW=40
DATABASE_POOL_TIMEOUT=30
DATABASE_POOL_RECYCLE=1800
# Redis cache (optional)
# REDIS_URL must be reachable from the backend container.
# In docker-compose.prod.yml, 127.0.0.1 points to the backend container itself, not the host machine.
REDIS_ENABLED=true
REDIS_URL=redis://127.0.0.1:6379/8
REDIS_URL=redis://10.100.52.43:6380/8
REDIS_PASSWORD=Unis@123
REDIS_PREFIX=nanobot
REDIS_DEFAULT_TTL=60
# Chat history page size for upward lazy loading (per request)
CHAT_PULL_PAGE_SIZE=60
COMMAND_AUTO_UNLOCK_SECONDS=10
# Default timezone injected into newly created bot runtime env (`TZ`).
# If unset, backend falls back to `TZ` and then `Asia/Shanghai`.
DEFAULT_BOT_SYSTEM_TIMEZONE=Asia/Shanghai
# Panel access protection
# Panel access protection (deployment secret, not stored in sys_setting)
PANEL_ACCESS_PASSWORD=change_me_panel_password
WORKSPACE_PREVIEW_SIGNING_SECRET=change_me_workspace_preview_signing_secret
WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS=3600
# Max upload size for backend validation (MB)
# Browser credential requests must use an explicit CORS allowlist (deployment security setting).
# If frontend and backend are served under the same origin via nginx `/api` proxy,
# this can usually stay unset. Otherwise set the real dashboard origin(s).
# Example:
# CORS_ALLOWED_ORIGINS=https://dashboard.example.com
# Nginx upload entry limit (MB).
# The backend business limit is stored in `sys_setting.upload_max_mb`;
# if you change the DB value later, remember to sync this nginx limit too.
UPLOAD_MAX_MB=200
# Workspace files that should use direct download behavior in dashboard
WORKSPACE_DOWNLOAD_EXTENSIONS=.pdf,.doc,.docx,.xls,.xlsx,.xlsm,.ppt,.pptx,.odt,.ods,.odp,.wps,.stl,.scad,.zip,.rar
# Local speech-to-text (Whisper via whisper.cpp model file)
STT_ENABLED=true
STT_MODEL=ggml-small-q8_0.bin
STT_MODEL_DIR=${HOST_DATA_ROOT}/model
STT_MODEL_DIR=/app/data/model
STT_DEVICE=cpu
STT_MAX_AUDIO_SECONDS=20
STT_DEFAULT_LANGUAGE=zh
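A quick reachability check for the Redis note above, assuming the third-party `redis` package; it must run from inside the `backend` container, where `127.0.0.1` names the container itself rather than the host:

import os

import redis  # third-party client, an assumption for this sketch

client = redis.from_url(
    os.environ["REDIS_URL"],  # e.g. redis://10.100.52.43:6380/8
    password=os.environ.get("REDIS_PASSWORD") or None,
    socket_connect_timeout=3,
)
print(client.ping())  # True only when Redis is reachable from this network namespace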

.gitignore vendored

@@ -30,9 +30,16 @@ backend/__pycache__/
backend/*.log
# Project runtime data (generated locally)
data/
workspace/
engines/
data/*
!data/templates/
!data/templates/**
!data/skills/
!data/skills/**
!data/model/
data/model/*
!data/model/README.md
/workspace/
/engines/
# Frontend (Vite/Node)
frontend/node_modules/
@@ -49,6 +56,7 @@ frontend/coverage/
.env
.env.*
!.env.example
!.env.full.example
!.env.prod.example
backend/.env
frontend/.env
@@ -60,3 +68,4 @@ npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
bot-images/nanobot-base-*


@@ -13,7 +13,7 @@ Dashboard Nanobot is the control-plane project for `nanobot`, providing image management
- `USER.md`
- `TOOLS.md`
- `IDENTITY.md`
- Template management: system-level templates moved to file-based configuration (`backend/templates/agent_md_templates.json` and `backend/templates/topic_presets.json`).
- Template management: system-level templates moved to file-based configuration (`data/templates/agent_md_templates.json` and `data/templates/topic_presets.json`).
- 2D operations dashboard: bot list, start/stop, command dispatch, log streaming, telemetry.
- Global UI support: Light/Dark theme switching and Chinese/English locale switching.
@@ -23,7 +23,7 @@ Dashboard Nanobot is the control-plane project for `nanobot`, providing image management
graph TD
User((User)) --> Frontend[Frontend Control Plane]
Frontend --> API[FastAPI Backend]
API --> DB[(SQLite)]
API --> DB[(PostgreSQL)]
API --> Docker[Docker Daemon]
Docker --> BotA[Bot Container A]
@@ -57,17 +57,25 @@ graph TD
- Architecture design: `design/architecture.md`
- Database design: `design/database.md`
## Default Resources
- The project root `data/templates/` holds the default template resources; the backend reads these files directly at runtime and no longer copies or back-fills them at startup.
- The project root `data/skills/` holds the default skill packages; database initialization registers these default skills into `skill_market_item`.
- `data/model/` ships no speech-recognition model files; download a model yourself and place it in this directory or in the directory `STT_MODEL_DIR` points to.
- If the speech model is missing, the backend prints a clear warning at startup but does not block the service from starting (a minimal sketch follows).
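A minimal sketch of that warn-but-continue model check in Python, assuming a standard `logging` logger and the `STT_MODEL` / `STT_MODEL_DIR` names from the env examples in this compare; the helper name `warn_if_model_missing` is hypothetical:

import logging
from pathlib import Path

logger = logging.getLogger("dashboard.backend")

def warn_if_model_missing(model_dir: str, model_name: str) -> bool:
    """Return True when the Whisper .bin model file exists; warn loudly otherwise."""
    model_path = Path(model_dir) / model_name
    if not model_path.is_file():
        # Matches the documented behavior: warn at startup, do not block the service.
        logger.warning("speech model missing: %s (speech transcription stays disabled)", model_path)
        return False
    return True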
## Environment Variable Configuration
- Backend:
- Example file: `backend/.env.example`
- Local config: `backend/.env`
- Key items:
- `DATABASE_URL`: database connection string (choose one of SQLite / PostgreSQL / MySQL)
- `DATABASE_URL`: database connection string (PostgreSQL recommended; see the engine sketch after this section)
- `DATABASE_ECHO`: SQL log output switch
- No automatic data migration is provided (for an upgrade, migrate offline first, then switch the connection string)
- `DATA_ROOT`, `BOTS_WORKSPACE_ROOT`: runtime data and bot workspace directories
- `DEFAULT_*_MD`: optional override values (usually left empty; template files are the recommended path)
- `PANEL_ACCESS_PASSWORD`, `CORS_ALLOWED_ORIGINS`: still deployment-level security parameters
- `DEFAULT_BOT_SYSTEM_TIMEZONE`: the default `TZ` injected into newly created bots
- Frontend:
- Example file: `frontend/.env.example`
- Local config: `frontend/.env`
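A minimal sketch of how these settings feed the engine, assuming SQLModel/SQLAlchemy (which the backend code in this compare imports); reading `os.environ` directly is an illustration, not the project's actual settings module:

import os

from sqlmodel import create_engine

# Names follow backend/.env.example; the pool_* kwargs map 1:1 onto SQLAlchemy's QueuePool.
engine = create_engine(
    os.environ["DATABASE_URL"],  # e.g. postgresql+psycopg://user:password@127.0.0.1:5432/nanobot_dashboard
    echo=os.environ.get("DATABASE_ECHO", "false").lower() == "true",
    pool_size=int(os.environ.get("DATABASE_POOL_SIZE", "20")),
    max_overflow=int(os.environ.get("DATABASE_MAX_OVERFLOW", "40")),
    pool_timeout=int(os.environ.get("DATABASE_POOL_TIMEOUT", "30")),
    pool_recycle=int(os.environ.get("DATABASE_POOL_RECYCLE", "1800")),
)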
@@ -102,10 +110,11 @@ graph TD
1. Prepare deployment variables
- Copy `.env.prod.example` to `.env.prod` (in the project root)
- Configure the absolute paths:
- `HOST_DATA_ROOT`
- `data/` is mapped automatically to `./data` under the project root on the host
- `deploy-prod.sh` now requires an external PostgreSQL, and the target database must have `scripts/sql/create-tables.sql` and `scripts/sql/init-data.sql` applied in advance
- Only one absolute path still needs configuring:
- `HOST_BOTS_WORKSPACE_ROOT`
- If local speech recognition is enabled, put the Whisper `.bin` model file in `${HOST_DATA_ROOT}/model/`
- If local speech recognition is enabled, put the Whisper `.bin` model file in `data/model/` under the project root on the host
and point `STT_MODEL` at the full file name, e.g. `ggml-small-q8_0.bin`
- On networks in China, configure the acceleration options:
- `PIP_INDEX_URL`, `PIP_TRUSTED_HOST`
@@ -120,8 +129,66 @@ graph TD
### Key Notes
- `backend` exposes no host port; it is reached only by Nginx on the internal network.
- Upload size is controlled by the single parameter `UPLOAD_MAX_MB` (backend validation + Nginx limit).
- `deploy-prod.sh` only deploys the frontend and backend containers and does not initialize the external database; the external PostgreSQL must have its tables created and initial data imported beforehand.
- If Redis is enabled, `REDIS_URL` must be reachable from inside the `backend` container; in `docker-compose.prod.yml`, `127.0.0.1` points to the backend container itself, not the host.
- When Redis is unreachable, the generic cache health check reports `degraded`; panel login authentication automatically falls back to database-backed sessions instead of failing outright on an unreachable cache.
- `UPLOAD_MAX_MB` is only the Nginx entry limit; the backend business validation value comes from `sys_setting.upload_max_mb`.
- `/var/run/docker.sock` must be mounted, otherwise the backend cannot manage bot images and containers.
- `data/` is always bound to `./data` under the project root on the host; templates, default skills, speech models, and runtime data all land there.
- `HOST_BOTS_WORKSPACE_ROOT` must be an absolute host path, and `docker-compose.prod.yml` mounts it into the backend container at the same path.
Reason: when the backend creates bot containers through the Docker API, it passes bind paths as the host sees them (see the sketch below).
- Speech recognition is currently based on `pywhispercpp==1.3.1` plus a Whisper `.bin` model file; `faster-whisper` is not used.
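A minimal sketch of why the same-path mount matters, assuming the `docker` Python SDK; the image tag, bot name, and workspace path are illustrative. The bind source is resolved by the Docker daemon on the host, which is exactly why `HOST_BOTS_WORKSPACE_ROOT` must be a host-visible path mounted identically inside the backend container:

import docker

client = docker.from_env()

host_workspace = "/opt/dashboard-nanobot/workspace/bots/bot-123"  # path as the host sees it
client.containers.run(
    "nanobot-base:latest",  # illustrative image tag
    detach=True,
    name="bot-123",
    # The daemon interprets the bind *source* on the host filesystem; a path that
    # exists only inside the backend container would not point at the real workspace.
    volumes={host_workspace: {"bind": "/workspace", "mode": "rw"}},
)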
## Full Docker Deployment (bundled PostgreSQL / Redis)
This option coexists with `deploy-prod.sh` and suits pulling up the frontend, backend, PostgreSQL, and Redis together on the target machine.
### Files
- `docker-compose.full.yml`
- `.env.full.example`
- `scripts/deploy-full.sh`
- `scripts/init-full-db.sh`
- `scripts/stop-full.sh`
- `scripts/sql/create-tables.sql`
- `scripts/sql/init-data.sql`
- `scripts/sql/init-postgres-bootstrap.sql`
- `scripts/sql/init-postgres-app.sql`
### Startup Steps
1. Prepare deployment variables
- Copy `.env.full.example` to `.env.full`
- `data/` is mapped automatically to `./data` under the project root on the host
- Required changes:
- `HOST_BOTS_WORKSPACE_ROOT`
- `POSTGRES_SUPERPASSWORD`
- `POSTGRES_APP_PASSWORD`
- `PANEL_ACCESS_PASSWORD`
- If local speech recognition is enabled, put the Whisper `.bin` model file in `data/model/` under the project root on the host
2. Start the full stack
- `./scripts/deploy-full.sh`
3. Access
- `http://<host>:${NGINX_PORT}` (default `8080`)
### Initialization Notes
- `scripts/deploy-full.sh` starts `postgres` / `redis` first, then automatically invokes `scripts/init-full-db.sh`.
- `scripts/init-full-db.sh` is responsible for:
- waiting for PostgreSQL to become ready
- creating or updating the application account
- creating the application database and granting privileges
- fixing `public` schema permissions
- running `scripts/sql/create-tables.sql` to create the application tables
- running `scripts/sql/init-data.sql` to seed `sys_setting` and the default skill market data
- At startup the backend only verifies initialization integrity; it no longer auto-creates tables, columns, or data, and it does not migrate old schemas. A missing table, a missing `sys_setting`, or a missing template file fails fast with an error. (An idempotent account/database sketch follows this list.)
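A minimal sketch of the idempotent account/database step, assuming `psycopg` (v3) connected as the bootstrap superuser; the helper name `ensure_app_account` is hypothetical and this is not the contents of `scripts/init-full-db.sh`:

import psycopg
from psycopg import sql

def ensure_app_account(dsn: str, user: str, password: str, db: str) -> None:
    """Create the app role/database when absent; otherwise refresh the password."""
    with psycopg.connect(dsn, autocommit=True) as conn:  # CREATE DATABASE forbids transactions
        have_role = conn.execute("SELECT 1 FROM pg_roles WHERE rolname = %s", (user,)).fetchone()
        verb = "ALTER" if have_role else "CREATE"
        # Utility statements cannot take bind parameters, so compose them safely instead.
        conn.execute(
            sql.SQL(verb + " ROLE {u} WITH LOGIN PASSWORD {p}").format(
                u=sql.Identifier(user), p=sql.Literal(password)
            )
        )
        if not conn.execute("SELECT 1 FROM pg_database WHERE datname = %s", (db,)).fetchone():
            conn.execute(sql.SQL("CREATE DATABASE {d} OWNER {u}").format(
                d=sql.Identifier(db), u=sql.Identifier(user)
            ))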
### Stop
- `./scripts/stop-full.sh`
### Caveats
- `deploy-prod.sh` and `deploy-full.sh` use two separate compose files but reuse the same container names, so they cannot run side by side on one machine.
- PostgreSQL data lands by default in `./data/postgres` under the project root on the host; Redis data in `./data/redis`.
- To keep only the frontend and backend containers, stay with `deploy-prod.sh`; to bundle the dependencies as well, use `deploy-full.sh`.


@@ -1,15 +1,14 @@
# Runtime paths
DATA_ROOT=../data
BOTS_WORKSPACE_ROOT=../workspace/bots
# Optional: when backend itself runs inside docker-compose and bot containers
# should join that same user-defined network, set the network name here.
# Leave empty for local development to use Docker's default bridge network.
DOCKER_NETWORK_NAME=
# Database
# SQLite (recommended): leave DATABASE_URL unset, backend will use:
# sqlite:///{DATA_ROOT}/nanobot_dashboard.db
# DATABASE_URL=sqlite:///../data/nanobot_dashboard.db
# PostgreSQL example:
# DATABASE_URL=postgresql+psycopg://user:password@127.0.0.1:5432/nanobot_dashboard
# MySQL example:
# DATABASE_URL=mysql+pymysql://user:password@127.0.0.1:3306/nanobot_dashboard
# PostgreSQL is required:
DATABASE_URL=postgresql+psycopg://user:password@127.0.0.1:5432/nanobot_dashboard
# Show SQL statements in backend logs (debug only).
DATABASE_ECHO=true
DATABASE_POOL_SIZE=20
@@ -27,19 +26,24 @@ REDIS_DEFAULT_TTL=60
# Optional panel-level access password for all backend API/WS calls.
PANEL_ACCESS_PASSWORD=
# The following platform-level items are now managed in sys_setting / platform parameters:
WORKSPACE_PREVIEW_SIGNING_SECRET=
WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS=3600
# Explicit CORS allowlist for browser credential requests.
# For local development, the backend defaults to common Vite dev origins.
# In production, prefer same-origin `/api` reverse proxy, or set your real dashboard origin explicitly.
# Example:
# CORS_ALLOWED_ORIGINS=http://localhost:5173,https://dashboard.example.com
# Default timezone injected into newly created bot runtime env (`TZ`).
DEFAULT_BOT_SYSTEM_TIMEZONE=Asia/Shanghai
# The following platform-level items are initialized by SQL and managed in sys_setting / platform parameters:
# - page_size
# - chat_pull_page_size
# - upload_max_mb
# - allowed_attachment_extensions
# - workspace_download_extensions
# - speech_enabled
# - speech_max_audio_seconds
# - speech_default_language
# - speech_force_simplified
# - speech_audio_preprocess
# - speech_audio_filter
# - speech_initial_prompt
# Local speech-to-text (Whisper via whisper.cpp model file)
STT_MODEL=ggml-small-q8_0.bin
@@ -50,10 +54,3 @@ STT_DEVICE=cpu
APP_HOST=0.0.0.0
APP_PORT=8000
APP_RELOAD=true
# Optional overrides (fallback only; usually keep empty when using template files)
DEFAULT_AGENTS_MD=
DEFAULT_SOUL_MD=
DEFAULT_USER_MD=
DEFAULT_TOOLS_MD=
DEFAULT_IDENTITY_MD=
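A minimal sketch of consuming `CORS_ALLOWED_ORIGINS`, assuming FastAPI's standard `CORSMiddleware`; the comma-splitting helper is an illustration, not the backend's actual settings code:

import os

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# Comma-separated allowlist, e.g. "http://localhost:5173,https://dashboard.example.com".
origins = [o.strip() for o in os.environ.get("CORS_ALLOWED_ORIGINS", "").split(",") if o.strip()]
if origins:
    app.add_middleware(
        CORSMiddleware,
        allow_origins=origins,   # credentialed requests require an explicit allowlist, not "*"
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )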


@@ -20,6 +20,7 @@ RUN if [ -n "${PIP_INDEX_URL}" ]; then pip config set global.index-url "${PIP_IN
&& pip install -r requirements.txt
COPY backend/ /app/backend/
COPY data/ /app/data/
EXPOSE 8000


@@ -0,0 +1,102 @@
from fastapi import APIRouter, Depends, HTTPException
from sqlmodel import Session
from core.database import get_session
from models.bot import BotInstance
from schemas.bot import (
BotEnvParamsUpdateRequest,
BotMcpConfigUpdateRequest,
BotToolsConfigUpdateRequest,
ChannelConfigRequest,
ChannelConfigUpdateRequest,
)
from services.bot_config_service import (
create_bot_channel_config,
delete_bot_channel_config,
get_bot_env_params_state,
get_bot_mcp_config_state,
get_bot_resources_snapshot,
get_bot_tools_config_state,
list_bot_channels_config,
reject_bot_tools_config_update,
update_bot_channel_config,
update_bot_env_params_state,
update_bot_mcp_config_state,
)
router = APIRouter()
@router.get("/api/bots/{bot_id}/resources")
def get_bot_resources(bot_id: str, session: Session = Depends(get_session)):
return get_bot_resources_snapshot(session, bot_id=bot_id)
@router.get("/api/bots/{bot_id}/channels")
def list_bot_channels(bot_id: str, session: Session = Depends(get_session)):
return list_bot_channels_config(session, bot_id=bot_id)
@router.get("/api/bots/{bot_id}/tools-config")
def get_bot_tools_config(bot_id: str, session: Session = Depends(get_session)):
return get_bot_tools_config_state(session, bot_id=bot_id)
@router.put("/api/bots/{bot_id}/tools-config")
def update_bot_tools_config(
bot_id: str,
payload: BotToolsConfigUpdateRequest,
session: Session = Depends(get_session),
):
return reject_bot_tools_config_update(session, bot_id=bot_id, payload=payload)
@router.get("/api/bots/{bot_id}/mcp-config")
def get_bot_mcp_config(bot_id: str, session: Session = Depends(get_session)):
return get_bot_mcp_config_state(session, bot_id=bot_id)
@router.put("/api/bots/{bot_id}/mcp-config")
def update_bot_mcp_config(
bot_id: str,
payload: BotMcpConfigUpdateRequest,
session: Session = Depends(get_session),
):
return update_bot_mcp_config_state(session, bot_id=bot_id, payload=payload)
@router.get("/api/bots/{bot_id}/env-params")
def get_bot_env_params(bot_id: str, session: Session = Depends(get_session)):
return get_bot_env_params_state(session, bot_id=bot_id)
@router.put("/api/bots/{bot_id}/env-params")
def update_bot_env_params(
bot_id: str,
payload: BotEnvParamsUpdateRequest,
session: Session = Depends(get_session),
):
return update_bot_env_params_state(session, bot_id=bot_id, payload=payload)
@router.post("/api/bots/{bot_id}/channels")
def create_bot_channel(
bot_id: str,
payload: ChannelConfigRequest,
session: Session = Depends(get_session),
):
return create_bot_channel_config(session, bot_id=bot_id, payload=payload)
@router.put("/api/bots/{bot_id}/channels/{channel_id}")
def update_bot_channel(
bot_id: str,
channel_id: str,
payload: ChannelConfigUpdateRequest,
session: Session = Depends(get_session),
):
return update_bot_channel_config(session, bot_id=bot_id, channel_id=channel_id, payload=payload)
@router.delete("/api/bots/{bot_id}/channels/{channel_id}")
def delete_bot_channel(bot_id: str, channel_id: str, session: Session = Depends(get_session)):
return delete_bot_channel_config(session, bot_id=bot_id, channel_id=channel_id)


@@ -0,0 +1,68 @@
from fastapi import APIRouter, Depends, HTTPException
from sqlmodel import Session
from core.database import get_session
from services.bot_lifecycle_service import (
deactivate_bot_instance,
delete_bot_instance,
disable_bot_instance,
enable_bot_instance,
start_bot_instance,
stop_bot_instance,
)
router = APIRouter()
@router.post("/api/bots/{bot_id}/start")
async def start_bot(bot_id: str, session: Session = Depends(get_session)):
try:
return await start_bot_instance(session, bot_id)
except ValueError as exc:
raise HTTPException(status_code=404, detail=str(exc)) from exc
except PermissionError as exc:
raise HTTPException(status_code=403, detail=str(exc)) from exc
except RuntimeError as exc:
raise HTTPException(status_code=500, detail=str(exc)) from exc
@router.post("/api/bots/{bot_id}/stop")
def stop_bot(bot_id: str, session: Session = Depends(get_session)):
try:
return stop_bot_instance(session, bot_id)
except ValueError as exc:
raise HTTPException(status_code=404, detail=str(exc)) from exc
except PermissionError as exc:
raise HTTPException(status_code=403, detail=str(exc)) from exc
@router.post("/api/bots/{bot_id}/enable")
def enable_bot(bot_id: str, session: Session = Depends(get_session)):
try:
return enable_bot_instance(session, bot_id)
except ValueError as exc:
raise HTTPException(status_code=404, detail=str(exc)) from exc
@router.post("/api/bots/{bot_id}/disable")
def disable_bot(bot_id: str, session: Session = Depends(get_session)):
try:
return disable_bot_instance(session, bot_id)
except ValueError as exc:
raise HTTPException(status_code=404, detail=str(exc)) from exc
@router.post("/api/bots/{bot_id}/deactivate")
def deactivate_bot(bot_id: str, session: Session = Depends(get_session)):
try:
return deactivate_bot_instance(session, bot_id)
except ValueError as exc:
raise HTTPException(status_code=404, detail=str(exc)) from exc
@router.delete("/api/bots/{bot_id}")
def delete_bot(bot_id: str, delete_workspace: bool = True, session: Session = Depends(get_session)):
try:
return delete_bot_instance(session, bot_id, delete_workspace=delete_workspace)
except ValueError as exc:
raise HTTPException(status_code=404, detail=str(exc)) from exc


@@ -0,0 +1,103 @@
from fastapi import APIRouter, Depends, HTTPException, Request, Response
from sqlmodel import Session
from core.database import get_session
from models.bot import BotInstance
from schemas.bot import BotCreateRequest, BotPageAuthLoginRequest, BotUpdateRequest
from services.platform_auth_service import (
clear_bot_token_cookie,
create_bot_token,
resolve_bot_request_auth,
revoke_bot_token,
set_bot_token_cookie,
)
from services.bot_management_service import (
authenticate_bot_page_access,
create_bot_record,
get_bot_detail_cached,
list_bots_with_cache,
update_bot_record,
)
from services.image_service import list_registered_images
from services.provider_service import test_provider_connection
router = APIRouter()
@router.post("/api/providers/test")
async def test_provider(payload: dict):
return await test_provider_connection(payload)
@router.post("/api/bots/{bot_id}/providers/test")
async def test_bot_provider(bot_id: str, payload: dict, session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return await test_provider_connection(payload)
@router.get("/api/bots/{bot_id}/images")
def list_bot_images(bot_id: str, session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return list_registered_images(session)
@router.post("/api/bots")
def create_bot(payload: BotCreateRequest, session: Session = Depends(get_session)):
return create_bot_record(session, payload=payload)
@router.get("/api/bots")
def list_bots(session: Session = Depends(get_session)):
return list_bots_with_cache(session)
@router.get("/api/bots/{bot_id}")
def get_bot_detail(bot_id: str, session: Session = Depends(get_session)):
return get_bot_detail_cached(session, bot_id=bot_id)
@router.post("/api/bots/{bot_id}/auth/login")
def login_bot_page(
bot_id: str,
payload: BotPageAuthLoginRequest,
request: Request,
response: Response,
session: Session = Depends(get_session),
):
result = authenticate_bot_page_access(session, bot_id=bot_id, password=payload.password)
try:
raw_token = create_bot_token(session, request, bot_id)
except RuntimeError as exc:
raise HTTPException(status_code=503, detail=str(exc)) from exc
set_bot_token_cookie(response, request, bot_id, raw_token, session)
return result
@router.get("/api/bots/{bot_id}/auth/status")
def get_bot_auth_status(bot_id: str, request: Request, session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
return {"enabled": False, "authenticated": False, "auth_source": None, "bot_id": bot_id}
principal = resolve_bot_request_auth(session, request, bot_id)
return {
"enabled": bool(str(bot.access_password or "").strip()),
"authenticated": bool(principal.authenticated),
"auth_source": principal.auth_source if principal.authenticated else None,
"bot_id": bot_id,
}
@router.post("/api/bots/{bot_id}/auth/logout")
def logout_bot_page(bot_id: str, request: Request, response: Response, session: Session = Depends(get_session)):
revoke_bot_token(session, request, bot_id)
clear_bot_token_cookie(response, bot_id)
return {"success": True, "bot_id": bot_id}
@router.put("/api/bots/{bot_id}")
def update_bot(bot_id: str, payload: BotUpdateRequest, session: Session = Depends(get_session)):
return update_bot_record(session, bot_id=bot_id, payload=payload)


@@ -0,0 +1,138 @@
import logging
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException, WebSocket, WebSocketDisconnect
from sqlmodel import Session
from core.database import engine, get_session
from core.docker_instance import docker_manager
from core.websocket_manager import manager
from services.bot_runtime_service import (
delete_cron_job as delete_cron_job_service,
ensure_monitor_websocket_access,
get_bot_logs as get_bot_logs_service,
list_cron_jobs as list_cron_jobs_service,
relogin_weixin as relogin_weixin_service,
start_cron_job as start_cron_job_service,
stop_cron_job as stop_cron_job_service,
)
from services.runtime_service import docker_callback
router = APIRouter()
logger = logging.getLogger("dashboard.backend")
@router.get("/api/bots/{bot_id}/logs")
def get_bot_logs(
bot_id: str,
tail: Optional[int] = 300,
offset: int = 0,
limit: Optional[int] = None,
reverse: bool = False,
session: Session = Depends(get_session),
):
try:
return get_bot_logs_service(
session,
bot_id=bot_id,
tail=tail,
offset=offset,
limit=limit,
reverse=reverse,
)
except LookupError as exc:
raise HTTPException(status_code=404, detail=str(exc)) from exc
@router.post("/api/bots/{bot_id}/weixin/relogin")
async def relogin_weixin(bot_id: str, session: Session = Depends(get_session)):
try:
return await relogin_weixin_service(session, bot_id=bot_id)
except LookupError as exc:
raise HTTPException(status_code=404, detail=str(exc)) from exc
except ValueError as exc:
raise HTTPException(status_code=404, detail=str(exc)) from exc
except RuntimeError as exc:
raise HTTPException(status_code=500, detail=str(exc)) from exc
@router.get("/api/bots/{bot_id}/cron/jobs")
def list_cron_jobs(bot_id: str, include_disabled: bool = True, session: Session = Depends(get_session)):
try:
return list_cron_jobs_service(session, bot_id=bot_id, include_disabled=include_disabled)
except LookupError as exc:
raise HTTPException(status_code=404, detail=str(exc)) from exc
@router.post("/api/bots/{bot_id}/cron/jobs/{job_id}/stop")
def stop_cron_job(bot_id: str, job_id: str, session: Session = Depends(get_session)):
try:
return stop_cron_job_service(session, bot_id=bot_id, job_id=job_id)
except LookupError as exc:
raise HTTPException(status_code=404, detail=str(exc)) from exc
@router.post("/api/bots/{bot_id}/cron/jobs/{job_id}/start")
def start_cron_job(bot_id: str, job_id: str, session: Session = Depends(get_session)):
try:
return start_cron_job_service(session, bot_id=bot_id, job_id=job_id)
except LookupError as exc:
raise HTTPException(status_code=404, detail=str(exc)) from exc
@router.delete("/api/bots/{bot_id}/cron/jobs/{job_id}")
def delete_cron_job(bot_id: str, job_id: str, session: Session = Depends(get_session)):
try:
return delete_cron_job_service(session, bot_id=bot_id, job_id=job_id)
except LookupError as exc:
raise HTTPException(status_code=404, detail=str(exc)) from exc
@router.websocket("/ws/monitor/{bot_id}")
async def websocket_endpoint(websocket: WebSocket, bot_id: str):
with Session(engine) as session:
try:
ensure_monitor_websocket_access(session, websocket, bot_id)
except PermissionError:
await websocket.close(code=4401, reason="Bot or panel authentication required")
return
except LookupError:
await websocket.close(code=4404, reason="Bot not found")
return
connected = False
try:
await manager.connect(bot_id, websocket)
connected = True
except Exception as exc:
logger.warning("websocket connect failed bot_id=%s detail=%s", bot_id, exc)
try:
await websocket.close(code=1011, reason="WebSocket accept failed")
except Exception:
pass
return
docker_manager.ensure_monitor(bot_id, docker_callback)
try:
while True:
await websocket.receive_text()
with Session(engine) as session:
try:
ensure_monitor_websocket_access(session, websocket, bot_id)
except PermissionError:
await websocket.close(code=4401, reason="Authentication expired")
return
except LookupError:
await websocket.close(code=4404, reason="Bot not found")
return
except WebSocketDisconnect:
pass
except RuntimeError as exc:
msg = str(exc or "").lower()
if "need to call \"accept\" first" not in msg and "not connected" not in msg:
logger.exception("websocket runtime error bot_id=%s", bot_id)
except Exception:
logger.exception("websocket unexpected error bot_id=%s", bot_id)
finally:
if connected:
manager.disconnect(bot_id, websocket)
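As a usage example for the monitor endpoint above, a minimal client sketch assuming the third-party `websockets` package and a locally reachable backend; the URL and bot id are illustrative, and the push payload format is whatever the manager broadcasts:

import asyncio

import websockets  # third-party client library, an assumption for this sketch

async def watch(bot_id: str) -> None:
    uri = f"ws://127.0.0.1:8000/ws/monitor/{bot_id}"
    async with websockets.connect(uri) as ws:
        await ws.send("ping")  # any client text re-triggers the server-side auth re-check
        while True:
            print(await ws.recv())  # frames pushed by the websocket manager

asyncio.run(watch("bot-123"))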


@@ -0,0 +1,33 @@
import logging
from typing import Optional
from fastapi import APIRouter, Depends, File, Form, HTTPException, Request, UploadFile
from sqlmodel import Session
from core.database import get_session
from core.speech_service import WhisperSpeechService
from services.speech_transcribe_service import transcribe_bot_speech_upload
router = APIRouter()
logger = logging.getLogger("dashboard.backend")
@router.post("/api/bots/{bot_id}/speech/transcribe")
async def transcribe_bot_speech(
bot_id: str,
request: Request,
file: UploadFile = File(...),
language: Optional[str] = Form(None),
session: Session = Depends(get_session),
):
speech_service = getattr(request.app.state, "speech_service", None)
if not isinstance(speech_service, WhisperSpeechService):
raise HTTPException(status_code=500, detail="Speech service is not initialized")
return await transcribe_bot_speech_upload(
session,
bot_id,
upload=file,
language=language,
speech_service=speech_service,
logger=logger,
)


@@ -0,0 +1,79 @@
from typing import Optional
from fastapi import APIRouter, Depends
from sqlmodel import Session
from core.database import get_session
from schemas.bot import MessageFeedbackRequest
from services.chat_history_service import (
clear_bot_messages_payload,
clear_dashboard_direct_session_payload,
delete_bot_message_payload,
list_bot_messages_by_date_payload,
list_bot_messages_page_payload,
list_bot_messages_payload,
update_bot_message_feedback_payload,
)
router = APIRouter()
@router.get("/api/bots/{bot_id}/messages")
def list_bot_messages(bot_id: str, limit: int = 200, session: Session = Depends(get_session)):
return list_bot_messages_payload(session, bot_id, limit=limit)
@router.get("/api/bots/{bot_id}/messages/page")
def list_bot_messages_page(
bot_id: str,
limit: Optional[int] = None,
before_id: Optional[int] = None,
session: Session = Depends(get_session),
):
return list_bot_messages_page_payload(session, bot_id, limit=limit, before_id=before_id)
@router.get("/api/bots/{bot_id}/messages/by-date")
def list_bot_messages_by_date(
bot_id: str,
date: str,
tz_offset_minutes: Optional[int] = None,
limit: Optional[int] = None,
session: Session = Depends(get_session),
):
return list_bot_messages_by_date_payload(
session,
bot_id,
date=date,
tz_offset_minutes=tz_offset_minutes,
limit=limit,
)
@router.put("/api/bots/{bot_id}/messages/{message_id}/feedback")
def update_bot_message_feedback(
bot_id: str,
message_id: int,
payload: MessageFeedbackRequest,
session: Session = Depends(get_session),
):
return update_bot_message_feedback_payload(session, bot_id, message_id, payload.feedback)
@router.delete("/api/bots/{bot_id}/messages/{message_id}")
def delete_bot_message(
bot_id: str,
message_id: int,
session: Session = Depends(get_session),
):
return delete_bot_message_payload(session, bot_id, message_id)
@router.delete("/api/bots/{bot_id}/messages")
def clear_bot_messages(bot_id: str, session: Session = Depends(get_session)):
return clear_bot_messages_payload(session, bot_id)
@router.post("/api/bots/{bot_id}/sessions/dashboard-direct/clear")
def clear_bot_dashboard_direct_session(bot_id: str, session: Session = Depends(get_session)):
return clear_dashboard_direct_session_payload(session, bot_id)
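As a usage example for the `/messages/page` endpoint above, a sketch of upward lazy loading with the `before_id` cursor, assuming the `requests` package and an illustrative base URL; the `items`/`id` payload shape is an assumption about what the payload helper returns:

import requests

BASE = "http://127.0.0.1:8000"  # illustrative

def pull_history(bot_id: str, pages: int = 3) -> list:
    """Walk chat history upward, page by page, using before_id as the cursor."""
    collected, before_id = [], None
    for _ in range(pages):
        params = {"limit": 60}
        if before_id is not None:
            params["before_id"] = before_id
        page = requests.get(f"{BASE}/api/bots/{bot_id}/messages/page", params=params).json()
        items = page.get("items") or []  # assumed payload shape
        if not items:
            break
        collected = items + collected
        before_id = min(int(m["id"]) for m in items)  # oldest id becomes the next cursor
    return collected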


@@ -0,0 +1,29 @@
from typing import Any, Dict, Tuple
from fastapi import APIRouter, Body, Depends
from sqlmodel import Session
from core.database import get_session
from services.chat_command_service import send_bot_command
router = APIRouter()
def _parse_command_payload(payload: Dict[str, Any] | None) -> Tuple[str, Any]:
body = payload if isinstance(payload, dict) else {}
return str(body.get("command") or ""), body.get("attachments")
@router.post("/api/bots/{bot_id}/command")
def send_command(
bot_id: str,
payload: Dict[str, Any] | None = Body(default=None),
session: Session = Depends(get_session),
):
command, attachments = _parse_command_payload(payload)
return send_bot_command(
session,
bot_id,
command=command,
attachments=attachments,
)


@@ -0,0 +1,46 @@
from fastapi import APIRouter, HTTPException
from sqlmodel import Session, select
from core.cache import auth_cache, cache
from core.database import engine
from core.settings import DATABASE_ENGINE, REDIS_ENABLED, REDIS_PREFIX, REDIS_URL
from models.bot import BotInstance
router = APIRouter()
@router.get("/api/health")
def get_health():
try:
with Session(engine) as session:
session.exec(select(BotInstance).limit(1)).first()
return {"status": "ok", "database": DATABASE_ENGINE}
except Exception as exc:
raise HTTPException(status_code=503, detail=f"database check failed: {exc}") from exc
@router.get("/api/health/cache")
def get_cache_health():
redis_url = str(REDIS_URL or "").strip()
configured = bool(REDIS_ENABLED and redis_url)
client_enabled = bool(getattr(cache, "enabled", False))
reachable = bool(cache.ping()) if client_enabled else False
status = "ok"
if configured and not reachable:
status = "degraded"
return {
"status": status,
"cache": {
"configured": configured,
"enabled": client_enabled,
"reachable": reachable,
"prefix": REDIS_PREFIX,
"status": str(getattr(cache, "status", "") or ""),
"detail": str(getattr(cache, "status_detail", "") or ""),
},
"auth_store": {
"enabled": bool(getattr(auth_cache, "enabled", False)),
"status": str(getattr(auth_cache, "status", "") or ""),
"detail": str(getattr(auth_cache, "status_detail", "") or ""),
},
}
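A quick usage example for the two health endpoints above, assuming the `requests` package and an illustrative base URL; `degraded` mirrors the README note that an unreachable Redis downgrades rather than fails the cache check:

import requests

BASE = "http://127.0.0.1:8000"  # illustrative

db = requests.get(f"{BASE}/api/health")
db.raise_for_status()  # a 503 here means the database probe failed
print(db.json())  # {"status": "ok", "database": ...}

cache = requests.get(f"{BASE}/api/health/cache").json()
if cache["status"] == "degraded":
    # Redis is configured but unreachable; auth falls back to DB-backed sessions.
    print("cache degraded:", cache["cache"]["detail"])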


@@ -0,0 +1,31 @@
from typing import Dict
from fastapi import APIRouter, Depends
from sqlmodel import Session
from core.database import get_session
from services.image_service import (
delete_registered_image,
list_docker_images_by_repository,
list_registered_images,
register_image as register_image_record,
)
router = APIRouter()
@router.get("/api/images")
def list_images(session: Session = Depends(get_session)):
return list_registered_images(session)
@router.delete("/api/images/{tag:path}")
def delete_image(tag: str, session: Session = Depends(get_session)):
return delete_registered_image(session, tag=tag)
@router.get("/api/docker-images")
def list_docker_images(repository: str = "nanobot-base"):
return list_docker_images_by_repository(repository)
@router.post("/api/images/register")
def register_image(payload: dict, session: Session = Depends(get_session)):
return register_image_record(session, payload)


@@ -0,0 +1,55 @@
from fastapi import APIRouter, Depends, HTTPException, Request, Response
from sqlmodel import Session
from core.database import get_session
from core.settings import PANEL_ACCESS_PASSWORD
from schemas.system import PanelLoginRequest
from services.platform_auth_service import (
clear_panel_token_cookie,
create_panel_token,
resolve_panel_request_auth,
revoke_panel_token,
set_panel_token_cookie,
)
router = APIRouter()
@router.get("/api/panel/auth/status")
def get_panel_auth_status(request: Request, session: Session = Depends(get_session)):
configured = str(PANEL_ACCESS_PASSWORD or "").strip()
principal = resolve_panel_request_auth(session, request)
return {
"enabled": bool(configured),
"authenticated": bool(principal.authenticated),
"auth_source": principal.auth_source if principal.authenticated else None,
}
@router.post("/api/panel/auth/login")
def panel_login(
payload: PanelLoginRequest,
request: Request,
response: Response,
session: Session = Depends(get_session),
):
configured = str(PANEL_ACCESS_PASSWORD or "").strip()
if not configured:
clear_panel_token_cookie(response)
return {"success": True, "enabled": False}
supplied = str(payload.password or "").strip()
if supplied != configured:
raise HTTPException(status_code=401, detail="Invalid panel access password")
try:
raw_token = create_panel_token(session, request)
except RuntimeError as exc:
raise HTTPException(status_code=503, detail=str(exc)) from exc
set_panel_token_cookie(response, request, raw_token, session)
return {"success": True, "enabled": True, "authenticated": True}
@router.post("/api/panel/auth/logout")
def panel_logout(request: Request, response: Response, session: Session = Depends(get_session)):
revoke_panel_token(session, request)
clear_panel_token_cookie(response)
return {"success": True}


@@ -3,30 +3,24 @@ from typing import Optional
from fastapi import APIRouter, Depends, HTTPException, Request
from sqlmodel import Session
from bootstrap.app_runtime import reload_platform_runtime
from core.cache import cache
from core.database import get_session
from schemas.platform import PlatformSettingsPayload, SystemSettingPayload
from services.platform_service import (
build_platform_overview,
from services.platform_activity_service import get_bot_activity_stats, list_activity_events
from services.platform_login_log_service import list_login_logs
from services.platform_overview_service import build_platform_overview
from services.platform_settings_service import get_platform_settings, save_platform_settings
from services.platform_system_settings_service import (
create_or_update_system_setting,
delete_system_setting,
get_platform_settings,
list_system_settings,
list_activity_events,
list_usage,
save_platform_settings,
)
from services.platform_usage_service import list_usage
router = APIRouter()
def _apply_platform_runtime_changes(request: Request) -> None:
cache.delete_prefix("")
speech_service = getattr(request.app.state, "speech_service", None)
if speech_service is not None and hasattr(speech_service, "reset_runtime"):
speech_service.reset_runtime()
@router.get("/api/platform/overview")
def get_platform_overview(request: Request, session: Session = Depends(get_session)):
docker_manager = getattr(request.app.state, "docker_manager", None)
@@ -41,7 +35,7 @@ def get_platform_settings_api(session: Session = Depends(get_session)):
@router.put("/api/platform/settings")
def update_platform_settings_api(payload: PlatformSettingsPayload, request: Request, session: Session = Depends(get_session)):
result = save_platform_settings(session, payload).model_dump()
_apply_platform_runtime_changes(request)
reload_platform_runtime(request.app)
return result
@@ -52,8 +46,8 @@ def clear_platform_cache():
@router.post("/api/platform/reload")
def reload_platform_runtime(request: Request):
_apply_platform_runtime_changes(request)
def reload_platform_runtime_api(request: Request):
reload_platform_runtime(request.app)
return {"status": "reloaded"}
@@ -67,11 +61,35 @@ def get_platform_usage(
return list_usage(session, bot_id=bot_id, limit=limit, offset=offset)
@router.get("/api/platform/activity-stats")
def get_platform_activity_stats(session: Session = Depends(get_session)):
return {"items": get_bot_activity_stats(session)}
@router.get("/api/platform/events")
def get_platform_events(bot_id: Optional[str] = None, limit: int = 100, session: Session = Depends(get_session)):
return {"items": list_activity_events(session, bot_id=bot_id, limit=limit)}
@router.get("/api/platform/login-logs")
def get_platform_login_logs(
search: str = "",
auth_type: str = "",
status: str = "all",
limit: int = 50,
offset: int = 0,
session: Session = Depends(get_session),
):
return list_login_logs(
session,
search=search,
auth_type=auth_type,
status=status,
limit=limit,
offset=offset,
).model_dump()
@router.get("/api/platform/system-settings")
def get_system_settings(search: str = "", session: Session = Depends(get_session)):
return {"items": list_system_settings(session, search=search)}
@@ -81,7 +99,7 @@ def get_system_settings(search: str = "", session: Session = Depends(get_session
def create_system_setting(payload: SystemSettingPayload, request: Request, session: Session = Depends(get_session)):
try:
result = create_or_update_system_setting(session, payload)
_apply_platform_runtime_changes(request)
reload_platform_runtime(request.app)
return result
except ValueError as exc:
raise HTTPException(status_code=400, detail=str(exc)) from exc
@@ -91,7 +109,7 @@ def create_system_setting(payload: SystemSettingPayload, request: Request, sessi
def update_system_setting(key: str, payload: SystemSettingPayload, request: Request, session: Session = Depends(get_session)):
try:
result = create_or_update_system_setting(session, payload.model_copy(update={"key": key}))
_apply_platform_runtime_changes(request)
reload_platform_runtime(request.app)
return result
except ValueError as exc:
raise HTTPException(status_code=400, detail=str(exc)) from exc
@@ -101,7 +119,7 @@ def update_system_setting(key: str, payload: SystemSettingPayload, request: Requ
def remove_system_setting(key: str, request: Request, session: Session = Depends(get_session)):
try:
delete_system_setting(session, key)
_apply_platform_runtime_changes(request)
reload_platform_runtime(request.app)
except ValueError as exc:
raise HTTPException(status_code=400, detail=str(exc)) from exc
return {"status": "deleted", "key": key}


@@ -0,0 +1,100 @@
from typing import Optional
from fastapi import APIRouter, Depends, File, Form, HTTPException, UploadFile
from sqlmodel import Session
from core.database import get_session
from models.bot import BotInstance
from services.skill_market_service import (
create_skill_market_item_record,
delete_skill_market_item_record,
install_skill_market_item_for_bot,
list_bot_skill_market_items,
list_skill_market_items,
update_skill_market_item_record,
)
from services.skill_service import (
delete_workspace_skill_entry,
list_bot_skills as list_workspace_bot_skills,
upload_bot_skill_zip_to_workspace,
)
router = APIRouter()
@router.get("/api/platform/skills")
def list_skill_market(session: Session = Depends(get_session)):
return list_skill_market_items(session)
@router.post("/api/platform/skills")
async def create_skill_market_item(
skill_key: str = Form(""),
display_name: str = Form(""),
description: str = Form(""),
file: UploadFile = File(...),
session: Session = Depends(get_session),
):
return await create_skill_market_item_record(
session,
skill_key=skill_key,
display_name=display_name,
description=description,
upload=file,
)
@router.put("/api/platform/skills/{skill_id}")
async def update_skill_market_item(
skill_id: int,
skill_key: str = Form(""),
display_name: str = Form(""),
description: str = Form(""),
file: Optional[UploadFile] = File(None),
session: Session = Depends(get_session),
):
return await update_skill_market_item_record(
session,
skill_id=skill_id,
skill_key=skill_key,
display_name=display_name,
description=description,
upload=file,
)
@router.delete("/api/platform/skills/{skill_id}")
def delete_skill_market_item(skill_id: int, session: Session = Depends(get_session)):
return delete_skill_market_item_record(session, skill_id=skill_id)
@router.get("/api/bots/{bot_id}/skills")
def list_bot_skills(bot_id: str, session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return list_workspace_bot_skills(bot_id)
@router.get("/api/bots/{bot_id}/skill-market")
def list_bot_skill_market(bot_id: str, session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return list_bot_skill_market_items(session, bot_id=bot_id)
@router.post("/api/bots/{bot_id}/skill-market/{skill_id}/install")
def install_bot_skill_from_market(bot_id: str, skill_id: int, session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return install_skill_market_item_for_bot(session, bot_id=bot_id, skill_id=skill_id)
@router.post("/api/bots/{bot_id}/skills/upload")
async def upload_bot_skill_zip(bot_id: str, file: UploadFile = File(...), session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return await upload_bot_skill_zip_to_workspace(bot_id, upload=file)
@router.delete("/api/bots/{bot_id}/skills/{skill_name}")
def delete_bot_skill(bot_id: str, skill_name: str, session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return delete_workspace_skill_entry(bot_id, skill_name=skill_name)


@@ -0,0 +1,73 @@
from fastapi import APIRouter, HTTPException
from core.speech_service import inspect_speech_model_status
from core.utils import _get_default_system_timezone
from schemas.system import SystemTemplatesUpdateRequest
from services.platform_settings_service import get_platform_settings_snapshot, get_speech_runtime_settings
from services.template_service import (
get_agent_md_templates,
get_topic_presets,
update_agent_md_templates,
update_topic_presets,
)
router = APIRouter()
@router.get("/api/system/defaults")
def get_system_defaults():
md_templates = get_agent_md_templates()
platform_settings = get_platform_settings_snapshot()
speech_settings = get_speech_runtime_settings()
model_status = inspect_speech_model_status()
return {
"templates": md_templates,
"limits": {
"upload_max_mb": platform_settings.upload_max_mb,
},
"workspace": {
"download_extensions": list(platform_settings.workspace_download_extensions),
"allowed_attachment_extensions": list(platform_settings.allowed_attachment_extensions),
},
"bot": {
"system_timezone": _get_default_system_timezone(),
},
"chat": {
"pull_page_size": platform_settings.chat_pull_page_size,
"page_size": platform_settings.page_size,
},
"topic_presets": get_topic_presets()["presets"],
"speech": {
"enabled": speech_settings["enabled"],
"model": speech_settings["model"],
"device": speech_settings["device"],
"max_audio_seconds": speech_settings["max_audio_seconds"],
"default_language": speech_settings["default_language"],
"ready": model_status["ready"],
"message": model_status["message"],
"expected_path": model_status["expected_path"],
},
}
@router.get("/api/system/templates")
def get_system_templates():
return {
"agent_md_templates": get_agent_md_templates(),
"topic_presets": get_topic_presets(),
}
@router.put("/api/system/templates")
def update_system_templates(payload: SystemTemplatesUpdateRequest):
if payload.agent_md_templates is not None:
update_agent_md_templates(payload.agent_md_templates.model_dump())
if payload.topic_presets is not None:
try:
update_topic_presets(payload.topic_presets)
except ValueError as exc:
raise HTTPException(status_code=400, detail=str(exc)) from exc
return {
"status": "ok",
"agent_md_templates": get_agent_md_templates(),
"topic_presets": get_topic_presets(),
}


@@ -1,42 +1,24 @@
import json
from datetime import datetime
from typing import Any, Dict, List, Optional
from typing import Any, Dict, Optional
from fastapi import APIRouter, Depends, HTTPException
from fastapi import APIRouter, Depends
from pydantic import BaseModel
from sqlalchemy import func
from sqlmodel import Session, select
from sqlmodel import Session
from core.database import get_session
from models.bot import BotInstance
from models.topic import TopicItem, TopicTopic
from services.topic_service import (
_TOPIC_KEY_RE,
_list_topics,
_normalize_topic_key,
_topic_item_to_dict,
_topic_to_dict,
create_topic,
delete_topic,
delete_topic_item,
get_topic_item_stats,
list_topic_items,
list_topics,
mark_topic_item_read,
update_topic,
)
router = APIRouter()
def _count_topic_items(
session: Session,
bot_id: str,
topic_key: Optional[str] = None,
unread_only: bool = False,
) -> int:
stmt = select(func.count()).select_from(TopicItem).where(TopicItem.bot_id == bot_id)
normalized_topic_key = _normalize_topic_key(topic_key or "")
if normalized_topic_key:
stmt = stmt.where(TopicItem.topic_key == normalized_topic_key)
if unread_only:
stmt = stmt.where(TopicItem.is_read == False) # noqa: E712
value = session.exec(stmt).one()
return int(value or 0)
class TopicCreateRequest(BaseModel):
topic_key: str
name: Optional[str] = None
@@ -56,112 +38,31 @@ class TopicUpdateRequest(BaseModel):
@router.get("/api/bots/{bot_id}/topics")
def list_bot_topics(bot_id: str, session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return _list_topics(session, bot_id)
return list_topics(session, bot_id)
@router.post("/api/bots/{bot_id}/topics")
def create_bot_topic(bot_id: str, payload: TopicCreateRequest, session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
topic_key = _normalize_topic_key(payload.topic_key)
if not topic_key:
raise HTTPException(status_code=400, detail="topic_key is required")
if not _TOPIC_KEY_RE.fullmatch(topic_key):
raise HTTPException(status_code=400, detail="invalid topic_key")
exists = session.exec(
select(TopicTopic)
.where(TopicTopic.bot_id == bot_id)
.where(TopicTopic.topic_key == topic_key)
.limit(1)
).first()
if exists:
raise HTTPException(status_code=400, detail=f"Topic already exists: {topic_key}")
now = datetime.utcnow()
row = TopicTopic(
return create_topic(
session,
bot_id=bot_id,
topic_key=topic_key,
name=str(payload.name or topic_key).strip() or topic_key,
description=str(payload.description or "").strip(),
is_active=bool(payload.is_active),
is_default_fallback=False,
routing_json=json.dumps(payload.routing or {}, ensure_ascii=False),
view_schema_json=json.dumps(payload.view_schema or {}, ensure_ascii=False),
created_at=now,
updated_at=now,
topic_key=payload.topic_key,
name=payload.name,
description=payload.description,
is_active=payload.is_active,
routing=payload.routing,
view_schema=payload.view_schema,
)
session.add(row)
session.commit()
session.refresh(row)
return _topic_to_dict(row)
@router.put("/api/bots/{bot_id}/topics/{topic_key}")
def update_bot_topic(bot_id: str, topic_key: str, payload: TopicUpdateRequest, session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
normalized_key = _normalize_topic_key(topic_key)
if not normalized_key:
raise HTTPException(status_code=400, detail="topic_key is required")
row = session.exec(
select(TopicTopic)
.where(TopicTopic.bot_id == bot_id)
.where(TopicTopic.topic_key == normalized_key)
.limit(1)
).first()
if not row:
raise HTTPException(status_code=404, detail="Topic not found")
update_data = payload.model_dump(exclude_unset=True)
if "name" in update_data:
row.name = str(update_data.get("name") or "").strip() or row.topic_key
if "description" in update_data:
row.description = str(update_data.get("description") or "").strip()
if "is_active" in update_data:
row.is_active = bool(update_data.get("is_active"))
if "routing" in update_data:
row.routing_json = json.dumps(update_data.get("routing") or {}, ensure_ascii=False)
if "view_schema" in update_data:
row.view_schema_json = json.dumps(update_data.get("view_schema") or {}, ensure_ascii=False)
row.is_default_fallback = False
row.updated_at = datetime.utcnow()
session.add(row)
session.commit()
session.refresh(row)
return _topic_to_dict(row)
return update_topic(session, bot_id=bot_id, topic_key=topic_key, updates=payload.model_dump(exclude_unset=True))
@router.delete("/api/bots/{bot_id}/topics/{topic_key}")
def delete_bot_topic(bot_id: str, topic_key: str, session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
normalized_key = _normalize_topic_key(topic_key)
if not normalized_key:
raise HTTPException(status_code=400, detail="topic_key is required")
row = session.exec(
select(TopicTopic)
.where(TopicTopic.bot_id == bot_id)
.where(TopicTopic.topic_key == normalized_key)
.limit(1)
).first()
if not row:
raise HTTPException(status_code=404, detail="Topic not found")
items = session.exec(
select(TopicItem)
.where(TopicItem.bot_id == bot_id)
.where(TopicItem.topic_key == normalized_key)
).all()
for item in items:
session.delete(item)
session.delete(row)
session.commit()
return {"status": "deleted", "bot_id": bot_id, "topic_key": normalized_key}
return delete_topic(session, bot_id=bot_id, topic_key=topic_key)
@router.get("/api/bots/{bot_id}/topic-items")
@@ -172,97 +73,19 @@ def list_bot_topic_items(
limit: int = 50,
session: Session = Depends(get_session),
):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
normalized_limit = max(1, min(int(limit or 50), 100))
stmt = select(TopicItem).where(TopicItem.bot_id == bot_id)
normalized_topic_key = _normalize_topic_key(topic_key or "")
if normalized_topic_key:
stmt = stmt.where(TopicItem.topic_key == normalized_topic_key)
if cursor is not None:
normalized_cursor = int(cursor)
if normalized_cursor > 0:
stmt = stmt.where(TopicItem.id < normalized_cursor)
rows = session.exec(
stmt.order_by(TopicItem.id.desc()).limit(normalized_limit + 1)
).all()
next_cursor: Optional[int] = None
if len(rows) > normalized_limit:
next_cursor = rows[-1].id
rows = rows[:normalized_limit]
return {
"bot_id": bot_id,
"topic_key": normalized_topic_key or None,
"items": [_topic_item_to_dict(row) for row in rows],
"next_cursor": next_cursor,
"unread_count": _count_topic_items(session, bot_id, normalized_topic_key, unread_only=True),
"total_unread_count": _count_topic_items(session, bot_id, unread_only=True),
}
return list_topic_items(session, bot_id=bot_id, topic_key=topic_key, cursor=cursor, limit=limit)
@router.get("/api/bots/{bot_id}/topic-items/stats")
def get_bot_topic_item_stats(bot_id: str, session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
latest_item = session.exec(
select(TopicItem)
.where(TopicItem.bot_id == bot_id)
.order_by(TopicItem.id.desc())
.limit(1)
).first()
return {
"bot_id": bot_id,
"total_count": _count_topic_items(session, bot_id),
"unread_count": _count_topic_items(session, bot_id, unread_only=True),
"latest_item_id": int(latest_item.id or 0) if latest_item and latest_item.id else None,
}
return get_topic_item_stats(session, bot_id=bot_id)
@router.post("/api/bots/{bot_id}/topic-items/{item_id}/read")
def mark_bot_topic_item_read(bot_id: str, item_id: int, session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
row = session.exec(
select(TopicItem)
.where(TopicItem.bot_id == bot_id)
.where(TopicItem.id == item_id)
.limit(1)
).first()
if not row:
raise HTTPException(status_code=404, detail="Topic item not found")
if not bool(row.is_read):
row.is_read = True
session.add(row)
session.commit()
session.refresh(row)
return {
"status": "updated",
"bot_id": bot_id,
"item": _topic_item_to_dict(row),
}
return mark_topic_item_read(session, bot_id=bot_id, item_id=item_id)
@router.delete("/api/bots/{bot_id}/topic-items/{item_id}")
def delete_bot_topic_item(bot_id: str, item_id: int, session: Session = Depends(get_session)):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
row = session.exec(
select(TopicItem)
.where(TopicItem.bot_id == bot_id)
.where(TopicItem.id == item_id)
.limit(1)
).first()
if not row:
raise HTTPException(status_code=404, detail="Topic item not found")
payload = _topic_item_to_dict(row)
session.delete(row)
session.commit()
return {
"status": "deleted",
"bot_id": bot_id,
"item": payload,
}
return delete_topic_item(session, bot_id=bot_id, item_id=item_id)


@@ -0,0 +1,176 @@
from typing import List, Optional
from fastapi import APIRouter, Depends, File, HTTPException, Request, UploadFile
from sqlmodel import Session
from core.database import get_session
from models.bot import BotInstance
from schemas.system import WorkspaceFileUpdateRequest, WorkspacePreviewUrlRequest
from services.workspace_service import (
create_workspace_html_preview_url,
get_workspace_tree_data,
read_workspace_text_file,
serve_workspace_preview_file,
serve_workspace_file,
update_workspace_markdown_file,
upload_workspace_files_to_workspace,
)
router = APIRouter()
@router.get("/api/preview/workspace/{preview_token}/{path:path}")
def preview_workspace_file(
preview_token: str,
path: str,
request: Request,
):
return serve_workspace_preview_file(
preview_token=preview_token,
path=path,
request=request,
)
@router.get("/api/bots/{bot_id}/workspace/tree")
def get_workspace_tree(
bot_id: str,
path: Optional[str] = None,
recursive: bool = False,
session: Session = Depends(get_session),
):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return get_workspace_tree_data(bot_id, path=path, recursive=recursive)
@router.get("/api/bots/{bot_id}/workspace/file")
def read_workspace_file(
bot_id: str,
path: str,
max_bytes: int = 200000,
session: Session = Depends(get_session),
):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return read_workspace_text_file(bot_id, path=path, max_bytes=max_bytes)
@router.put("/api/bots/{bot_id}/workspace/file")
def update_workspace_file(
bot_id: str,
path: str,
payload: WorkspaceFileUpdateRequest,
session: Session = Depends(get_session),
):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return update_workspace_markdown_file(bot_id, path=path, content=payload.content)
@router.post("/api/bots/{bot_id}/workspace/preview-url")
def create_workspace_preview_url(
bot_id: str,
payload: WorkspacePreviewUrlRequest,
session: Session = Depends(get_session),
):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return create_workspace_html_preview_url(
bot_id=bot_id,
path=payload.path,
ttl_seconds=payload.ttl_seconds,
)
@router.get("/api/bots/{bot_id}/workspace/download")
def download_workspace_file(
bot_id: str,
path: str,
download: bool = False,
request: Request = None,
session: Session = Depends(get_session),
):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return serve_workspace_file(
bot_id=bot_id,
path=path,
download=download,
request=request,
public=False,
redirect_html_to_raw=True,
)
@router.get("/public/bots/{bot_id}/workspace/download")
def public_download_workspace_file(
bot_id: str,
path: str,
download: bool = False,
request: Request = None,
session: Session = Depends(get_session),
):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return serve_workspace_file(
bot_id=bot_id,
path=path,
download=download,
request=request,
public=True,
redirect_html_to_raw=True,
)
@router.get("/api/bots/{bot_id}/workspace/raw/{path:path}")
def raw_workspace_file(
bot_id: str,
path: str,
download: bool = False,
request: Request = None,
session: Session = Depends(get_session),
):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return serve_workspace_file(
bot_id=bot_id,
path=path,
download=download,
request=request,
public=False,
redirect_html_to_raw=False,
)
@router.get("/public/bots/{bot_id}/workspace/raw/{path:path}")
def public_raw_workspace_file(
bot_id: str,
path: str,
download: bool = False,
request: Request = None,
session: Session = Depends(get_session),
):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return serve_workspace_file(
bot_id=bot_id,
path=path,
download=download,
request=request,
public=True,
redirect_html_to_raw=False,
)
@router.post("/api/bots/{bot_id}/workspace/upload")
async def upload_workspace_files(
bot_id: str,
files: List[UploadFile] = File(...),
path: Optional[str] = None,
session: Session = Depends(get_session),
):
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return await upload_workspace_files_to_workspace(bot_id, files=files, path=path)
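As a quick orientation for the upload route above, a hedged httpx sketch; authentication is enforced by AuthAccessMiddleware and omitted here, since the token header/cookie names are not visible in this diff:

```python
import httpx

# Illustrative client call against the multipart upload endpoint above.
def upload_report(base_url: str, bot_id: str) -> dict:
    with httpx.Client(base_url=base_url) as client:
        resp = client.post(
            f"/api/bots/{bot_id}/workspace/upload",
            params={"path": "reports"},  # optional target sub-directory (query param)
            files=[("files", ("report.md", b"# Weekly report\n", "text/markdown"))],
        )
        resp.raise_for_status()
        return resp.json()
```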

View File

@@ -0,0 +1,64 @@
import os
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from api.bot_config_router import router as bot_config_router
from api.bot_management_router import router as bot_management_router
from api.bot_router import router as bot_router
from api.bot_runtime_router import router as bot_runtime_router
from api.bot_speech_router import router as bot_speech_router
from api.chat_history_router import router as chat_history_router
from api.chat_router import router as chat_router
from api.health_router import router as health_router
from api.image_router import router as image_router
from api.panel_auth_router import router as panel_auth_router
from api.platform_router import router as platform_router
from api.skill_router import router as skill_router
from api.system_router import router as system_router
from api.topic_router import router as topic_router
from api.workspace_router import router as workspace_router
from bootstrap.app_runtime import register_app_runtime
from core.auth_middleware import AuthAccessMiddleware
from core.docker_instance import docker_manager
from core.settings import BOTS_WORKSPACE_ROOT, CORS_ALLOWED_ORIGINS, DATA_ROOT
from core.speech_service import WhisperSpeechService
def create_app() -> FastAPI:
app = FastAPI(title="Dashboard Nanobot API")
speech_service = WhisperSpeechService()
app.state.docker_manager = docker_manager
app.state.speech_service = speech_service
app.add_middleware(AuthAccessMiddleware)
app.add_middleware(
CORSMiddleware,
allow_origins=list(CORS_ALLOWED_ORIGINS),
allow_methods=["*"],
allow_headers=["*"],
allow_credentials=True,
)
app.include_router(panel_auth_router)
app.include_router(health_router)
app.include_router(platform_router)
app.include_router(topic_router)
app.include_router(system_router)
app.include_router(image_router)
app.include_router(skill_router)
app.include_router(chat_router)
app.include_router(chat_history_router)
app.include_router(bot_speech_router)
app.include_router(workspace_router)
app.include_router(bot_config_router)
app.include_router(bot_runtime_router)
app.include_router(bot_management_router)
app.include_router(bot_router)
os.makedirs(BOTS_WORKSPACE_ROOT, exist_ok=True)
os.makedirs(DATA_ROOT, exist_ok=True)
register_app_runtime(app)
return app
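A minimal ASGI entrypoint for this factory might look as follows; the module path of `create_app` is an assumption, since the file name is not shown in this view:

```python
# Hypothetical main.py
import uvicorn

from bootstrap.app_factory import create_app  # assumed module path for the factory above

app = create_app()

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
```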

View File

@@ -0,0 +1,53 @@
import asyncio
from fastapi import FastAPI
from sqlmodel import Session, select
from core.cache import cache
from core.database import engine, init_database
from core.docker_instance import docker_manager
from core.speech_service import inspect_speech_model_status
from core.settings import DATABASE_URL_DISPLAY, REDIS_ENABLED
from models.bot import BotInstance
from services.default_assets_service import validate_runtime_data_assets
from services.platform_activity_service import prune_expired_activity_events
from services.platform_settings_service import get_speech_runtime_settings
from services.runtime_service import docker_callback, set_main_loop
def reload_platform_runtime(app: FastAPI) -> None:
cache.delete_prefix("")
speech_service = getattr(app.state, "speech_service", None)
if speech_service is not None and hasattr(speech_service, "reset_runtime"):
speech_service.reset_runtime()
def register_app_runtime(app: FastAPI) -> None:
@app.on_event("startup")
async def _on_startup() -> None:
redis_state = "Disabled"
if REDIS_ENABLED:
redis_state = "Connected" if cache.enabled else f"Unavailable ({cache.status})"
print(
f"🚀 Dashboard Backend 启动中... (DB: {DATABASE_URL_DISPLAY}, REDIS: {redis_state})"
)
current_loop = asyncio.get_running_loop()
app.state.main_loop = current_loop
set_main_loop(current_loop)
validate_runtime_data_assets()
print("[init] data 目录校验通过")
init_database()
with Session(engine) as session:
prune_expired_activity_events(session, force=True)
bots = session.exec(select(BotInstance)).all()
for bot in bots:
docker_manager.ensure_monitor(bot.id, docker_callback)
speech_settings = get_speech_runtime_settings()
model_status = inspect_speech_model_status()
if speech_settings["enabled"]:
if model_status["ready"]:
print(f"🎙️ 语音识别模型就绪: {model_status['resolved_path']}")
else:
hint = f",请将模型文件放到 {model_status['expected_path']}" if model_status["expected_path"] else ""
print(f"⚠️ 语音识别模型未就绪: {model_status['message']}{hint}")
print("✅ 启动自检完成")

View File

@@ -0,0 +1,105 @@
from __future__ import annotations
from enum import Enum
from typing import Optional
class RouteAccessMode(str, Enum):
PUBLIC = "public"
PANEL_ONLY = "panel_only"
BOT_OR_PANEL = "bot_or_panel"
PUBLIC_BOT_OR_PANEL = "public_bot_or_panel"
_PUBLIC_EXACT_PATHS = {
"/api/health",
"/api/health/cache",
"/api/system/defaults",
}
_PANEL_AUTH_SEGMENTS = ("api", "panel", "auth")
_WORKSPACE_PREVIEW_SEGMENTS = ("api", "preview", "workspace")
_BOT_PUBLIC_SEGMENTS = ("public", "bots")
_BOT_API_SEGMENTS = ("api", "bots")
_BOT_AUTH_SEGMENT = "auth"
_BOT_PANEL_ONLY_ACTIONS = {"enable", "disable", "deactivate"}
_BOT_PUBLIC_AUTH_ACTIONS = {"login", "logout", "status"}
def _path_segments(path: str) -> list[str]:
raw = str(path or "").strip().strip("/")
if not raw:
return []
return [segment for segment in raw.split("/") if segment]
def extract_bot_id(path: str) -> Optional[str]:
segments = _path_segments(path)
if len(segments) < 3:
return None
if tuple(segments[:2]) not in {_BOT_API_SEGMENTS, _BOT_PUBLIC_SEGMENTS}:
return None
bot_id = str(segments[2] or "").strip()
return bot_id or None
def _is_panel_auth_route(segments: list[str]) -> bool:
return tuple(segments[:3]) == _PANEL_AUTH_SEGMENTS
def _is_workspace_preview_route(segments: list[str], method: str) -> bool:
return method == "GET" and tuple(segments[:3]) == _WORKSPACE_PREVIEW_SEGMENTS and len(segments) >= 5
def _is_public_bot_route(segments: list[str]) -> bool:
return tuple(segments[:2]) == _BOT_PUBLIC_SEGMENTS and len(segments) >= 3
def _is_bot_auth_route(segments: list[str]) -> bool:
return (
tuple(segments[:2]) == _BOT_API_SEGMENTS
and len(segments) >= 5
and segments[3] == _BOT_AUTH_SEGMENT
and segments[4] in _BOT_PUBLIC_AUTH_ACTIONS
)
def _is_panel_only_bot_action(segments: list[str], method: str) -> bool:
if tuple(segments[:2]) != _BOT_API_SEGMENTS or len(segments) < 3:
return False
if len(segments) == 3 and method == "DELETE":
return True
return len(segments) >= 4 and method == "POST" and segments[3] in _BOT_PANEL_ONLY_ACTIONS
def _is_bot_scoped_api_route(segments: list[str]) -> bool:
return tuple(segments[:2]) == _BOT_API_SEGMENTS and len(segments) >= 3
def resolve_route_access_mode(path: str, method: str) -> RouteAccessMode:
raw_path = str(path or "").strip()
verb = str(method or "GET").strip().upper()
segments = _path_segments(raw_path)
if raw_path in _PUBLIC_EXACT_PATHS:
return RouteAccessMode.PUBLIC
if _is_workspace_preview_route(segments, verb):
return RouteAccessMode.PUBLIC
if _is_panel_auth_route(segments) or _is_bot_auth_route(segments):
return RouteAccessMode.PUBLIC
if _is_public_bot_route(segments):
return RouteAccessMode.PUBLIC_BOT_OR_PANEL
if _is_panel_only_bot_action(segments, verb):
return RouteAccessMode.PANEL_ONLY
if _is_bot_scoped_api_route(segments):
return RouteAccessMode.BOT_OR_PANEL
if raw_path.startswith("/api/"):
return RouteAccessMode.PANEL_ONLY
return RouteAccessMode.PUBLIC
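A few worked classifications, derived directly from the rules above:

```python
# Worked examples for resolve_route_access_mode, following the rule order above.
assert resolve_route_access_mode("/api/health", "GET") is RouteAccessMode.PUBLIC
assert resolve_route_access_mode("/api/panel/auth/login", "POST") is RouteAccessMode.PUBLIC
assert resolve_route_access_mode("/api/bots/abc/auth/login", "POST") is RouteAccessMode.PUBLIC
assert resolve_route_access_mode("/public/bots/abc/workspace/raw/index.html", "GET") is RouteAccessMode.PUBLIC_BOT_OR_PANEL
assert resolve_route_access_mode("/api/bots/abc", "DELETE") is RouteAccessMode.PANEL_ONLY
assert resolve_route_access_mode("/api/bots/abc/enable", "POST") is RouteAccessMode.PANEL_ONLY
assert resolve_route_access_mode("/api/bots/abc/workspace/tree", "GET") is RouteAccessMode.BOT_OR_PANEL
assert resolve_route_access_mode("/api/system/settings", "GET") is RouteAccessMode.PANEL_ONLY
assert resolve_route_access_mode("/healthz", "GET") is RouteAccessMode.PUBLIC
```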

View File

@@ -0,0 +1,50 @@
from __future__ import annotations
from fastapi import Request
from fastapi.responses import JSONResponse
from sqlmodel import Session
from starlette.middleware.base import BaseHTTPMiddleware
from bootstrap.auth_access import RouteAccessMode, extract_bot_id, resolve_route_access_mode
from core.database import engine
from services.platform_auth_service import (
resolve_bot_request_auth,
resolve_panel_request_auth,
)
def _unauthorized(detail: str) -> JSONResponse:
return JSONResponse(status_code=401, content={"detail": detail})
class AuthAccessMiddleware(BaseHTTPMiddleware):
async def dispatch(self, request: Request, call_next):
if request.method.upper() == "OPTIONS":
return await call_next(request)
path = request.url.path
access_mode = resolve_route_access_mode(path, request.method)
if access_mode == RouteAccessMode.PUBLIC:
return await call_next(request)
bot_id = extract_bot_id(path)
with Session(engine) as session:
panel_principal = resolve_panel_request_auth(session, request)
if panel_principal.authenticated:
request.state.auth_principal = panel_principal
return await call_next(request)
if access_mode == RouteAccessMode.PANEL_ONLY:
return _unauthorized("Panel authentication required")
if not bot_id:
return _unauthorized("Bot authentication required")
bot_principal = resolve_bot_request_auth(session, request, bot_id)
if bot_principal.authenticated:
request.state.auth_principal = bot_principal
return await call_next(request)
if access_mode == RouteAccessMode.PUBLIC_BOT_OR_PANEL:
return _unauthorized("Bot or panel authentication required to access this resource")
return _unauthorized("Bot or panel authentication required")

View File

@@ -1,3 +1,5 @@
from __future__ import annotations
import json
from typing import Any, Optional
@@ -10,18 +12,32 @@ except Exception: # pragma: no cover
class RedisCache:
def __init__(self):
self.enabled = bool(REDIS_ENABLED and REDIS_URL and Redis is not None)
self.prefix = REDIS_PREFIX
self.default_ttl = int(REDIS_DEFAULT_TTL)
def __init__(self, *, prefix_override: Optional[str] = None, default_ttl_override: Optional[int] = None):
self.prefix = str(prefix_override or REDIS_PREFIX).strip() or REDIS_PREFIX
self.default_ttl = int(default_ttl_override if default_ttl_override is not None else REDIS_DEFAULT_TTL)
self.enabled = False
self.status = "disabled"
self.status_detail = ""
self._client: Optional["Redis"] = None
if self.enabled:
try:
self._client = Redis.from_url(REDIS_URL, decode_responses=True)
self._client.ping()
except Exception:
self.enabled = False
self._client = None
if not REDIS_ENABLED:
return
if not REDIS_URL:
self.status = "missing_url"
return
if Redis is None:
self.status = "client_unavailable"
self.status_detail = "redis python package is not installed"
return
try:
self._client = Redis.from_url(REDIS_URL, decode_responses=True)
self._client.ping()
self.enabled = True
self.status = "connected"
except Exception as exc:
self.enabled = False
self._client = None
self.status = "connection_failed"
self.status_detail = str(exc or "").strip()[:200]
def _full_key(self, key: str) -> str:
return f"{self.prefix}:{key}"
@@ -34,11 +50,28 @@ class RedisCache:
except Exception:
return False
def get(self, key: str) -> Optional[str]:
if not self.enabled or self._client is None:
return None
try:
return self._client.get(self._full_key(key))
except Exception:
return None
def set(self, key: str, value: str, ttl: Optional[int] = None) -> None:
if not self.enabled or self._client is None:
return
try:
ttl_seconds = int(ttl if ttl is not None else self.default_ttl)
self._client.setex(self._full_key(key), ttl_seconds, str(value))
except Exception:
return
def get_json(self, key: str) -> Any:
if not self.enabled or self._client is None:
return None
try:
raw = self._client.get(self._full_key(key))
raw = self.get(key)
if not raw:
return None
return json.loads(raw)
@@ -49,11 +82,46 @@ class RedisCache:
if not self.enabled or self._client is None:
return
try:
self._client.setex(
self._full_key(key),
int(ttl if ttl is not None else self.default_ttl),
json.dumps(value, ensure_ascii=False, default=str),
)
self.set(key, json.dumps(value, ensure_ascii=False, default=str), ttl=ttl)
except Exception:
return
def sadd(self, key: str, *members: str) -> None:
if not self.enabled or self._client is None:
return
normalized = [str(member or "").strip() for member in members if str(member or "").strip()]
if not normalized:
return
try:
self._client.sadd(self._full_key(key), *normalized)
except Exception:
return
def srem(self, key: str, *members: str) -> None:
if not self.enabled or self._client is None:
return
normalized = [str(member or "").strip() for member in members if str(member or "").strip()]
if not normalized:
return
try:
self._client.srem(self._full_key(key), *normalized)
except Exception:
return
def smembers(self, key: str) -> set[str]:
if not self.enabled or self._client is None:
return set()
try:
rows = self._client.smembers(self._full_key(key))
return {str(row or "").strip() for row in rows if str(row or "").strip()}
except Exception:
return set()
def expire(self, key: str, ttl: int) -> None:
if not self.enabled or self._client is None:
return
try:
self._client.expire(self._full_key(key), max(1, int(ttl)))
except Exception:
return
@@ -85,4 +153,4 @@
cache = RedisCache()
auth_cache = RedisCache(prefix_override=f"{REDIS_PREFIX}_auth")
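The new set/expire helpers plus the dedicated `auth_cache` instance are enough for simple token bookkeeping; an illustrative sketch (key names are made up):

```python
# Illustrative token bookkeeping on the auth-prefixed cache instance above.
def remember_panel_token(token: str, ttl_seconds: int = 3600) -> None:
    # Stored under "<REDIS_PREFIX>_auth:panel:tokens" via _full_key().
    auth_cache.sadd("panel:tokens", token)
    auth_cache.expire("panel:tokens", ttl_seconds)

def is_known_panel_token(token: str) -> bool:
    # smembers() returns an empty set when Redis is disabled or unreachable,
    # so unknown tokens and cache outages both read as "not known".
    return token in auth_cache.smembers("panel:tokens")
```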

View File

@@ -1,251 +0,0 @@
import json
import os
from typing import Any, Dict, List
from core.settings import (
DEFAULT_AGENTS_MD,
DEFAULT_IDENTITY_MD,
DEFAULT_SOUL_MD,
DEFAULT_TOOLS_MD,
DEFAULT_USER_MD,
)
class BotConfigManager:
def __init__(self, host_data_root: str):
self.host_data_root = host_data_root
def update_workspace(self, bot_id: str, bot_data: Dict[str, Any], channels: List[Dict[str, Any]]):
"""Generate/update nanobot workspace files and config.json."""
bot_dir = os.path.join(self.host_data_root, bot_id)
dot_nanobot_dir = os.path.join(bot_dir, ".nanobot")
workspace_dir = os.path.join(dot_nanobot_dir, "workspace")
memory_dir = os.path.join(workspace_dir, "memory")
skills_dir = os.path.join(workspace_dir, "skills")
for d in [dot_nanobot_dir, workspace_dir, memory_dir, skills_dir]:
os.makedirs(d, exist_ok=True)
raw_provider_name = (bot_data.get("llm_provider") or "openrouter").strip().lower()
provider_name = raw_provider_name
model_name = (bot_data.get("llm_model") or "openai/gpt-4o-mini").strip()
api_key = (bot_data.get("api_key") or "").strip()
api_base = (bot_data.get("api_base") or "").strip() or None
provider_alias = {
"aliyun": "dashscope",
"qwen": "dashscope",
"aliyun-qwen": "dashscope",
"moonshot": "kimi",
# Xunfei Spark provides OpenAI-compatible endpoint.
"xunfei": "openai",
"iflytek": "openai",
"xfyun": "openai",
}
provider_name = provider_alias.get(provider_name, provider_name)
if provider_name == "openai" and raw_provider_name in {"xunfei", "iflytek", "xfyun"}:
if model_name and "/" not in model_name:
model_name = f"openai/{model_name}"
provider_cfg: Dict[str, Any] = {
"apiKey": api_key,
}
if api_base:
provider_cfg["apiBase"] = api_base
channels_cfg: Dict[str, Any] = {
"sendProgress": bool(bot_data.get("send_progress", False)),
"sendToolHints": bool(bot_data.get("send_tool_hints", False)),
}
existing_config: Dict[str, Any] = {}
config_path = os.path.join(dot_nanobot_dir, "config.json")
if os.path.isfile(config_path):
try:
with open(config_path, "r", encoding="utf-8") as f:
loaded = json.load(f)
if isinstance(loaded, dict):
existing_config = loaded
except Exception:
existing_config = {}
existing_tools = existing_config.get("tools")
tools_cfg: Dict[str, Any] = dict(existing_tools) if isinstance(existing_tools, dict) else {}
if "mcp_servers" in bot_data:
mcp_servers = bot_data.get("mcp_servers")
if isinstance(mcp_servers, dict):
tools_cfg["mcpServers"] = mcp_servers
config_data: Dict[str, Any] = {
"agents": {
"defaults": {
"model": model_name,
"temperature": float(bot_data.get("temperature") or 0.2),
"topP": float(bot_data.get("top_p") or 1.0),
"maxTokens": int(bot_data.get("max_tokens") or 8192),
}
},
"providers": {
provider_name: provider_cfg,
},
"channels": channels_cfg,
}
if tools_cfg:
config_data["tools"] = tools_cfg
existing_channels = existing_config.get("channels")
existing_dashboard_cfg = (
existing_channels.get("dashboard")
if isinstance(existing_channels, dict) and isinstance(existing_channels.get("dashboard"), dict)
else {}
)
dashboard_cfg: Dict[str, Any] = {
"enabled": True,
"host": "0.0.0.0",
"port": 9000,
"allowFrom": ["*"],
}
for key in ("host", "port", "allowFrom"):
if key in existing_dashboard_cfg:
dashboard_cfg[key] = existing_dashboard_cfg[key]
channels_cfg["dashboard"] = dashboard_cfg
for channel in channels:
channel_type = (channel.get("channel_type") or "").strip()
if not channel_type:
continue
raw_extra = channel.get("extra_config")
extra: Dict[str, Any] = {}
if isinstance(raw_extra, str) and raw_extra.strip():
try:
parsed = json.loads(raw_extra)
if isinstance(parsed, dict):
extra = parsed
except Exception:
extra = {}
elif isinstance(raw_extra, dict):
extra = raw_extra
# Dashboard channel is deprecated in DB routing. Global flags now come from bot fields.
if channel_type == "dashboard":
continue
enabled = bool(channel.get("is_active", True))
external = channel.get("external_app_id", "") or ""
secret = channel.get("app_secret", "") or ""
if channel_type == "telegram":
channels_cfg["telegram"] = {
"enabled": enabled,
"token": secret,
"proxy": extra.get("proxy", ""),
"replyToMessage": bool(extra.get("replyToMessage", False)),
"allowFrom": self._normalize_allow_from(extra.get("allowFrom", [])),
}
continue
if channel_type == "feishu":
channels_cfg["feishu"] = {
"enabled": enabled,
"appId": external,
"appSecret": secret,
"encryptKey": extra.get("encryptKey", ""),
"verificationToken": extra.get("verificationToken", ""),
"allowFrom": self._normalize_allow_from(extra.get("allowFrom", [])),
}
continue
if channel_type == "dingtalk":
channels_cfg["dingtalk"] = {
"enabled": enabled,
"clientId": external,
"clientSecret": secret,
"allowFrom": self._normalize_allow_from(extra.get("allowFrom", [])),
}
continue
if channel_type == "slack":
channels_cfg["slack"] = {
"enabled": enabled,
"mode": extra.get("mode", "socket"),
"botToken": external,
"appToken": secret,
"replyInThread": bool(extra.get("replyInThread", True)),
"groupPolicy": extra.get("groupPolicy", "mention"),
"groupAllowFrom": extra.get("groupAllowFrom", []),
"reactEmoji": extra.get("reactEmoji", "eyes"),
}
continue
if channel_type == "qq":
channels_cfg["qq"] = {
"enabled": enabled,
"appId": external,
"secret": secret,
"allowFrom": self._normalize_allow_from(extra.get("allowFrom", [])),
}
continue
if channel_type == "email":
channels_cfg["email"] = {
"enabled": enabled,
"consentGranted": bool(extra.get("consentGranted", False)),
"imapHost": extra.get("imapHost", ""),
"imapPort": max(1, min(int(extra.get("imapPort", 993) or 993), 65535)),
"imapUsername": extra.get("imapUsername", ""),
"imapPassword": extra.get("imapPassword", ""),
"imapMailbox": extra.get("imapMailbox", "INBOX"),
"imapUseSsl": bool(extra.get("imapUseSsl", True)),
"smtpHost": extra.get("smtpHost", ""),
"smtpPort": max(1, min(int(extra.get("smtpPort", 587) or 587), 65535)),
"smtpUsername": extra.get("smtpUsername", ""),
"smtpPassword": extra.get("smtpPassword", ""),
"smtpUseTls": bool(extra.get("smtpUseTls", True)),
"smtpUseSsl": bool(extra.get("smtpUseSsl", False)),
"fromAddress": extra.get("fromAddress", ""),
"autoReplyEnabled": bool(extra.get("autoReplyEnabled", True)),
"pollIntervalSeconds": max(5, int(extra.get("pollIntervalSeconds", 30) or 30)),
"markSeen": bool(extra.get("markSeen", True)),
"maxBodyChars": max(1, int(extra.get("maxBodyChars", 12000) or 12000)),
"subjectPrefix": extra.get("subjectPrefix", "Re: "),
"allowFrom": self._normalize_allow_from(extra.get("allowFrom", [])),
}
continue
# Fallback for future custom channels.
channels_cfg[channel_type] = {
"enabled": enabled,
"appId": external,
"appSecret": secret,
**extra,
}
with open(config_path, "w", encoding="utf-8") as f:
json.dump(config_data, f, indent=4, ensure_ascii=False)
bootstrap_files = {
"AGENTS.md": bot_data.get("agents_md") or DEFAULT_AGENTS_MD,
"SOUL.md": bot_data.get("soul_md") or bot_data.get("system_prompt") or DEFAULT_SOUL_MD,
"USER.md": bot_data.get("user_md") or DEFAULT_USER_MD,
"TOOLS.md": bot_data.get("tools_md") or DEFAULT_TOOLS_MD,
"IDENTITY.md": bot_data.get("identity_md") or DEFAULT_IDENTITY_MD,
}
for filename, content in bootstrap_files.items():
file_path = os.path.join(workspace_dir, filename)
with open(file_path, "w", encoding="utf-8") as f:
f.write(str(content).strip() + "\n")
return dot_nanobot_dir
@staticmethod
def _normalize_allow_from(raw: Any) -> List[str]:
rows: List[str] = []
if isinstance(raw, list):
for item in raw:
text = str(item or "").strip()
if text and text not in rows:
rows.append(text)
if not rows:
return ["*"]
return rows
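For orientation, the `config.json` this (now removed) manager wrote for a Telegram-only bot on default settings would look roughly like the following; all values are illustrative and derived from the assembly logic above:

```json
{
    "agents": {
        "defaults": {
            "model": "openai/gpt-4o-mini",
            "temperature": 0.2,
            "topP": 1.0,
            "maxTokens": 8192
        }
    },
    "providers": {
        "openrouter": {
            "apiKey": "sk-..."
        }
    },
    "channels": {
        "sendProgress": false,
        "sendToolHints": false,
        "dashboard": {
            "enabled": true,
            "host": "0.0.0.0",
            "port": 9000,
            "allowFrom": ["*"]
        },
        "telegram": {
            "enabled": true,
            "token": "123456:ABC...",
            "proxy": "",
            "replyToMessage": false,
            "allowFrom": ["*"]
        }
    }
}
```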

View File

@@ -1,9 +1,8 @@
from sqlalchemy import inspect, text
from sqlmodel import SQLModel, Session, create_engine
from sqlmodel import Session, create_engine
from core.settings import (
DATABASE_ECHO,
DATABASE_ENGINE,
DATABASE_MAX_OVERFLOW,
DATABASE_POOL_RECYCLE,
DATABASE_POOL_SIZE,
@@ -11,27 +10,14 @@ from core.settings import (
DATABASE_URL,
)
# Ensure table models are registered in SQLModel metadata before create_all.
from models import bot as _bot_models # noqa: F401
from models import platform as _platform_models # noqa: F401
from models import skill as _skill_models # noqa: F401
from models import topic as _topic_models # noqa: F401
_engine_kwargs = {
"echo": DATABASE_ECHO,
"pool_pre_ping": True,
"pool_size": DATABASE_POOL_SIZE,
"max_overflow": DATABASE_MAX_OVERFLOW,
"pool_timeout": DATABASE_POOL_TIMEOUT,
"pool_recycle": DATABASE_POOL_RECYCLE,
}
if DATABASE_ENGINE == "sqlite":
_engine_kwargs["connect_args"] = {"check_same_thread": False}
else:
_engine_kwargs.update(
{
"pool_pre_ping": True,
"pool_size": DATABASE_POOL_SIZE,
"max_overflow": DATABASE_MAX_OVERFLOW,
"pool_timeout": DATABASE_POOL_TIMEOUT,
"pool_recycle": DATABASE_POOL_RECYCLE,
}
)
engine = create_engine(DATABASE_URL, **_engine_kwargs)
@@ -40,762 +26,69 @@ BOT_MESSAGE_TABLE = "bot_message"
BOT_IMAGE_TABLE = "bot_image"
BOT_REQUEST_USAGE_TABLE = "bot_request_usage"
BOT_ACTIVITY_EVENT_TABLE = "bot_activity_event"
SYS_LOGIN_LOG_TABLE = "sys_login_log"
SYS_SETTING_TABLE = "sys_setting"
POSTGRES_MIGRATION_LOCK_KEY = 2026031801
MYSQL_MIGRATION_LOCK_NAME = "dashboard_nanobot_schema_migration"
LEGACY_TABLE_PAIRS = [
("botinstance", BOT_INSTANCE_TABLE),
("botmessage", BOT_MESSAGE_TABLE),
("nanobotimage", BOT_IMAGE_TABLE),
("platformsetting", SYS_SETTING_TABLE),
("botrequestusage", BOT_REQUEST_USAGE_TABLE),
("botactivityevent", BOT_ACTIVITY_EVENT_TABLE),
]
REQUIRED_TABLES = (
BOT_INSTANCE_TABLE,
BOT_MESSAGE_TABLE,
BOT_IMAGE_TABLE,
BOT_REQUEST_USAGE_TABLE,
BOT_ACTIVITY_EVENT_TABLE,
SYS_LOGIN_LOG_TABLE,
SYS_SETTING_TABLE,
"skill_market_item",
"bot_skill_install",
"topic_topic",
"topic_item",
)
REQUIRED_SYS_SETTING_KEYS = (
"page_size",
"chat_pull_page_size",
"auth_token_ttl_hours",
"auth_token_max_active",
"upload_max_mb",
"allowed_attachment_extensions",
"workspace_download_extensions",
"speech_enabled",
"activity_event_retention_days",
)
def _quote_ident(name: str) -> str:
if engine.dialect.name == "mysql":
return f"`{str(name).replace('`', '``')}`"
return f'"{str(name).replace(chr(34), chr(34) * 2)}"'
def _rename_table_if_needed(old_name: str, new_name: str) -> None:
def _validate_required_tables() -> None:
inspector = inspect(engine)
if not inspector.has_table(old_name) or inspector.has_table(new_name):
return
dialect = engine.dialect.name
with engine.connect() as conn:
if dialect == "mysql":
conn.execute(text(f"RENAME TABLE `{old_name}` TO `{new_name}`"))
else:
conn.execute(text(f'ALTER TABLE "{old_name}" RENAME TO "{new_name}"'))
conn.commit()
def _rename_legacy_tables() -> None:
_rename_table_if_needed("botinstance", BOT_INSTANCE_TABLE)
_rename_table_if_needed("botmessage", BOT_MESSAGE_TABLE)
_rename_table_if_needed("nanobotimage", BOT_IMAGE_TABLE)
_rename_table_if_needed("platformsetting", SYS_SETTING_TABLE)
_rename_table_if_needed("botrequestusage", BOT_REQUEST_USAGE_TABLE)
_rename_table_if_needed("botactivityevent", BOT_ACTIVITY_EVENT_TABLE)
def _acquire_migration_lock():
if engine.dialect.name == "postgresql":
conn = engine.connect()
conn.execute(text("SELECT pg_advisory_lock(:key)"), {"key": POSTGRES_MIGRATION_LOCK_KEY})
return conn
if engine.dialect.name == "mysql":
conn = engine.connect()
acquired = conn.execute(
text("SELECT GET_LOCK(:name, :timeout)"),
{"name": MYSQL_MIGRATION_LOCK_NAME, "timeout": 120},
).scalar()
if int(acquired or 0) != 1:
conn.close()
raise RuntimeError("Failed to acquire schema migration lock")
return conn
return None
def _release_migration_lock(lock_conn) -> None:
if lock_conn is None:
return
try:
if engine.dialect.name == "postgresql":
lock_conn.execute(text("SELECT pg_advisory_unlock(:key)"), {"key": POSTGRES_MIGRATION_LOCK_KEY})
elif engine.dialect.name == "mysql":
lock_conn.execute(text("SELECT RELEASE_LOCK(:name)"), {"name": MYSQL_MIGRATION_LOCK_NAME})
finally:
lock_conn.close()
def _table_row_count(table_name: str) -> int:
inspector = inspect(engine)
if not inspector.has_table(table_name):
return 0
with engine.connect() as conn:
value = conn.execute(text(f"SELECT COUNT(*) FROM {_quote_ident(table_name)}")).scalar()
return int(value or 0)
def _copy_legacy_table_rows(old_name: str, new_name: str) -> None:
inspector = inspect(engine)
if not inspector.has_table(old_name) or not inspector.has_table(new_name):
return
if _table_row_count(old_name) <= 0:
return
old_columns = {
str(row.get("name"))
for row in inspector.get_columns(old_name)
if row.get("name")
}
new_columns = [
str(row.get("name"))
for row in inspector.get_columns(new_name)
if row.get("name")
]
shared_columns = [col for col in new_columns if col in old_columns]
if not shared_columns:
return
pk = inspector.get_pk_constraint(new_name) or {}
pk_columns = [
str(col)
for col in (pk.get("constrained_columns") or [])
if col and col in shared_columns and col in old_columns
]
if not pk_columns:
return
columns_sql = ", ".join(_quote_ident(col) for col in shared_columns)
join_sql = " AND ".join(
f'n.{_quote_ident(col)} = o.{_quote_ident(col)}'
for col in pk_columns
)
null_check_col = _quote_ident(pk_columns[0])
with engine.connect() as conn:
conn.execute(
text(
f"INSERT INTO {_quote_ident(new_name)} ({columns_sql}) "
f"SELECT {', '.join(f'o.{_quote_ident(col)}' for col in shared_columns)} "
f"FROM {_quote_ident(old_name)} o "
f"LEFT JOIN {_quote_ident(new_name)} n ON {join_sql} "
f"WHERE n.{null_check_col} IS NULL"
)
)
conn.commit()
def _migrate_legacy_table_rows() -> None:
for old_name, new_name in LEGACY_TABLE_PAIRS:
_copy_legacy_table_rows(old_name, new_name)
def _topic_fk_target(table_name: str, constrained_column: str = "bot_id") -> str | None:
inspector = inspect(engine)
if not inspector.has_table(table_name):
return None
for fk in inspector.get_foreign_keys(table_name):
cols = [str(col) for col in (fk.get("constrained_columns") or []) if col]
if cols == [constrained_column]:
referred = fk.get("referred_table")
return str(referred) if referred else None
return None
def _repair_postgres_topic_foreign_keys() -> None:
if engine.dialect.name != "postgresql":
return
targets = {
"topic_topic": "topic_topic_bot_id_fkey",
"topic_item": "topic_item_bot_id_fkey",
}
with engine.connect() as conn:
changed = False
for table_name, constraint_name in targets.items():
if _topic_fk_target(table_name) == BOT_INSTANCE_TABLE:
continue
conn.execute(
text(
f'ALTER TABLE {_quote_ident(table_name)} '
f'DROP CONSTRAINT IF EXISTS {_quote_ident(constraint_name)}'
)
)
conn.execute(
text(
f'ALTER TABLE {_quote_ident(table_name)} '
f'ADD CONSTRAINT {_quote_ident(constraint_name)} '
f'FOREIGN KEY ({_quote_ident("bot_id")}) '
f'REFERENCES {_quote_ident(BOT_INSTANCE_TABLE)}({_quote_ident("id")}) '
f'ON DELETE CASCADE'
)
)
changed = True
if changed:
conn.commit()
def _legacy_rows_missing_in_new(old_name: str, new_name: str) -> int:
inspector = inspect(engine)
if not inspector.has_table(old_name) or not inspector.has_table(new_name):
return 0
pk = inspector.get_pk_constraint(new_name) or {}
pk_columns = [
str(col)
for col in (pk.get("constrained_columns") or [])
if col
]
if not pk_columns:
return _table_row_count(old_name)
join_sql = " AND ".join(
f'n.{_quote_ident(col)} = o.{_quote_ident(col)}'
for col in pk_columns
)
null_check_col = _quote_ident(pk_columns[0])
with engine.connect() as conn:
value = conn.execute(
text(
f'SELECT COUNT(*) FROM {_quote_ident(old_name)} o '
f'LEFT JOIN {_quote_ident(new_name)} n ON {join_sql} '
f'WHERE n.{null_check_col} IS NULL'
)
).scalar()
return int(value or 0)
def _drop_legacy_tables() -> None:
droppable = [
old_name
for old_name, new_name in LEGACY_TABLE_PAIRS
if _legacy_rows_missing_in_new(old_name, new_name) <= 0
]
if not droppable:
return
with engine.connect() as conn:
for old_name in droppable:
if engine.dialect.name == "postgresql":
conn.execute(text(f'DROP TABLE IF EXISTS {_quote_ident(old_name)} CASCADE'))
else:
conn.execute(text(f'DROP TABLE IF EXISTS {_quote_ident(old_name)}'))
conn.commit()
def _ensure_botinstance_columns() -> None:
dialect = engine.dialect.name
required_columns = {
"current_state": {
"sqlite": "TEXT DEFAULT 'IDLE'",
"postgresql": "TEXT DEFAULT 'IDLE'",
"mysql": "VARCHAR(64) DEFAULT 'IDLE'",
},
"last_action": {
"sqlite": "TEXT",
"postgresql": "TEXT",
"mysql": "LONGTEXT",
},
"image_tag": {
"sqlite": "TEXT DEFAULT 'nanobot-base:v0.1.4'",
"postgresql": "TEXT DEFAULT 'nanobot-base:v0.1.4'",
"mysql": "VARCHAR(255) DEFAULT 'nanobot-base:v0.1.4'",
},
"access_password": {
"sqlite": "TEXT DEFAULT ''",
"postgresql": "TEXT DEFAULT ''",
"mysql": "VARCHAR(255) DEFAULT ''",
},
"enabled": {
"sqlite": "INTEGER NOT NULL DEFAULT 1",
"postgresql": "BOOLEAN NOT NULL DEFAULT TRUE",
"mysql": "BOOLEAN NOT NULL DEFAULT TRUE",
},
}
inspector = inspect(engine)
if not inspector.has_table(BOT_INSTANCE_TABLE):
return
with engine.connect() as conn:
existing = {
str(row.get("name"))
for row in inspect(conn).get_columns(BOT_INSTANCE_TABLE)
if row.get("name")
}
for col, ddl_map in required_columns.items():
if col in existing:
continue
ddl = ddl_map.get(dialect) or ddl_map.get("sqlite")
conn.execute(text(f"ALTER TABLE {BOT_INSTANCE_TABLE} ADD COLUMN {col} {ddl}"))
if "enabled" in existing:
if dialect == "sqlite":
conn.execute(text(f"UPDATE {BOT_INSTANCE_TABLE} SET enabled = 1 WHERE enabled IS NULL"))
else:
conn.execute(text(f"UPDATE {BOT_INSTANCE_TABLE} SET enabled = TRUE WHERE enabled IS NULL"))
conn.commit()
def _drop_legacy_botinstance_columns() -> None:
legacy_columns = [
"avatar_model",
"avatar_skin",
"system_prompt",
"soul_md",
"agents_md",
"user_md",
"tools_md",
"tools_config_json",
"identity_md",
"llm_provider",
"llm_model",
"api_key",
"api_base",
"temperature",
"top_p",
"max_tokens",
"presence_penalty",
"frequency_penalty",
"send_progress",
"send_tool_hints",
"bot_env_json",
]
with engine.connect() as conn:
existing = {
str(col.get("name"))
for col in inspect(conn).get_columns(BOT_INSTANCE_TABLE)
if col.get("name")
}
for col in legacy_columns:
if col not in existing:
continue
try:
if engine.dialect.name == "mysql":
conn.execute(text(f"ALTER TABLE {BOT_INSTANCE_TABLE} DROP COLUMN `{col}`"))
elif engine.dialect.name == "sqlite":
conn.execute(text(f'ALTER TABLE {BOT_INSTANCE_TABLE} DROP COLUMN "{col}"'))
else:
conn.execute(text(f'ALTER TABLE {BOT_INSTANCE_TABLE} DROP COLUMN IF EXISTS "{col}"'))
except Exception:
# Keep startup resilient on mixed/legacy database engines.
continue
conn.commit()
def _ensure_botmessage_columns() -> None:
if engine.dialect.name != "sqlite":
return
required_columns = {
"media_json": "TEXT",
"feedback": "TEXT",
"feedback_at": "DATETIME",
}
with engine.connect() as conn:
existing_rows = conn.execute(text(f"PRAGMA table_info({BOT_MESSAGE_TABLE})")).fetchall()
existing = {str(row[1]) for row in existing_rows}
for col, ddl in required_columns.items():
if col in existing:
continue
conn.execute(text(f"ALTER TABLE {BOT_MESSAGE_TABLE} ADD COLUMN {col} {ddl}"))
conn.commit()
def _drop_legacy_skill_tables() -> None:
"""Drop deprecated skill registry tables (moved to workspace filesystem mode)."""
with engine.connect() as conn:
conn.execute(text("DROP TABLE IF EXISTS botskillmapping"))
conn.execute(text("DROP TABLE IF EXISTS skillregistry"))
conn.commit()
def _ensure_sys_setting_columns() -> None:
dialect = engine.dialect.name
required_columns = {
"name": {
"sqlite": "TEXT NOT NULL DEFAULT ''",
"postgresql": "TEXT NOT NULL DEFAULT ''",
"mysql": "VARCHAR(200) NOT NULL DEFAULT ''",
},
"category": {
"sqlite": "TEXT NOT NULL DEFAULT 'general'",
"postgresql": "TEXT NOT NULL DEFAULT 'general'",
"mysql": "VARCHAR(64) NOT NULL DEFAULT 'general'",
},
"description": {
"sqlite": "TEXT NOT NULL DEFAULT ''",
"postgresql": "TEXT NOT NULL DEFAULT ''",
"mysql": "LONGTEXT",
},
"value_type": {
"sqlite": "TEXT NOT NULL DEFAULT 'json'",
"postgresql": "TEXT NOT NULL DEFAULT 'json'",
"mysql": "VARCHAR(32) NOT NULL DEFAULT 'json'",
},
"is_public": {
"sqlite": "INTEGER NOT NULL DEFAULT 0",
"postgresql": "BOOLEAN NOT NULL DEFAULT FALSE",
"mysql": "BOOLEAN NOT NULL DEFAULT FALSE",
},
"sort_order": {
"sqlite": "INTEGER NOT NULL DEFAULT 100",
"postgresql": "INTEGER NOT NULL DEFAULT 100",
"mysql": "INTEGER NOT NULL DEFAULT 100",
},
}
inspector = inspect(engine)
if not inspector.has_table(SYS_SETTING_TABLE):
return
with engine.connect() as conn:
existing = {
str(row.get("name"))
for row in inspect(conn).get_columns(SYS_SETTING_TABLE)
if row.get("name")
}
for col, ddl_map in required_columns.items():
if col in existing:
continue
ddl = ddl_map.get(dialect) or ddl_map.get("sqlite")
conn.execute(text(f"ALTER TABLE {SYS_SETTING_TABLE} ADD COLUMN {col} {ddl}"))
conn.commit()
def _ensure_bot_request_usage_columns() -> None:
dialect = engine.dialect.name
required_columns = {
"message_id": {
"sqlite": "INTEGER",
"postgresql": "INTEGER",
"mysql": "INTEGER",
},
"provider": {
"sqlite": "TEXT",
"postgresql": "TEXT",
"mysql": "VARCHAR(120)",
},
"model": {
"sqlite": "TEXT",
"postgresql": "TEXT",
"mysql": "VARCHAR(255)",
},
}
inspector = inspect(engine)
if not inspector.has_table(BOT_REQUEST_USAGE_TABLE):
return
with engine.connect() as conn:
existing = {
str(row.get("name"))
for row in inspect(conn).get_columns(BOT_REQUEST_USAGE_TABLE)
if row.get("name")
}
for col, ddl_map in required_columns.items():
if col in existing:
continue
ddl = ddl_map.get(dialect) or ddl_map.get("sqlite")
conn.execute(text(f"ALTER TABLE {BOT_REQUEST_USAGE_TABLE} ADD COLUMN {col} {ddl}"))
conn.commit()
def _ensure_topic_tables_sqlite() -> None:
if engine.dialect.name != "sqlite":
return
with engine.connect() as conn:
conn.execute(
text(
"""
CREATE TABLE IF NOT EXISTS topic_topic (
id INTEGER PRIMARY KEY AUTOINCREMENT,
bot_id TEXT NOT NULL,
topic_key TEXT NOT NULL,
name TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
is_active INTEGER NOT NULL DEFAULT 1,
is_default_fallback INTEGER NOT NULL DEFAULT 0,
routing_json TEXT NOT NULL DEFAULT '{}',
view_schema_json TEXT NOT NULL DEFAULT '{}',
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY(bot_id) REFERENCES bot_instance(id)
)
"""
)
)
conn.execute(
text(
"""
CREATE TABLE IF NOT EXISTS topic_item (
id INTEGER PRIMARY KEY AUTOINCREMENT,
bot_id TEXT NOT NULL,
topic_key TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
content TEXT NOT NULL DEFAULT '',
level TEXT NOT NULL DEFAULT 'info',
tags_json TEXT,
view_json TEXT,
source TEXT NOT NULL DEFAULT 'mcp',
dedupe_key TEXT,
is_read INTEGER NOT NULL DEFAULT 0,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY(bot_id) REFERENCES bot_instance(id)
)
"""
)
missing = [table_name for table_name in REQUIRED_TABLES if not inspector.has_table(table_name)]
if missing:
raise RuntimeError(
"Database schema is not initialized. "
f"Missing tables: {', '.join(missing)}. "
"Run scripts/init-full-db.sh or apply scripts/sql/create-tables.sql before starting the backend."
)
conn.execute(text("CREATE UNIQUE INDEX IF NOT EXISTS uq_topic_topic_bot_topic_key ON topic_topic(bot_id, topic_key)"))
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_topic_bot_id ON topic_topic(bot_id)"))
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_topic_topic_key ON topic_topic(topic_key)"))
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_topic_bot_fallback ON topic_topic(bot_id, is_default_fallback)"))
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_bot_id ON topic_item(bot_id)"))
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_topic_key ON topic_item(topic_key)"))
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_level ON topic_item(level)"))
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_source ON topic_item(source)"))
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_is_read ON topic_item(is_read)"))
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_created_at ON topic_item(created_at)"))
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_bot_topic_created_at ON topic_item(bot_id, topic_key, created_at)"))
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_topic_item_bot_dedupe ON topic_item(bot_id, dedupe_key)"))
conn.commit()
def _ensure_topic_columns() -> None:
dialect = engine.dialect.name
required_columns = {
"topic_topic": {
"name": {
"sqlite": "TEXT NOT NULL DEFAULT ''",
"postgresql": "TEXT NOT NULL DEFAULT ''",
"mysql": "VARCHAR(255) NOT NULL DEFAULT ''",
},
"description": {
"sqlite": "TEXT NOT NULL DEFAULT ''",
"postgresql": "TEXT NOT NULL DEFAULT ''",
"mysql": "LONGTEXT",
},
"is_active": {
"sqlite": "INTEGER NOT NULL DEFAULT 1",
"postgresql": "BOOLEAN NOT NULL DEFAULT TRUE",
"mysql": "BOOLEAN NOT NULL DEFAULT TRUE",
},
"is_default_fallback": {
"sqlite": "INTEGER NOT NULL DEFAULT 0",
"postgresql": "BOOLEAN NOT NULL DEFAULT FALSE",
"mysql": "BOOLEAN NOT NULL DEFAULT FALSE",
},
"routing_json": {
"sqlite": "TEXT NOT NULL DEFAULT '{}'",
"postgresql": "TEXT NOT NULL DEFAULT '{}'",
"mysql": "LONGTEXT",
},
"view_schema_json": {
"sqlite": "TEXT NOT NULL DEFAULT '{}'",
"postgresql": "TEXT NOT NULL DEFAULT '{}'",
"mysql": "LONGTEXT",
},
"created_at": {
"sqlite": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
"postgresql": "TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP",
"mysql": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
},
"updated_at": {
"sqlite": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
"postgresql": "TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP",
"mysql": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
},
},
"topic_item": {
"title": {
"sqlite": "TEXT NOT NULL DEFAULT ''",
"postgresql": "TEXT NOT NULL DEFAULT ''",
"mysql": "VARCHAR(2000) NOT NULL DEFAULT ''",
},
"level": {
"sqlite": "TEXT NOT NULL DEFAULT 'info'",
"postgresql": "TEXT NOT NULL DEFAULT 'info'",
"mysql": "VARCHAR(32) NOT NULL DEFAULT 'info'",
},
"tags_json": {
"sqlite": "TEXT",
"postgresql": "TEXT",
"mysql": "LONGTEXT",
},
"view_json": {
"sqlite": "TEXT",
"postgresql": "TEXT",
"mysql": "LONGTEXT",
},
"source": {
"sqlite": "TEXT NOT NULL DEFAULT 'mcp'",
"postgresql": "TEXT NOT NULL DEFAULT 'mcp'",
"mysql": "VARCHAR(64) NOT NULL DEFAULT 'mcp'",
},
"dedupe_key": {
"sqlite": "TEXT",
"postgresql": "TEXT",
"mysql": "VARCHAR(200)",
},
"is_read": {
"sqlite": "INTEGER NOT NULL DEFAULT 0",
"postgresql": "BOOLEAN NOT NULL DEFAULT FALSE",
"mysql": "BOOLEAN NOT NULL DEFAULT FALSE",
},
"created_at": {
"sqlite": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
"postgresql": "TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP",
"mysql": "DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
},
},
}
inspector = inspect(engine)
def _validate_required_sys_settings() -> None:
placeholders = ", ".join(f":k{i}" for i, _ in enumerate(REQUIRED_SYS_SETTING_KEYS))
params = {f"k{i}": key for i, key in enumerate(REQUIRED_SYS_SETTING_KEYS)}
with engine.connect() as conn:
for table_name, cols in required_columns.items():
if not inspector.has_table(table_name):
continue
existing = {
str(row.get("name"))
for row in inspector.get_columns(table_name)
if row.get("name")
}
for col, ddl_map in cols.items():
if col in existing:
continue
ddl = ddl_map.get(dialect) or ddl_map.get("sqlite")
conn.execute(text(f"ALTER TABLE {table_name} ADD COLUMN {col} {ddl}"))
conn.commit()
def _ensure_topic_indexes() -> None:
required_indexes = [
("uq_topic_topic_bot_topic_key", "topic_topic", ["bot_id", "topic_key"], True),
("idx_topic_topic_bot_id", "topic_topic", ["bot_id"], False),
("idx_topic_topic_topic_key", "topic_topic", ["topic_key"], False),
("idx_topic_topic_bot_fallback", "topic_topic", ["bot_id", "is_default_fallback"], False),
("idx_topic_item_bot_id", "topic_item", ["bot_id"], False),
("idx_topic_item_topic_key", "topic_item", ["topic_key"], False),
("idx_topic_item_level", "topic_item", ["level"], False),
("idx_topic_item_source", "topic_item", ["source"], False),
("idx_topic_item_is_read", "topic_item", ["is_read"], False),
("idx_topic_item_created_at", "topic_item", ["created_at"], False),
("idx_topic_item_bot_topic_created_at", "topic_item", ["bot_id", "topic_key", "created_at"], False),
("idx_topic_item_bot_dedupe", "topic_item", ["bot_id", "dedupe_key"], False),
]
inspector = inspect(engine)
with engine.connect() as conn:
for name, table_name, columns, unique in required_indexes:
if not inspector.has_table(table_name):
continue
existing = {
str(item.get("name"))
for item in inspector.get_indexes(table_name)
if item.get("name")
}
existing.update(
str(item.get("name"))
for item in inspector.get_unique_constraints(table_name)
if item.get("name")
)
if name in existing:
continue
unique_sql = "UNIQUE " if unique else ""
cols_sql = ", ".join(columns)
conn.execute(text(f"CREATE {unique_sql}INDEX {name} ON {table_name} ({cols_sql})"))
conn.commit()
def _drop_obsolete_topic_tables() -> None:
with engine.connect() as conn:
if engine.dialect.name == "postgresql":
conn.execute(text('DROP TABLE IF EXISTS "topic_bot_settings"'))
elif engine.dialect.name == "mysql":
conn.execute(text("DROP TABLE IF EXISTS `topic_bot_settings`"))
else:
conn.execute(text('DROP TABLE IF EXISTS "topic_bot_settings"'))
conn.commit()
def _cleanup_legacy_default_topics() -> None:
"""
Remove legacy auto-created fallback topic rows from early topic-feed design.
Historical rows look like:
- topic_key = inbox
- name = Inbox
- description = Default topic for uncategorized items
- routing_json contains "Fallback topic"
"""
with engine.connect() as conn:
legacy_rows = conn.execute(
text(
"""
SELECT bot_id, topic_key
FROM topic_topic
WHERE lower(coalesce(topic_key, '')) = 'inbox'
AND lower(coalesce(name, '')) = 'inbox'
AND lower(coalesce(description, '')) = 'default topic for uncategorized items'
AND lower(coalesce(routing_json, '')) LIKE '%fallback topic%'
"""
)
).fetchall()
if not legacy_rows:
return
for row in legacy_rows:
bot_id = str(row[0] or "").strip()
topic_key = str(row[1] or "").strip().lower()
if not bot_id or not topic_key:
continue
conn.execute(
text(
"""
DELETE FROM topic_item
WHERE bot_id = :bot_id AND lower(coalesce(topic_key, '')) = :topic_key
"""
),
{"bot_id": bot_id, "topic_key": topic_key},
)
conn.execute(
text(
"""
DELETE FROM topic_topic
WHERE bot_id = :bot_id AND lower(coalesce(topic_key, '')) = :topic_key
"""
),
{"bot_id": bot_id, "topic_key": topic_key},
)
conn.commit()
def align_postgres_sequences() -> None:
if engine.dialect.name != "postgresql":
return
sequence_targets = [
(BOT_MESSAGE_TABLE, "id"),
(BOT_REQUEST_USAGE_TABLE, "id"),
(BOT_ACTIVITY_EVENT_TABLE, "id"),
("skill_market_item", "id"),
("bot_skill_install", "id"),
]
with engine.connect() as conn:
for table_name, column_name in sequence_targets:
seq_name = conn.execute(
text("SELECT pg_get_serial_sequence(:table_name, :column_name)"),
{"table_name": table_name, "column_name": column_name},
).scalar()
if not seq_name:
continue
max_id = conn.execute(
text(f'SELECT COALESCE(MAX("{column_name}"), 0) FROM "{table_name}"')
).scalar()
max_id = int(max_id or 0)
conn.execute(
text("SELECT setval(:seq_name, :next_value, :is_called)"),
{
"seq_name": seq_name,
"next_value": max_id if max_id > 0 else 1,
"is_called": max_id > 0,
},
)
conn.commit()
rows = conn.execute(
text(f'SELECT key FROM "{SYS_SETTING_TABLE}" WHERE key IN ({placeholders})'),
params,
).scalars().all()
present = {str(row or "").strip() for row in rows if str(row or "").strip()}
missing = [key for key in REQUIRED_SYS_SETTING_KEYS if key not in present]
if missing:
raise RuntimeError(
"Database seed data is not initialized. "
f"Missing sys_setting keys: {', '.join(missing)}. "
"Run scripts/init-full-db.sh or apply scripts/sql/init-data.sql before starting the backend."
)
def init_database() -> None:
lock_conn = _acquire_migration_lock()
try:
_rename_legacy_tables()
SQLModel.metadata.create_all(engine)
_migrate_legacy_table_rows()
_drop_legacy_skill_tables()
_ensure_sys_setting_columns()
_ensure_bot_request_usage_columns()
_ensure_botinstance_columns()
_drop_legacy_botinstance_columns()
_ensure_botmessage_columns()
_ensure_topic_tables_sqlite()
_repair_postgres_topic_foreign_keys()
_ensure_topic_columns()
_ensure_topic_indexes()
_drop_obsolete_topic_tables()
_cleanup_legacy_default_topics()
_drop_legacy_tables()
align_postgres_sequences()
finally:
_release_migration_lock(lock_conn)
with engine.connect() as conn:
conn.execute(text("SELECT 1"))
_validate_required_tables()
_validate_required_sys_settings()
def get_session():
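With the migration machinery stripped out, `init_database` reduces to a connectivity ping plus the two validators above, so it can double as a standalone preflight check. A minimal sketch (the script wrapper itself is illustrative):

```python
# Hypothetical preflight script reusing the validation-only init_database above.
from core.database import init_database

if __name__ == "__main__":
    # Raises RuntimeError listing missing tables or sys_setting keys when
    # scripts/init-full-db.sh / scripts/sql/*.sql have not been applied yet.
    init_database()
    print("schema and seed data present")
```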

View File

@@ -0,0 +1,7 @@
from core.docker_manager import BotDockerManager
from core.settings import BOTS_WORKSPACE_ROOT, DOCKER_NETWORK_NAME
docker_manager = BotDockerManager(
host_data_root=BOTS_WORKSPACE_ROOT,
network_name=DOCKER_NETWORK_NAME,
)

View File

@@ -11,7 +11,14 @@ import docker
class BotDockerManager:
def __init__(self, host_data_root: str, base_image: str = "nanobot-base:v0.1.4"):
_RUNTIME_BOOTSTRAP_LABEL_KEY = "dashboard.runtime_bootstrap"
_RUNTIME_BOOTSTRAP_LABEL_VALUE = "env-json-v1"
def __init__(
self,
host_data_root: str,
base_image: str = "nanobot-base",
network_name: str = "",
):
try:
self.client = docker.from_env(timeout=6)
self.client.version()
@@ -22,8 +29,11 @@ class BotDockerManager:
self.host_data_root = host_data_root
self.base_image = base_image
self.network_name = str(network_name or "").strip()
self.active_monitors = {}
self._last_delivery_error: Dict[str, str] = {}
self._storage_limit_supported: Optional[bool] = None
self._storage_limit_warning_emitted = False
@staticmethod
def _normalize_resource_limits(
@@ -88,6 +98,282 @@ class BotDockerManager:
print(f"[DockerManager] list_images_by_repo failed: {e}")
return rows
@staticmethod
def _docker_error_message(exc: Exception) -> str:
explanation = getattr(exc, "explanation", None)
if isinstance(explanation, bytes):
try:
explanation = explanation.decode("utf-8", errors="replace")
except Exception:
explanation = str(explanation)
if explanation:
return str(explanation)
response = getattr(exc, "response", None)
text = getattr(response, "text", None)
if text:
return str(text)
return str(exc)
@classmethod
def _is_unsupported_storage_opt_error(cls, exc: Exception) -> bool:
message = cls._docker_error_message(exc).lower()
if "storage-opt" not in message and "storage opt" not in message:
return False
markers = (
"overlay over xfs",
"overlay2 over xfs",
"pquota",
"project quota",
"storage driver does not support",
"xfs",
)
return any(marker in message for marker in markers)
def _cleanup_container_if_exists(self, container_name: str) -> None:
if not self.client:
return
try:
container = self.client.containers.get(container_name)
container.remove(force=True)
except docker.errors.NotFound:
pass
except Exception as e:
print(f"[DockerManager] failed to cleanup container {container_name}: {e}")
def _resolve_container_network(self) -> str:
if not self.client or not self.network_name:
return "bridge"
try:
self.client.networks.get(self.network_name)
return self.network_name
except docker.errors.NotFound:
print(f"[DockerManager] network '{self.network_name}' not found; falling back to bridge")
except Exception as e:
print(f"[DockerManager] failed to inspect network '{self.network_name}': {e}; falling back to bridge")
return "bridge"
@staticmethod
def _container_uses_network(container: Any, network_name: str) -> bool:
attrs = getattr(container, "attrs", {}) or {}
network_settings = attrs.get("NetworkSettings") or {}
networks = network_settings.get("Networks") or {}
if network_name in networks:
return True
if network_name == "bridge" and not networks and str(network_settings.get("IPAddress") or "").strip():
return True
return False
@staticmethod
def _get_container_network_ip(container: Any, preferred_network: str = "") -> str:
attrs = getattr(container, "attrs", {}) or {}
network_settings = attrs.get("NetworkSettings") or {}
networks = network_settings.get("Networks") or {}
if preferred_network:
preferred = networks.get(preferred_network) or {}
preferred_ip = str(preferred.get("IPAddress") or "").strip()
if preferred_ip:
return preferred_ip
for network in networks.values():
ip_address = str((network or {}).get("IPAddress") or "").strip()
if ip_address:
return ip_address
return str(network_settings.get("IPAddress") or "").strip()
@classmethod
def _container_uses_expected_bootstrap(cls, container: Any) -> bool:
attrs = getattr(container, "attrs", {}) or {}
config = attrs.get("Config") or {}
labels = config.get("Labels") or {}
return str(labels.get(cls._RUNTIME_BOOTSTRAP_LABEL_KEY) or "").strip() == cls._RUNTIME_BOOTSTRAP_LABEL_VALUE
@staticmethod
def _runtime_bootstrap_entrypoint() -> List[str]:
bootstrap_code = "\n".join(
[
"import json",
"import os",
"import pathlib",
"import re",
"",
"path = pathlib.Path('/root/.nanobot/env.json')",
"pattern = re.compile(r'^[A-Z_][A-Z0-9_]{0,127}$')",
"data = {}",
"if path.is_file():",
" try:",
" data = json.loads(path.read_text(encoding='utf-8'))",
" except Exception:",
" data = {}",
"if not isinstance(data, dict):",
" data = {}",
"for raw_key, raw_value in data.items():",
" key = str(raw_key or '').strip().upper()",
" if not pattern.fullmatch(key):",
" continue",
" os.environ[key] = str(raw_value or '').strip()",
"os.execvp('nanobot', ['nanobot', 'gateway'])",
]
)
return [
"python",
"-c",
bootstrap_code,
]
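The bootstrap entrypoint reads `/root/.nanobot/env.json`, upper-cases each key, keeps only names matching `^[A-Z_][A-Z0-9_]{0,127}$`, exports them, and then execs `nanobot gateway`. A runnable reproduction of just the filtering step (sample values are made up):

```python
import re

# Same pattern the bootstrap compiles inside the container.
pattern = re.compile(r"^[A-Z_][A-Z0-9_]{0,127}$")
sample = {"openai_api_key": "sk-1", "HTTP_PROXY": "http://proxy:3128", "9bad": "x"}

exported = {
    k.strip().upper(): str(v or "").strip()
    for k, v in sample.items()
    if pattern.fullmatch(k.strip().upper())  # "9bad" -> "9BAD" fails: leading digit
}
print(exported)  # {'OPENAI_API_KEY': 'sk-1', 'HTTP_PROXY': 'http://proxy:3128'}
```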
@staticmethod
def _container_has_mount(container: Any, source: str, destination: str) -> bool:
attrs = getattr(container, "attrs", {}) or {}
mounts = attrs.get("Mounts") or []
expected_source = os.path.normpath(source)
expected_destination = str(destination or "").strip()
for mount in mounts:
if not isinstance(mount, dict):
continue
current_source = os.path.normpath(str(mount.get("Source") or ""))
current_destination = str(mount.get("Destination") or "").strip()
if current_source != expected_source or current_destination != expected_destination:
continue
if mount.get("RW") is False:
continue
return True
return False
@staticmethod
def _desired_memory_bytes(memory_mb: int) -> int:
return int(memory_mb) * 1024 * 1024 if int(memory_mb or 0) > 0 else 0
@staticmethod
def _desired_storage_bytes(storage_gb: int) -> Optional[int]:
storage = int(storage_gb or 0)
if storage <= 0:
return None
return storage * 1024 * 1024 * 1024
@staticmethod
def _get_container_cpu_cores(container: Any) -> float:
attrs = getattr(container, "attrs", {}) or {}
host_cfg = attrs.get("HostConfig") or {}
nano_cpus = int(host_cfg.get("NanoCpus") or 0)
if nano_cpus > 0:
return nano_cpus / 1_000_000_000
cpu_quota = int(host_cfg.get("CpuQuota") or 0)
cpu_period = int(host_cfg.get("CpuPeriod") or 0)
if cpu_quota > 0 and cpu_period > 0:
return cpu_quota / cpu_period
return 0.0
@staticmethod
def _normalize_image_id(raw: Any) -> str:
text = str(raw or "").strip().lower()
if text.startswith("sha256:"):
return text[7:]
return text
@classmethod
def _get_container_image_id(cls, container: Any) -> str:
attrs = getattr(container, "attrs", {}) or {}
image_id = attrs.get("Image")
if image_id:
return cls._normalize_image_id(image_id)
image = getattr(container, "image", None)
return cls._normalize_image_id(getattr(image, "id", ""))
def _resolve_image_id(self, image_ref: str) -> str:
if not self.client:
return ""
try:
image = self.client.images.get(image_ref)
except Exception as e:
print(f"[DockerManager] failed to resolve image id for {image_ref}: {e}")
return ""
return self._normalize_image_id(getattr(image, "id", ""))
def _container_storage_matches(self, actual_storage_bytes: Optional[int], desired_storage_gb: int) -> bool:
expected_storage_bytes = self._desired_storage_bytes(desired_storage_gb)
if expected_storage_bytes is None:
return actual_storage_bytes in {None, 0}
if actual_storage_bytes == expected_storage_bytes:
return True
return actual_storage_bytes is None and self._storage_limit_supported is not True
def _container_matches_runtime(
self,
container: Any,
*,
image_id: str,
cpu_cores: float,
memory_mb: int,
storage_gb: int,
bot_workspace: str,
network_name: str,
) -> bool:
attrs = getattr(container, "attrs", {}) or {}
host_cfg = attrs.get("HostConfig") or {}
current_image_id = self._get_container_image_id(container)
desired_image_id = self._normalize_image_id(image_id)
if not desired_image_id or not current_image_id or current_image_id != desired_image_id:
return False
if not self._container_uses_expected_bootstrap(container):
return False
if not self._container_uses_network(container, network_name):
return False
if not self._container_has_mount(container, bot_workspace, "/root/.nanobot"):
return False
actual_memory_bytes = int(host_cfg.get("Memory") or 0)
if actual_memory_bytes != self._desired_memory_bytes(memory_mb):
return False
desired_cpu = float(cpu_cores or 0)
actual_cpu = self._get_container_cpu_cores(container)
if abs(actual_cpu - desired_cpu) > 0.01:
return False
storage_opt = host_cfg.get("StorageOpt") or {}
actual_storage_bytes = self._parse_size_to_bytes(storage_opt.get("size"))
if not self._container_storage_matches(actual_storage_bytes, storage_gb):
return False
return True
def _run_container_with_storage_fallback(
self,
bot_id: str,
container_name: str,
storage_gb: int,
**base_kwargs: Any,
):
if not self.client:
raise RuntimeError("Docker client is not available")
if storage_gb <= 0:
return self.client.containers.run(**base_kwargs)
if self._storage_limit_supported is False:
return self.client.containers.run(**base_kwargs)
try:
container = self.client.containers.run(
storage_opt={"size": f"{storage_gb}G"},
**base_kwargs,
)
self._storage_limit_supported = True
return container
except Exception as exc:
if not self._is_unsupported_storage_opt_error(exc):
raise
self._storage_limit_supported = False
if not self._storage_limit_warning_emitted:
print(
"[DockerManager] storage limit not supported by current Docker storage driver; "
f"falling back to unlimited container filesystem size. Details: {self._docker_error_message(exc)}"
)
self._storage_limit_warning_emitted = True
else:
print(f"[DockerManager] storage limit skipped for {bot_id}: unsupported by current Docker storage driver")
self._cleanup_container_if_exists(container_name)
return self.client.containers.run(**base_kwargs)
def start_bot(
self,
bot_id: str,
@ -106,22 +392,30 @@ class BotDockerManager:
if not self.has_image(image):
print(f"❌ 错误: 镜像不存在: {image}")
return False
desired_image_id = self._resolve_image_id(image)
if not desired_image_id:
print(f"❌ 错误: 无法解析镜像 ID: {image}")
return False
bot_workspace = os.path.join(self.host_data_root, bot_id, ".nanobot")
container_name = f"worker_{bot_id}"
os.makedirs(bot_workspace, exist_ok=True)
cpu, memory, storage = self._normalize_resource_limits(cpu_cores, memory_mb, storage_gb)
target_network = self._resolve_container_network()
base_kwargs = {
"image": image,
"name": container_name,
"detach": True,
"stdin_open": True,
"tty": True,
"environment": env_vars or {},
"entrypoint": self._runtime_bootstrap_entrypoint(),
"labels": {
self._RUNTIME_BOOTSTRAP_LABEL_KEY: self._RUNTIME_BOOTSTRAP_LABEL_VALUE,
},
"volumes": {
bot_workspace: {"bind": "/root/.nanobot", "mode": "rw"},
},
"network_mode": "bridge",
"network": target_network,
}
if memory > 0:
base_kwargs["mem_limit"] = f"{memory}m"
@ -132,27 +426,46 @@ class BotDockerManager:
try:
container = self.client.containers.get(container_name)
container.reload()
if container.status == "running":
runtime_matches = self._container_matches_runtime(
container,
image_id=desired_image_id,
cpu_cores=cpu,
memory_mb=memory,
storage_gb=storage,
bot_workspace=bot_workspace,
network_name=target_network,
)
if container.status in {"running", "restarting"} and runtime_matches:
if on_state_change:
self.ensure_monitor(bot_id, on_state_change)
return True
container.remove(force=True)
if container.status in {"running", "restarting"}:
if not self._container_uses_network(container, target_network):
print(
f"[DockerManager] recreating {container_name} to switch network "
f"from current attachment to '{target_network}'"
)
else:
print(f"[DockerManager] recreating {container_name} because container config no longer matches desired runtime")
container.remove(force=True)
elif runtime_matches:
container.start()
if on_state_change:
self.ensure_monitor(bot_id, on_state_change)
return True
else:
print(f"[DockerManager] recreating {container_name} because container config no longer matches desired runtime")
container.remove(force=True)
except docker.errors.NotFound:
pass
container = None
if storage > 0:
try:
container = self.client.containers.run(
storage_opt={"size": f"{storage}G"},
**base_kwargs,
)
except Exception as e:
# Some Docker engines (e.g. Desktop/overlay2) may not support the size storage option.
print(f"[DockerManager] storage limit not applied for {bot_id}: {e}")
container = self.client.containers.run(**base_kwargs)
else:
container = self.client.containers.run(**base_kwargs)
container = self._run_container_with_storage_fallback(
bot_id,
container_name,
storage,
**base_kwargs,
)
if on_state_change:
monitor_thread = threading.Thread(
@ -194,14 +507,17 @@ class BotDockerManager:
print(f"[DockerManager] Error ensuring monitor for {bot_id}: {e}")
return False
def stop_bot(self, bot_id: str) -> bool:
def stop_bot(self, bot_id: str, remove: bool = False) -> bool:
if not self.client:
return False
container_name = f"worker_{bot_id}"
try:
container = self.client.containers.get(container_name)
container.stop(timeout=5)
container.remove()
container.reload()
if str(container.status or "").strip().lower() in {"running", "restarting", "paused"}:
container.stop(timeout=5)
if remove:
container.remove()
self.active_monitors.pop(bot_id, None)
return True
except docker.errors.NotFound:
@ -219,6 +535,11 @@ class BotDockerManager:
media_paths = [str(v).strip().replace("\\", "/") for v in (media or []) if str(v).strip()]
self._last_delivery_error.pop(bot_id, None)
if not self._wait_for_dashboard_ready(bot_id):
if bot_id not in self._last_delivery_error:
self._last_delivery_error[bot_id] = "Dashboard channel is not ready"
return False
# Primary path on Docker Desktop/Mac: execute curl inside container namespace.
for attempt in range(3):
if self._send_command_via_exec(bot_id, command, media_paths):
@ -237,6 +558,13 @@ class BotDockerManager:
def get_last_delivery_error(self, bot_id: str) -> str:
return str(self._last_delivery_error.get(bot_id, "") or "").strip()
def _wait_for_dashboard_ready(self, bot_id: str) -> bool:
status = self.get_bot_status(bot_id)
if status != "RUNNING":
self._last_delivery_error[bot_id] = f"Container status is {status.lower()}"
return False
return True
def get_bot_status(self, bot_id: str) -> str:
"""Return normalized runtime status from Docker: RUNNING or STOPPED."""
if not self.client:
@ -521,7 +849,8 @@ class BotDockerManager:
container_name = f"worker_{bot_id}"
payload = {"message": command, "media": media or []}
container = self.client.containers.get(container_name)
ip_address = container.attrs["NetworkSettings"]["IPAddress"] or "127.0.0.1"
container.reload()
ip_address = self._get_container_network_ip(container, preferred_network=self.network_name) or "127.0.0.1"
target_url = f"http://{ip_address}:9000/chat"
with httpx.Client(timeout=4.0) as client:
@ -538,19 +867,65 @@ class BotDockerManager:
self._last_delivery_error[bot_id] = reason
return False
def get_recent_logs(self, bot_id: str, tail: int = 300) -> List[str]:
def _read_log_lines_with_client(self, client, bot_id: str, tail: Optional[int] = None) -> List[str]:
container = client.containers.get(f"worker_{bot_id}")
raw = container.logs(tail=max(1, int(tail))) if tail is not None else container.logs()
if isinstance(raw, (bytes, bytearray)):
text = raw.decode("utf-8", errors="ignore")
else:
text = str(raw or "")
return [line for line in text.splitlines() if line.strip()]
def _read_log_lines(self, bot_id: str, tail: Optional[int] = None) -> List[str]:
if not self.client:
return []
container_name = f"worker_{bot_id}"
try:
container = self.client.containers.get(container_name)
raw = container.logs(tail=max(1, int(tail)))
text = raw.decode("utf-8", errors="ignore")
return [line for line in text.splitlines() if line.strip()]
return self._read_log_lines_with_client(self.client, bot_id, tail=tail)
except Exception as e:
print(f"[DockerManager] Error reading logs for {bot_id}: {e}")
return []
def get_recent_logs(self, bot_id: str, tail: int = 300) -> List[str]:
return self._read_log_lines(bot_id, tail=max(1, int(tail)))
def get_logs_page(
self,
bot_id: str,
offset: int = 0,
limit: int = 50,
reverse: bool = True,
) -> Dict[str, Any]:
safe_offset = max(0, int(offset))
safe_limit = max(1, int(limit))
if reverse:
# Docker logs API supports tail but not arbitrary offsets. For reverse pagination
# we only read the minimal newest slice needed for the requested page.
tail_count = safe_offset + safe_limit + 1
lines = self._read_log_lines(bot_id, tail=tail_count)
ordered = list(reversed(lines))
page = ordered[safe_offset:safe_offset + safe_limit]
has_more = len(lines) > safe_offset + safe_limit
return {
"logs": page,
"total": None,
"offset": safe_offset,
"limit": safe_limit,
"has_more": has_more,
"reverse": reverse,
}
lines = self._read_log_lines(bot_id, tail=None)
total = len(lines)
page = lines[safe_offset:safe_offset + safe_limit]
return {
"logs": page,
"total": total,
"offset": safe_offset,
"limit": safe_limit,
"has_more": safe_offset + safe_limit < total,
"reverse": reverse,
}
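# Usage sketch for reverse pagination (newest-first, 50 lines per page):
#   page1 = docker_manager.get_logs_page(bot_id, offset=0, limit=50)
#   page2 = docker_manager.get_logs_page(bot_id, offset=50, limit=50)
# In reverse mode "total" is None because only offset + limit + 1 tail lines
# are read from Docker; "has_more" is derived from the one extra line requested.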
def _monitor_container_logs(self, bot_id: str, container, callback: Callable[[str, dict], None]):
try:
buffer = ""

View File

@ -1,4 +1,3 @@
import json
import os
import re
from pathlib import Path
@ -30,13 +29,6 @@ for _k, _v in _prod_env_values.items():
os.environ[_k] = str(_v)
def _env_text(name: str, default: str) -> str:
raw = os.getenv(name)
if raw is None:
return default
return str(raw).replace("\\n", "\n")
def _env_bool(name: str, default: bool) -> bool:
raw = os.getenv(name)
if raw is None:
@ -84,6 +76,32 @@ def _env_extensions(name: str, default: tuple[str, ...]) -> tuple[str, ...]:
return tuple(rows)
def _normalize_origin(raw: str) -> str:
text = str(raw or "").strip()
if not text:
return ""
try:
parsed = urlsplit(text)
except Exception:
return ""
scheme = str(parsed.scheme or "").strip().lower()
netloc = str(parsed.netloc or "").strip().lower()
if scheme not in {"http", "https"} or not netloc:
return ""
return urlunsplit((scheme, netloc, "", "", ""))
def _env_origins(name: str, default: tuple[str, ...]) -> tuple[str, ...]:
raw = os.getenv(name)
source = list(default) if raw is None else re.split(r"[,;\s]+", str(raw))
rows: list[str] = []
for item in source:
origin = _normalize_origin(item)
if origin and origin not in rows:
rows.append(origin)
return tuple(rows)
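# Examples: "HTTPS://Example.COM:8080/path?q=1" -> "https://example.com:8080"
# (scheme/host lower-cased, path and query dropped), while "ftp://host" and a
# bare "example.com" normalize to "" and are excluded from the CORS allowlist.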
def _normalize_dir_path(path_value: str) -> str:
raw = str(path_value or "").strip()
if not raw:
@ -95,27 +113,14 @@ def _normalize_dir_path(path_value: str) -> str:
return str((BACKEND_ROOT / p).resolve())
def _load_json_object(path: Path) -> dict[str, object]:
try:
with open(path, "r", encoding="utf-8") as f:
data = json.load(f)
if isinstance(data, dict):
return data
except Exception:
pass
return {}
def _read_template_md(raw: object) -> str:
if raw is None:
return ""
return str(raw).replace("\r\n", "\n").strip()
DATA_ROOT: Final[str] = _normalize_dir_path(os.getenv("DATA_ROOT", str(PROJECT_ROOT / "data")))
BOTS_WORKSPACE_ROOT: Final[str] = _normalize_dir_path(
os.getenv("BOTS_WORKSPACE_ROOT", str(PROJECT_ROOT / "workspace" / "bots"))
)
RUNTIME_DATA_ROOT: Final[Path] = Path(DATA_ROOT).resolve()
RUNTIME_TEMPLATES_ROOT: Final[Path] = (RUNTIME_DATA_ROOT / "templates").resolve()
RUNTIME_SKILLS_ROOT: Final[Path] = (RUNTIME_DATA_ROOT / "skills").resolve()
RUNTIME_MODEL_ROOT: Final[Path] = (RUNTIME_DATA_ROOT / "model").resolve()
def _normalize_database_url(url: str) -> str:
@ -168,9 +173,10 @@ def _mask_database_url(url: str) -> str:
_db_env = str(os.getenv("DATABASE_URL") or "").strip()
DATABASE_URL: Final[str] = _normalize_database_url(
_db_env if _db_env else f"sqlite:///{Path(DATA_ROOT) / 'nanobot_dashboard.db'}"
)
if not _db_env:
raise RuntimeError("DATABASE_URL is not set in environment. PostgreSQL is required.")
DATABASE_URL: Final[str] = _normalize_database_url(_db_env)
DATABASE_ENGINE: Final[str] = _database_engine(DATABASE_URL)
DATABASE_URL_DISPLAY: Final[str] = _mask_database_url(DATABASE_URL)
DATABASE_ECHO: Final[bool] = _env_bool("DATABASE_ECHO", True)
@ -181,7 +187,17 @@ DATABASE_POOL_RECYCLE: Final[int] = _env_int("DATABASE_POOL_RECYCLE", 1800, 30,
DEFAULT_UPLOAD_MAX_MB: Final[int] = 100
DEFAULT_PAGE_SIZE: Final[int] = 10
DEFAULT_CHAT_PULL_PAGE_SIZE: Final[int] = 60
DEFAULT_COMMAND_AUTO_UNLOCK_SECONDS: Final[int] = _env_int("COMMAND_AUTO_UNLOCK_SECONDS", 10, 1, 600)
DEFAULT_AUTH_TOKEN_TTL_HOURS: Final[int] = _env_int("AUTH_TOKEN_TTL_HOURS", 24, 1, 720)
DEFAULT_AUTH_TOKEN_MAX_ACTIVE: Final[int] = _env_int("AUTH_TOKEN_MAX_ACTIVE", 2, 1, 20)
WORKSPACE_PREVIEW_SIGNING_SECRET: Final[str] = str(
os.getenv("WORKSPACE_PREVIEW_SIGNING_SECRET") or DATABASE_URL
).strip()
WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS: Final[int] = _env_int(
"WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS",
3600,
60,
86400,
)
DEFAULT_BOT_SYSTEM_TIMEZONE: Final[str] = str(
os.getenv("DEFAULT_BOT_SYSTEM_TIMEZONE") or os.getenv("TZ") or "Asia/Shanghai"
).strip() or "Asia/Shanghai"
@ -201,7 +217,7 @@ DEFAULT_WORKSPACE_DOWNLOAD_EXTENSIONS: Final[tuple[str, ...]] = (
)
STT_ENABLED_DEFAULT: Final[bool] = True
STT_MODEL: Final[str] = str(os.getenv("STT_MODEL") or "ggml-small-q8_0.bin").strip()
_DEFAULT_STT_MODEL_DIR: Final[Path] = (Path(DATA_ROOT) / "model").resolve()
_DEFAULT_STT_MODEL_DIR: Final[Path] = RUNTIME_MODEL_ROOT
_configured_stt_model_dir = _normalize_dir_path(os.getenv("STT_MODEL_DIR", str(_DEFAULT_STT_MODEL_DIR)))
if _configured_stt_model_dir and not Path(_configured_stt_model_dir).exists() and _DEFAULT_STT_MODEL_DIR.exists():
STT_MODEL_DIR: Final[str] = str(_DEFAULT_STT_MODEL_DIR)
@ -222,51 +238,20 @@ REDIS_URL: Final[str] = str(os.getenv("REDIS_URL") or "").strip()
REDIS_PREFIX: Final[str] = str(os.getenv("REDIS_PREFIX") or "dashboard_nanobot").strip() or "dashboard_nanobot"
REDIS_DEFAULT_TTL: Final[int] = _env_int("REDIS_DEFAULT_TTL", 60, 1, 86400)
PANEL_ACCESS_PASSWORD: Final[str] = str(os.getenv("PANEL_ACCESS_PASSWORD") or "").strip()
CORS_ALLOWED_ORIGINS: Final[tuple[str, ...]] = _env_origins(
"CORS_ALLOWED_ORIGINS",
(
"http://localhost:5173",
"http://127.0.0.1:5173",
"http://localhost:4173",
"http://127.0.0.1:4173",
),
)
TEMPLATE_ROOT: Final[Path] = (BACKEND_ROOT / "templates").resolve()
AGENT_MD_TEMPLATES_FILE: Final[Path] = TEMPLATE_ROOT / "agent_md_templates.json"
TOPIC_PRESETS_TEMPLATES_FILE: Final[Path] = TEMPLATE_ROOT / "topic_presets.json"
APP_HOST: Final[str] = str(os.getenv("APP_HOST") or "0.0.0.0").strip()
APP_PORT: Final[int] = _env_int("APP_PORT", 8000, 1, 65535)
APP_RELOAD: Final[bool] = _env_bool("APP_RELOAD", False)
DOCKER_NETWORK_NAME: Final[str] = str(os.getenv("DOCKER_NETWORK_NAME") or "").strip()
_agent_md_templates_raw = _load_json_object(AGENT_MD_TEMPLATES_FILE)
DEFAULT_AGENTS_MD: Final[str] = _env_text(
"DEFAULT_AGENTS_MD",
_read_template_md(_agent_md_templates_raw.get("agents_md")),
).strip()
DEFAULT_SOUL_MD: Final[str] = _env_text(
"DEFAULT_SOUL_MD",
_read_template_md(_agent_md_templates_raw.get("soul_md")),
).strip()
DEFAULT_USER_MD: Final[str] = _env_text(
"DEFAULT_USER_MD",
_read_template_md(_agent_md_templates_raw.get("user_md")),
).strip()
DEFAULT_TOOLS_MD: Final[str] = _env_text(
"DEFAULT_TOOLS_MD",
_read_template_md(_agent_md_templates_raw.get("tools_md")),
).strip()
DEFAULT_IDENTITY_MD: Final[str] = _env_text(
"DEFAULT_IDENTITY_MD",
_read_template_md(_agent_md_templates_raw.get("identity_md")),
).strip()
_topic_presets_raw = _load_json_object(TOPIC_PRESETS_TEMPLATES_FILE)
_topic_presets_list = _topic_presets_raw.get("presets")
TOPIC_PRESET_TEMPLATES: Final[list[dict[str, object]]] = [
dict(row) for row in (_topic_presets_list if isinstance(_topic_presets_list, list) else []) if isinstance(row, dict)
]
def load_agent_md_templates() -> dict[str, str]:
raw = _load_json_object(AGENT_MD_TEMPLATES_FILE)
rows: dict[str, str] = {}
for key in ("agents_md", "soul_md", "user_md", "tools_md", "identity_md"):
rows[key] = _read_template_md(raw.get(key))
return rows
def load_topic_presets_template() -> dict[str, object]:
raw = _load_json_object(TOPIC_PRESETS_TEMPLATES_FILE)
presets = raw.get("presets")
if not isinstance(presets, list):
return {"presets": []}
return {"presets": [dict(row) for row in presets if isinstance(row, dict)]}
AGENT_MD_TEMPLATES_FILE: Final[Path] = RUNTIME_TEMPLATES_ROOT / "agent_md_templates.json"
TOPIC_PRESETS_TEMPLATES_FILE: Final[Path] = RUNTIME_TEMPLATES_ROOT / "topic_presets.json"

View File

@ -9,7 +9,7 @@ from pathlib import Path
from typing import Any, Dict, Optional
from core.settings import STT_DEVICE, STT_MODEL, STT_MODEL_DIR
from services.platform_service import get_speech_runtime_settings
from services.platform_settings_service import get_speech_runtime_settings
class SpeechServiceError(RuntimeError):
@ -24,6 +24,39 @@ class SpeechDurationError(SpeechServiceError):
pass
def inspect_speech_model_status() -> Dict[str, Any]:
service = WhisperSpeechService()
model = str(STT_MODEL or "").strip()
model_dir = str(STT_MODEL_DIR or "").strip()
expected_path = ""
if model:
if any(sep in model for sep in ("/", "\\")):
expected_path = str(Path(model).expanduser())
elif model_dir:
expected_path = str((Path(model_dir).expanduser() / model).resolve())
try:
resolved_path = service._resolve_model_source()
return {
"ready": True,
"model": model,
"model_dir": model_dir,
"expected_path": expected_path or resolved_path,
"resolved_path": resolved_path,
"message": "",
}
except SpeechServiceError as exc:
return {
"ready": False,
"model": model,
"model_dir": model_dir,
"expected_path": expected_path,
"resolved_path": "",
"message": str(exc),
}
class WhisperSpeechService:
def __init__(self) -> None:
self._model: Any = None

View File

@ -0,0 +1,160 @@
import os
import re
import json
from datetime import datetime, timezone, timedelta
from typing import Any, Dict, List, Optional
from zoneinfo import ZoneInfo
from fastapi import HTTPException
from core.settings import DEFAULT_BOT_SYSTEM_TIMEZONE
_ENV_KEY_RE = re.compile(r"^[A-Z_][A-Z0-9_]{0,127}$")
__all__ = [
"_calc_dir_size_bytes",
"_get_default_system_timezone",
"_is_ignored_skill_zip_top_level",
"_is_image_attachment_path",
"_is_valid_top_level_skill_name",
"_is_video_attachment_path",
"_is_visual_attachment_path",
"_normalize_env_params",
"_normalize_system_timezone",
"_parse_env_params",
"_parse_json_string_list",
"_read_description_from_text",
"_resolve_local_day_range",
"_safe_float",
"_safe_int",
"_sanitize_skill_market_key",
"_sanitize_zip_filename",
"_workspace_stat_ctime_iso",
]
def _resolve_local_day_range(date_text: str, tz_offset_minutes: Optional[int]) -> tuple[datetime, datetime]:
try:
local_day = datetime.strptime(str(date_text or "").strip(), "%Y-%m-%d")
except ValueError as exc:
raise HTTPException(status_code=400, detail="Invalid date, expected YYYY-MM-DD") from exc
offset = timedelta(minutes=tz_offset_minutes if tz_offset_minutes is not None else 0)
utc_start = local_day.replace(tzinfo=timezone.utc) + offset
utc_end = utc_start + timedelta(days=1)
return utc_start, utc_end
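# Worked example, assuming the caller passes the JS Date.getTimezoneOffset()
# convention (UTC minus local, in minutes): date_text="2026-04-13" with
# tz_offset_minutes=-480 (UTC+8) yields utc_start=2026-04-12T16:00:00+00:00
# and utc_end=2026-04-13T16:00:00+00:00.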
def _sanitize_zip_filename(name: str) -> str:
s = str(name or "").strip()
s = re.sub(r"[^a-zA-Z0-9._-]", "_", s)
return s if s else "upload.zip"
def _normalize_env_params(raw: Any) -> Dict[str, str]:
if not isinstance(raw, dict):
return {}
res: Dict[str, str] = {}
for k, v in raw.items():
ks = str(k).strip()
if _ENV_KEY_RE.match(ks):
res[ks] = str(v or "").strip()
return res
def _get_default_system_timezone() -> str:
return str(DEFAULT_BOT_SYSTEM_TIMEZONE or "Asia/Shanghai").strip()
def _normalize_system_timezone(raw: Any) -> str:
s = str(raw or "").strip()
if not s:
return _get_default_system_timezone()
try:
ZoneInfo(s)
return s
except Exception:
return _get_default_system_timezone()
def _safe_float(raw: Any, default: float) -> float:
try:
return float(raw)
except (ValueError, TypeError):
return default
def _safe_int(raw: Any, default: int) -> int:
try:
return int(raw)
except (ValueError, TypeError):
return default
def _parse_env_params(raw: Any) -> Dict[str, str]:
if isinstance(raw, dict):
return _normalize_env_params(raw)
if isinstance(raw, str):
try:
parsed = json.loads(raw)
return _normalize_env_params(parsed)
except Exception:
pass
return {}
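# Accepts either a dict or a JSON-encoded object string, e.g.
#   _parse_env_params('{"API_KEY": "x", "bad key": "y"}') -> {"API_KEY": "x"}
# Keys failing the uppercase _ENV_KEY_RE pattern are dropped silently.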
def _is_valid_top_level_skill_name(name: str) -> bool:
return bool(re.match(r"^[a-zA-Z0-9_-]+$", name))
def _parse_json_string_list(raw: Any) -> List[str]:
if not raw:
return []
if isinstance(raw, list):
return [str(v) for v in raw]
if isinstance(raw, str):
try:
parsed = json.loads(raw)
if isinstance(parsed, list):
return [str(v) for v in parsed]
except Exception:
pass
return []
def _is_ignored_skill_zip_top_level(name: str) -> bool:
return name.startswith(".") or name.startswith("__") or name in {"venv", "node_modules"}
def _read_description_from_text(text: str) -> str:
if not text:
return ""
lines = text.strip().split("\n")
for line in lines:
s = line.strip()
if s and not s.startswith("#"):
return s[:200]
return ""
def _sanitize_skill_market_key(key: str) -> str:
s = str(key or "").strip().lower()
s = re.sub(r"[^a-z0-9_-]", "_", s)
return s
def _calc_dir_size_bytes(path: str) -> int:
total = 0
try:
for root, dirs, files in os.walk(path):
for f in files:
fp = os.path.join(root, f)
if not os.path.islink(fp):
total += os.path.getsize(fp)
except Exception:
pass
return total
def _is_image_attachment_path(path: str) -> bool:
ext = (os.path.splitext(path)[1] or "").lower()
return ext in {".png", ".jpg", ".jpeg", ".gif", ".webp", ".svg", ".bmp"}
def _is_video_attachment_path(path: str) -> bool:
ext = (os.path.splitext(path)[1] or "").lower()
return ext in {".mp4", ".mov", ".avi", ".mkv", ".webm"}
def _is_visual_attachment_path(path: str) -> bool:
return _is_image_attachment_path(path) or _is_video_attachment_path(path)
def _workspace_stat_ctime_iso(stat: os.stat_result) -> str:
ts = getattr(stat, "st_birthtime", None)
if ts is None:
ts = getattr(stat, "st_ctime", None)
try:
return datetime.fromtimestamp(float(ts), tz=timezone.utc).isoformat().replace("+00:00", "Z")
except Exception:
return datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).isoformat().replace("+00:00", "Z")

View File

@ -0,0 +1,27 @@
from typing import Any, Dict, List
from fastapi import WebSocket
class WSConnectionManager:
def __init__(self):
self.connections: Dict[str, List[WebSocket]] = {}
async def connect(self, bot_id: str, websocket: WebSocket):
await websocket.accept()
self.connections.setdefault(bot_id, []).append(websocket)
def disconnect(self, bot_id: str, websocket: WebSocket):
conns = self.connections.get(bot_id, [])
if websocket in conns:
conns.remove(websocket)
if not conns and bot_id in self.connections:
del self.connections[bot_id]
async def broadcast(self, bot_id: str, data: Dict[str, Any]):
conns = list(self.connections.get(bot_id, []))
for ws in conns:
try:
await ws.send_json(data)
except Exception:
self.disconnect(bot_id, ws)
manager = WSConnectionManager()
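# A minimal FastAPI route sketch using this manager (route path and handler
# name are illustrative, not defined in this module):
#
#   from fastapi import APIRouter, WebSocket, WebSocketDisconnect
#
#   router = APIRouter()
#
#   @router.websocket("/ws/bots/{bot_id}")
#   async def bot_ws(websocket: WebSocket, bot_id: str):
#       await manager.connect(bot_id, websocket)
#       try:
#           while True:
#               await websocket.receive_text()  # keep the connection alive
#       except WebSocketDisconnect:
#           manager.disconnect(bot_id, websocket)
#
# broadcast() already prunes sockets whose send fails, so senders need no
# extra cleanup.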

View File

@ -1,73 +0,0 @@
-- Topic subsystem schema (SQLite)
-- Apply manually before/after backend deployment if needed.
BEGIN TRANSACTION;
CREATE TABLE IF NOT EXISTS topic_topic (
id INTEGER PRIMARY KEY AUTOINCREMENT,
bot_id TEXT NOT NULL,
topic_key TEXT NOT NULL,
name TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
is_active INTEGER NOT NULL DEFAULT 1,
is_default_fallback INTEGER NOT NULL DEFAULT 0,
routing_json TEXT NOT NULL DEFAULT '{}',
view_schema_json TEXT NOT NULL DEFAULT '{}',
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY(bot_id) REFERENCES bot_instance(id)
);
CREATE TABLE IF NOT EXISTS topic_item (
id INTEGER PRIMARY KEY AUTOINCREMENT,
bot_id TEXT NOT NULL,
topic_key TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
content TEXT NOT NULL DEFAULT '',
level TEXT NOT NULL DEFAULT 'info',
tags_json TEXT,
view_json TEXT,
source TEXT NOT NULL DEFAULT 'mcp',
dedupe_key TEXT,
is_read INTEGER NOT NULL DEFAULT 0,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY(bot_id) REFERENCES bot_instance(id)
);
CREATE UNIQUE INDEX IF NOT EXISTS uq_topic_topic_bot_topic_key
ON topic_topic(bot_id, topic_key);
CREATE INDEX IF NOT EXISTS idx_topic_topic_bot_id
ON topic_topic(bot_id);
CREATE INDEX IF NOT EXISTS idx_topic_topic_topic_key
ON topic_topic(topic_key);
CREATE INDEX IF NOT EXISTS idx_topic_topic_bot_fallback
ON topic_topic(bot_id, is_default_fallback);
CREATE INDEX IF NOT EXISTS idx_topic_item_bot_id
ON topic_item(bot_id);
CREATE INDEX IF NOT EXISTS idx_topic_item_topic_key
ON topic_item(topic_key);
CREATE INDEX IF NOT EXISTS idx_topic_item_level
ON topic_item(level);
CREATE INDEX IF NOT EXISTS idx_topic_item_source
ON topic_item(source);
CREATE INDEX IF NOT EXISTS idx_topic_item_is_read
ON topic_item(is_read);
CREATE INDEX IF NOT EXISTS idx_topic_item_created_at
ON topic_item(created_at);
CREATE INDEX IF NOT EXISTS idx_topic_item_bot_topic_created_at
ON topic_item(bot_id, topic_key, created_at);
CREATE INDEX IF NOT EXISTS idx_topic_item_bot_dedupe
ON topic_item(bot_id, dedupe_key);
COMMIT;

File diff suppressed because it is too large

View File

@ -0,0 +1,23 @@
from datetime import datetime
from typing import Optional
from sqlmodel import Field, SQLModel
class AuthLoginLog(SQLModel, table=True):
__tablename__ = "sys_login_log"
id: Optional[int] = Field(default=None, primary_key=True)
auth_type: str = Field(index=True) # panel | bot
token_hash: str = Field(index=True, unique=True)
subject_id: str = Field(index=True)
bot_id: Optional[str] = Field(default=None, index=True)
auth_source: str = Field(default="", index=True)
created_at: datetime = Field(default_factory=datetime.utcnow, index=True)
expires_at: datetime = Field(index=True)
last_seen_at: datetime = Field(default_factory=datetime.utcnow, index=True)
revoked_at: Optional[datetime] = Field(default=None, index=True)
revoke_reason: Optional[str] = Field(default=None)
client_ip: Optional[str] = Field(default=None)
user_agent: Optional[str] = Field(default=None)
device_info: Optional[str] = Field(default=None)

View File

@ -13,7 +13,7 @@ class BotInstance(SQLModel, table=True):
docker_status: str = Field(default="STOPPED", index=True)
current_state: Optional[str] = Field(default="IDLE")
last_action: Optional[str] = Field(default=None)
image_tag: str = Field(default="nanobot-base:v0.1.4") # 记录该机器人使用的镜像版本
image_tag: str = Field(default="nanobot-base") # 记录该机器人使用的镜像版本
created_at: datetime = Field(default_factory=datetime.utcnow)
updated_at: datetime = Field(default_factory=datetime.utcnow)
@ -32,7 +32,7 @@ class BotMessage(SQLModel, table=True):
class NanobotImage(SQLModel, table=True):
__tablename__ = "bot_image"
tag: str = Field(primary_key=True) # e.g., nanobot-base:v0.1.4
tag: str = Field(primary_key=True) # e.g., nanobot-base
image_id: Optional[str] = Field(default=None) # Docker's internal image ID
version: str # e.g., 0.1.4
status: str = Field(default="READY") # READY, BUILDING, ERROR

View File

@ -0,0 +1,3 @@
from providers.bot_workspace_provider import BotWorkspaceProvider
__all__ = ["BotWorkspaceProvider"]

View File

@ -0,0 +1,283 @@
from __future__ import annotations
import json
import os
from typing import Any, Dict, List
_PROVIDER_ALIAS_MAP = {
"aliyun": "dashscope",
"qwen": "dashscope",
"aliyun-qwen": "dashscope",
"moonshot": "kimi",
"xunfei": "openai",
"iflytek": "openai",
"xfyun": "openai",
"vllm": "openai",
}
_MANAGED_WORKSPACE_FILES = ("AGENTS.md", "SOUL.md", "USER.md", "TOOLS.md", "IDENTITY.md")
def _require_text(raw: Any, *, field: str) -> str:
value = str(raw if raw is not None else "").strip()
if not value:
raise RuntimeError(f"Missing required bot runtime field: {field}")
return value
def _normalize_markdown_text(raw: Any, *, field: str) -> str:
if raw is None:
raise RuntimeError(f"Missing required workspace markdown field: {field}")
return str(raw).replace("\r\n", "\n").strip() + "\n"
def _normalize_provider_name(raw_provider_name: str) -> tuple[str, str]:
normalized = raw_provider_name.strip().lower()
if not normalized:
raise RuntimeError("Missing required bot runtime field: llm_provider")
canonical = _PROVIDER_ALIAS_MAP.get(normalized, normalized)
return normalized, canonical
def _normalize_allow_from(raw: Any) -> List[str]:
rows: List[str] = []
if isinstance(raw, list):
for item in raw:
text = str(item or "").strip()
if text and text not in rows:
rows.append(text)
return rows or ["*"]
def _normalize_extra_config(raw: Any) -> Dict[str, Any]:
if raw is None:
return {}
if not isinstance(raw, dict):
raise RuntimeError("Channel extra_config must be an object")
return dict(raw)
def _write_json_atomic(path: str, payload: Dict[str, Any]) -> None:
os.makedirs(os.path.dirname(path), exist_ok=True)
tmp_path = f"{path}.tmp"
with open(tmp_path, "w", encoding="utf-8") as file:
json.dump(payload, file, ensure_ascii=False, indent=2)
os.replace(tmp_path, path)
def _write_text_atomic(path: str, content: str) -> None:
os.makedirs(os.path.dirname(path), exist_ok=True)
tmp_path = f"{path}.tmp"
with open(tmp_path, "w", encoding="utf-8") as file:
file.write(content)
os.replace(tmp_path, path)
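# Both helpers use the write-to-temp-then-rename pattern: os.replace is an
# atomic rename within the same filesystem, so readers of config.json or the
# managed *.md files never observe a half-written file. Note that they do not
# fsync before renaming, so a power loss may leave the previous content in
# place, which is the safe failure mode here.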
class BotWorkspaceProvider:
def __init__(self, host_data_root: str):
self.host_data_root = host_data_root
def write_workspace(self, bot_id: str, bot_data: Dict[str, Any], channels: List[Dict[str, Any]]) -> str:
raw_provider_name, provider_name = _normalize_provider_name(_require_text(bot_data.get("llm_provider"), field="llm_provider"))
model_name = _require_text(bot_data.get("llm_model"), field="llm_model")
api_key = _require_text(bot_data.get("api_key"), field="api_key")
api_base = _require_text(bot_data.get("api_base"), field="api_base")
temperature = float(bot_data.get("temperature"))
top_p = float(bot_data.get("top_p"))
max_tokens = int(bot_data.get("max_tokens"))
send_progress = bool(bot_data.get("send_progress"))
send_tool_hints = bool(bot_data.get("send_tool_hints"))
bot_root = os.path.join(self.host_data_root, bot_id)
dot_nanobot_dir = os.path.join(bot_root, ".nanobot")
workspace_dir = os.path.join(dot_nanobot_dir, "workspace")
memory_dir = os.path.join(workspace_dir, "memory")
skills_dir = os.path.join(workspace_dir, "skills")
for path in (dot_nanobot_dir, workspace_dir, memory_dir, skills_dir):
os.makedirs(path, exist_ok=True)
provider_cfg: Dict[str, Any] = {
"apiKey": api_key,
"apiBase": api_base,
}
if raw_provider_name in {"xunfei", "iflytek", "xfyun", "vllm"}:
provider_cfg["dashboardProviderAlias"] = raw_provider_name
effective_model_name = model_name
if provider_name == "openai" and raw_provider_name in {"xunfei", "iflytek", "xfyun"} and "/" not in model_name:
effective_model_name = f"openai/{model_name}"
config_data: Dict[str, Any] = {
"agents": {
"defaults": {
"model": effective_model_name,
"temperature": temperature,
"topP": top_p,
"maxTokens": max_tokens,
}
},
"providers": {
provider_name: provider_cfg,
},
"channels": {
"sendProgress": send_progress,
"sendToolHints": send_tool_hints,
"dashboard": {
"enabled": True,
"host": "0.0.0.0",
"port": 9000,
"allowFrom": ["*"],
},
},
}
mcp_servers = bot_data.get("mcp_servers")
if mcp_servers is not None:
if not isinstance(mcp_servers, dict):
raise RuntimeError("mcp_servers must be an object")
config_data["tools"] = {"mcpServers": mcp_servers}
channels_cfg = config_data["channels"]
for channel in channels:
channel_type = str(channel.get("channel_type") or "").strip().lower()
if not channel_type or channel_type == "dashboard":
continue
extra = _normalize_extra_config(channel.get("extra_config"))
enabled = bool(channel.get("is_active"))
external_app_id = str(channel.get("external_app_id") or "").strip()
app_secret = str(channel.get("app_secret") or "").strip()
if channel_type == "telegram":
channels_cfg["telegram"] = {
"enabled": enabled,
"token": app_secret,
"proxy": str(extra.get("proxy") or "").strip(),
"replyToMessage": bool(extra.get("replyToMessage")),
"allowFrom": _normalize_allow_from(extra.get("allowFrom")),
}
continue
if channel_type == "feishu":
channels_cfg["feishu"] = {
"enabled": enabled,
"appId": external_app_id,
"appSecret": app_secret,
"encryptKey": str(extra.get("encryptKey") or "").strip(),
"verificationToken": str(extra.get("verificationToken") or "").strip(),
"allowFrom": _normalize_allow_from(extra.get("allowFrom")),
}
continue
if channel_type == "dingtalk":
channels_cfg["dingtalk"] = {
"enabled": enabled,
"clientId": external_app_id,
"clientSecret": app_secret,
"allowFrom": _normalize_allow_from(extra.get("allowFrom")),
}
continue
if channel_type == "slack":
channels_cfg["slack"] = {
"enabled": enabled,
"mode": str(extra.get("mode") or "socket"),
"botToken": external_app_id,
"appToken": app_secret,
"replyInThread": bool(extra.get("replyInThread", True)),
"groupPolicy": str(extra.get("groupPolicy") or "mention"),
"groupAllowFrom": extra.get("groupAllowFrom") if isinstance(extra.get("groupAllowFrom"), list) else [],
"reactEmoji": str(extra.get("reactEmoji") or "eyes"),
}
continue
if channel_type == "qq":
channels_cfg["qq"] = {
"enabled": enabled,
"appId": external_app_id,
"secret": app_secret,
"allowFrom": _normalize_allow_from(extra.get("allowFrom")),
}
continue
if channel_type == "wecom":
wecom_cfg: Dict[str, Any] = {
"enabled": enabled,
"botId": external_app_id,
"secret": app_secret,
"allowFrom": _normalize_allow_from(extra.get("allowFrom")),
}
welcome_message = str(extra.get("welcomeMessage") or "").strip()
if welcome_message:
wecom_cfg["welcomeMessage"] = welcome_message
channels_cfg["wecom"] = wecom_cfg
continue
if channel_type == "weixin":
weixin_cfg: Dict[str, Any] = {
"enabled": enabled,
"allowFrom": _normalize_allow_from(extra.get("allowFrom")),
}
route_tag = str(extra.get("routeTag") or "").strip()
if route_tag:
weixin_cfg["routeTag"] = route_tag
state_dir = str(extra.get("stateDir") or "").strip()
if state_dir:
weixin_cfg["stateDir"] = state_dir
base_url = str(extra.get("baseUrl") or "").strip()
if base_url:
weixin_cfg["baseUrl"] = base_url
cdn_base_url = str(extra.get("cdnBaseUrl") or "").strip()
if cdn_base_url:
weixin_cfg["cdnBaseUrl"] = cdn_base_url
poll_timeout = extra.get("pollTimeout", extra.get("poll_timeout"))
if poll_timeout not in {None, ""}:
weixin_cfg["pollTimeout"] = max(1, int(poll_timeout))
channels_cfg["weixin"] = weixin_cfg
continue
if channel_type == "email":
channels_cfg["email"] = {
"enabled": enabled,
"consentGranted": bool(extra.get("consentGranted")),
"imapHost": str(extra.get("imapHost") or "").strip(),
"imapPort": max(1, min(int(extra.get("imapPort") or 993), 65535)),
"imapUsername": str(extra.get("imapUsername") or "").strip(),
"imapPassword": str(extra.get("imapPassword") or "").strip(),
"imapMailbox": str(extra.get("imapMailbox") or "INBOX"),
"imapUseSsl": bool(extra.get("imapUseSsl", True)),
"smtpHost": str(extra.get("smtpHost") or "").strip(),
"smtpPort": max(1, min(int(extra.get("smtpPort") or 587), 65535)),
"smtpUsername": str(extra.get("smtpUsername") or "").strip(),
"smtpPassword": str(extra.get("smtpPassword") or "").strip(),
"smtpUseTls": bool(extra.get("smtpUseTls", True)),
"smtpUseSsl": bool(extra.get("smtpUseSsl")),
"fromAddress": str(extra.get("fromAddress") or "").strip(),
"autoReplyEnabled": bool(extra.get("autoReplyEnabled", True)),
"pollIntervalSeconds": max(5, int(extra.get("pollIntervalSeconds") or 30)),
"markSeen": bool(extra.get("markSeen", True)),
"maxBodyChars": max(1, int(extra.get("maxBodyChars") or 12000)),
"subjectPrefix": str(extra.get("subjectPrefix") or "Re: "),
"allowFrom": _normalize_allow_from(extra.get("allowFrom")),
}
continue
channels_cfg[channel_type] = {
"enabled": enabled,
"appId": external_app_id,
"appSecret": app_secret,
**extra,
}
_write_json_atomic(os.path.join(dot_nanobot_dir, "config.json"), config_data)
workspace_files = {
"AGENTS.md": _normalize_markdown_text(bot_data.get("agents_md"), field="agents_md"),
"SOUL.md": _normalize_markdown_text(bot_data.get("soul_md"), field="soul_md"),
"USER.md": _normalize_markdown_text(bot_data.get("user_md"), field="user_md"),
"TOOLS.md": _normalize_markdown_text(bot_data.get("tools_md"), field="tools_md"),
"IDENTITY.md": _normalize_markdown_text(bot_data.get("identity_md"), field="identity_md"),
}
for filename in _MANAGED_WORKSPACE_FILES:
_write_text_atomic(os.path.join(workspace_dir, filename), workspace_files[filename])
return dot_nanobot_dir
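# For reference, the generated config.json comes out roughly like this
# (values illustrative; the provider key depends on _PROVIDER_ALIAS_MAP):
#
#   {
#     "agents": {"defaults": {"model": "qwen-max", "temperature": 0.2,
#                             "topP": 1.0, "maxTokens": 8192}},
#     "providers": {"dashscope": {"apiKey": "...", "apiBase": "..."}},
#     "channels": {"sendProgress": false, "sendToolHints": false,
#                  "dashboard": {"enabled": true, "host": "0.0.0.0",
#                                "port": 9000, "allowFrom": ["*"]}}
#   }
#
# plus one entry per configured external channel (telegram, feishu, ...) and an
# optional "tools": {"mcpServers": ...} block when mcp_servers is provided.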

View File

@ -1,5 +1,5 @@
fastapi==0.110.0
uvicorn==0.27.1
uvicorn[standard]==0.27.1
docker==7.0.0
sqlmodel==0.0.16
pydantic==2.6.3

View File

@ -0,0 +1,106 @@
from typing import Optional, Dict, Any, List
from pydantic import BaseModel, ConfigDict
class ChannelConfigRequest(BaseModel):
channel_type: str
external_app_id: Optional[str] = None
app_secret: Optional[str] = None
internal_port: Optional[int] = None
is_active: bool = True
extra_config: Optional[Dict[str, Any]] = None
class ChannelConfigUpdateRequest(BaseModel):
channel_type: Optional[str] = None
external_app_id: Optional[str] = None
app_secret: Optional[str] = None
internal_port: Optional[int] = None
is_active: Optional[bool] = None
extra_config: Optional[Dict[str, Any]] = None
class BotCreateRequest(BaseModel):
model_config = ConfigDict(extra="forbid")
id: str
name: str
enabled: Optional[bool] = True
image_tag: str
access_password: Optional[str] = None
llm_provider: str
llm_model: str
api_key: str
api_base: str
temperature: float = 0.2
top_p: float = 1.0
max_tokens: int = 8192
cpu_cores: float = 1.0
memory_mb: int = 1024
storage_gb: int = 10
system_timezone: str
soul_md: str
agents_md: str
user_md: str
tools_md: str
tools_config: Optional[Dict[str, Any]] = None
env_params: Optional[Dict[str, str]] = None
identity_md: str
channels: Optional[List[ChannelConfigRequest]] = None
send_progress: Optional[bool] = None
send_tool_hints: Optional[bool] = None
class BotUpdateRequest(BaseModel):
model_config = ConfigDict(extra="forbid")
name: Optional[str] = None
enabled: Optional[bool] = None
image_tag: Optional[str] = None
access_password: Optional[str] = None
llm_provider: Optional[str] = None
llm_model: Optional[str] = None
api_key: Optional[str] = None
api_base: Optional[str] = None
temperature: Optional[float] = None
top_p: Optional[float] = None
max_tokens: Optional[int] = None
cpu_cores: Optional[float] = None
memory_mb: Optional[int] = None
storage_gb: Optional[int] = None
system_timezone: Optional[str] = None
agents_md: Optional[str] = None
soul_md: Optional[str] = None
user_md: Optional[str] = None
tools_md: Optional[str] = None
tools_config: Optional[Dict[str, Any]] = None
env_params: Optional[Dict[str, str]] = None
identity_md: Optional[str] = None
send_progress: Optional[bool] = None
send_tool_hints: Optional[bool] = None
class BotToolsConfigUpdateRequest(BaseModel):
tools_config: Optional[Dict[str, Any]] = None
class BotMcpConfigUpdateRequest(BaseModel):
mcp_servers: Optional[Dict[str, Any]] = None
class BotEnvParamsUpdateRequest(BaseModel):
env_params: Optional[Dict[str, str]] = None
class BotPageAuthLoginRequest(BaseModel):
password: str
class CommandRequest(BaseModel):
command: Optional[str] = None
attachments: Optional[List[str]] = None
class MessageFeedbackRequest(BaseModel):
feedback: Optional[str] = None

View File

@ -3,29 +3,15 @@ from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
class LoadingPageSettings(BaseModel):
title: str = "Dashboard Nanobot"
subtitle: str = "平台正在准备管理面板"
description: str = "请稍候,正在加载 Bot 平台数据。"
class PlatformSettingsPayload(BaseModel):
page_size: int = Field(default=10, ge=1, le=100)
chat_pull_page_size: int = Field(default=60, ge=10, le=500)
command_auto_unlock_seconds: int = Field(default=10, ge=1, le=600)
auth_token_ttl_hours: int = Field(default=24, ge=1, le=720)
auth_token_max_active: int = Field(default=2, ge=1, le=20)
upload_max_mb: int = Field(default=100, ge=1, le=2048)
allowed_attachment_extensions: List[str] = Field(default_factory=list)
workspace_download_extensions: List[str] = Field(default_factory=list)
speech_enabled: bool = True
speech_max_audio_seconds: int = Field(default=20, ge=5, le=600)
speech_default_language: str = Field(default="zh", min_length=1, max_length=16)
speech_force_simplified: bool = True
speech_audio_preprocess: bool = True
speech_audio_filter: str = Field(default="highpass=f=120,lowpass=f=7600,afftdn=nf=-20")
speech_initial_prompt: str = Field(
default="以下内容可能包含简体中文和英文术语。请优先输出简体中文,英文单词、缩写、品牌名和数字保持原文,不要翻译。"
)
loading_page: LoadingPageSettings = Field(default_factory=LoadingPageSettings)
class PlatformUsageItem(BaseModel):
@ -55,6 +41,19 @@ class PlatformUsageSummary(BaseModel):
total_tokens: int
class PlatformUsageAnalyticsSeries(BaseModel):
model: str
total_requests: int
daily_counts: List[int]
class PlatformUsageAnalytics(BaseModel):
window_days: int
days: List[str]
total_requests: int
series: List[PlatformUsageAnalyticsSeries]
class PlatformUsageResponse(BaseModel):
summary: PlatformUsageSummary
items: List[PlatformUsageItem]
@ -62,6 +61,32 @@ class PlatformUsageResponse(BaseModel):
limit: int
offset: int
has_more: bool
analytics: PlatformUsageAnalytics
class PlatformLoginLogItem(BaseModel):
id: int
auth_type: str
subject_id: str
bot_id: Optional[str] = None
auth_source: str
client_ip: Optional[str] = None
user_agent: Optional[str] = None
device_info: Optional[str] = None
created_at: str
last_seen_at: Optional[str] = None
expires_at: Optional[str] = None
revoked_at: Optional[str] = None
revoke_reason: Optional[str] = None
status: str
class PlatformLoginLogResponse(BaseModel):
items: List[PlatformLoginLogItem]
total: int
limit: int
offset: int
has_more: bool
class PlatformActivityItem(BaseModel):

View File

@ -0,0 +1,28 @@
from typing import Optional, Dict, Any
from pydantic import BaseModel
class WorkspaceFileUpdateRequest(BaseModel):
content: str
class WorkspacePreviewUrlRequest(BaseModel):
path: str
ttl_seconds: Optional[int] = None
class PanelLoginRequest(BaseModel):
password: str
class AgentMdTemplatesPayload(BaseModel):
agents_md: Optional[str] = None
soul_md: Optional[str] = None
user_md: Optional[str] = None
tools_md: Optional[str] = None
identity_md: Optional[str] = None
class SystemTemplatesUpdateRequest(BaseModel):
agent_md_templates: Optional[AgentMdTemplatesPayload] = None
topic_presets: Optional[Dict[str, Any]] = None

View File

@ -0,0 +1,380 @@
import os
from datetime import datetime
from typing import Any, Dict, Optional
from fastapi import HTTPException
from sqlmodel import Session
from core.docker_instance import docker_manager
from core.settings import BOTS_WORKSPACE_ROOT
from models.bot import BotInstance
from schemas.bot import (
BotEnvParamsUpdateRequest,
BotMcpConfigUpdateRequest,
ChannelConfigRequest,
ChannelConfigUpdateRequest,
)
from services.bot_service import (
channel_api_to_config,
list_bot_channels_from_config,
normalize_channel_extra,
read_global_delivery_flags,
sync_bot_workspace_channels,
)
from services.bot_mcp_service import (
_merge_mcp_servers_preserving_extras,
_normalize_mcp_servers,
)
from services.bot_storage_service import (
get_bot_resource_limits,
get_bot_workspace_snapshot,
normalize_bot_env_params,
read_bot_config_data,
read_bot_env_params,
write_bot_config_data,
write_bot_env_params,
)
from services.cache_service import _invalidate_bot_detail_cache
MANAGED_WORKSPACE_FILENAMES = ("AGENTS.md", "SOUL.md", "USER.md", "TOOLS.md", "IDENTITY.md")
def _get_bot_or_404(session: Session, bot_id: str) -> BotInstance:
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return bot
def _read_bot_config_object(bot_id: str) -> Dict[str, Any]:
config_data = read_bot_config_data(bot_id)
return config_data if isinstance(config_data, dict) else {}
def _read_bot_tools_cfg(bot_id: str) -> tuple[Dict[str, Any], Dict[str, Any]]:
config_data = _read_bot_config_object(bot_id)
tools_cfg = config_data.get("tools")
if not isinstance(tools_cfg, dict):
tools_cfg = {}
config_data["tools"] = tools_cfg
return config_data, tools_cfg
def _read_bot_channels_cfg(bot_id: str) -> tuple[Dict[str, Any], Dict[str, Any]]:
config_data = _read_bot_config_object(bot_id)
channels_cfg = config_data.get("channels")
if not isinstance(channels_cfg, dict):
channels_cfg = {}
config_data["channels"] = channels_cfg
return config_data, channels_cfg
def _managed_bot_file_paths(bot_id: str) -> Dict[str, str]:
bot_root = os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot")
workspace_root = os.path.join(bot_root, "workspace")
paths = {
"config": os.path.join(bot_root, "config.json"),
"resources": os.path.join(bot_root, "resources.json"),
}
for filename in MANAGED_WORKSPACE_FILENAMES:
paths[f"workspace:{filename}"] = os.path.join(workspace_root, filename)
return paths
def _snapshot_managed_bot_files(bot_id: str) -> Dict[str, Optional[bytes]]:
snapshot: Dict[str, Optional[bytes]] = {}
for key, path in _managed_bot_file_paths(bot_id).items():
if os.path.isfile(path):
with open(path, "rb") as file:
snapshot[key] = file.read()
else:
snapshot[key] = None
return snapshot
def _restore_managed_bot_files(bot_id: str, snapshot: Dict[str, Optional[bytes]]) -> None:
for key, path in _managed_bot_file_paths(bot_id).items():
payload = snapshot.get(key)
if payload is None:
if os.path.exists(path):
os.remove(path)
continue
os.makedirs(os.path.dirname(path), exist_ok=True)
tmp_path = f"{path}.tmp"
with open(tmp_path, "wb") as file:
file.write(payload)
os.replace(tmp_path, path)
def _write_bot_config_state(
session: Session,
*,
bot_id: str,
config_data: Dict[str, Any],
sync_workspace: bool = False,
) -> None:
managed_file_snapshot = _snapshot_managed_bot_files(bot_id) if sync_workspace else None
try:
write_bot_config_data(bot_id, config_data)
if sync_workspace:
sync_bot_workspace_channels(session, bot_id)
except Exception:
if managed_file_snapshot is not None:
_restore_managed_bot_files(bot_id, managed_file_snapshot)
session.rollback()
raise
_invalidate_bot_detail_cache(bot_id)
def _find_channel_row(rows: list[Dict[str, Any]], channel_id: str) -> Dict[str, Any]:
channel_key = str(channel_id or "").strip().lower()
row = next((item for item in rows if str(item.get("id") or "").lower() == channel_key), None)
if not row:
raise HTTPException(status_code=404, detail="Channel not found")
return row
def get_bot_resources_snapshot(session: Session, *, bot_id: str) -> Dict[str, Any]:
bot = _get_bot_or_404(session, bot_id)
configured = get_bot_resource_limits(bot_id)
runtime = docker_manager.get_bot_resource_snapshot(bot_id)
workspace = get_bot_workspace_snapshot(bot_id)
workspace_root = str(workspace.get("path") or "")
workspace_bytes = int(workspace.get("usage_bytes") or 0)
configured_storage_bytes = int(workspace.get("configured_limit_bytes") or 0)
workspace_percent = 0.0
if configured_storage_bytes > 0:
workspace_percent = (workspace_bytes / configured_storage_bytes) * 100.0
limits = runtime.get("limits") or {}
cpu_limited = (limits.get("cpu_cores") or 0) > 0
memory_limited = (limits.get("memory_bytes") or 0) > 0
storage_limited = bool(limits.get("storage_bytes")) or bool(limits.get("storage_opt_raw"))
return {
"bot_id": bot_id,
"docker_status": runtime.get("docker_status") or bot.docker_status,
"configured": configured,
"runtime": runtime,
"workspace": {
"path": workspace_root,
"usage_bytes": workspace_bytes,
"configured_limit_bytes": configured_storage_bytes if configured_storage_bytes > 0 else None,
"usage_percent": max(0.0, workspace_percent),
},
"enforcement": {
"cpu_limited": cpu_limited,
"memory_limited": memory_limited,
"storage_limited": storage_limited,
},
"note": (
"Resource value 0 means unlimited. CPU/Memory limits come from Docker HostConfig and are enforced by cgroup. "
"Storage limit depends on Docker storage driver support."
),
"collected_at": datetime.utcnow().isoformat() + "Z",
}
def list_bot_channels_config(session: Session, *, bot_id: str):
bot = _get_bot_or_404(session, bot_id)
return list_bot_channels_from_config(bot)
def get_bot_tools_config_state(session: Session, *, bot_id: str) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
return {
"bot_id": bot_id,
"tools_config": {},
"managed_by_dashboard": False,
"hint": "Tools config is disabled in dashboard. Configure tool-related env vars manually.",
}
def reject_bot_tools_config_update(
session: Session,
*,
bot_id: str,
payload: Any,
) -> None:
_get_bot_or_404(session, bot_id)
raise HTTPException(
status_code=400,
detail="Tools config is no longer managed by dashboard. Please set required env vars manually.",
)
def get_bot_mcp_config_state(session: Session, *, bot_id: str) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
_config_data, tools_cfg = _read_bot_tools_cfg(bot_id)
mcp_servers = _normalize_mcp_servers(tools_cfg.get("mcpServers"))
return {
"bot_id": bot_id,
"mcp_servers": mcp_servers,
"locked_servers": [],
"restart_required": True,
}
def update_bot_mcp_config_state(
session: Session,
*,
bot_id: str,
payload: BotMcpConfigUpdateRequest,
) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
config_data, tools_cfg = _read_bot_tools_cfg(bot_id)
normalized_mcp_servers = _normalize_mcp_servers(payload.mcp_servers or {})
current_mcp_servers = tools_cfg.get("mcpServers")
merged_mcp_servers = _merge_mcp_servers_preserving_extras(current_mcp_servers, normalized_mcp_servers)
tools_cfg["mcpServers"] = merged_mcp_servers
sanitized_after_save = _normalize_mcp_servers(tools_cfg.get("mcpServers"))
_write_bot_config_state(session, bot_id=bot_id, config_data=config_data)
return {
"status": "updated",
"bot_id": bot_id,
"mcp_servers": sanitized_after_save,
"locked_servers": [],
"restart_required": True,
}
def get_bot_env_params_state(session: Session, *, bot_id: str) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
return {
"bot_id": bot_id,
"env_params": read_bot_env_params(bot_id),
}
def update_bot_env_params_state(
session: Session,
*,
bot_id: str,
payload: BotEnvParamsUpdateRequest,
) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
normalized = normalize_bot_env_params(payload.env_params)
write_bot_env_params(bot_id, normalized)
_invalidate_bot_detail_cache(bot_id)
return {
"status": "updated",
"bot_id": bot_id,
"env_params": normalized,
"restart_required": True,
}
def create_bot_channel_config(
session: Session,
*,
bot_id: str,
payload: ChannelConfigRequest,
) -> Dict[str, Any]:
bot = _get_bot_or_404(session, bot_id)
ctype = (payload.channel_type or "").strip().lower()
if not ctype:
raise HTTPException(status_code=400, detail="channel_type is required")
if ctype == "dashboard":
raise HTTPException(status_code=400, detail="dashboard channel is built-in and cannot be created manually")
current_rows = list_bot_channels_from_config(bot)
if any(str(row.get("channel_type") or "").lower() == ctype for row in current_rows):
raise HTTPException(status_code=400, detail=f"Channel already exists: {ctype}")
new_row = {
"id": ctype,
"bot_id": bot_id,
"channel_type": ctype,
"external_app_id": (payload.external_app_id or "").strip() or f"{ctype}-{bot_id}",
"app_secret": (payload.app_secret or "").strip(),
"internal_port": max(1, min(int(payload.internal_port or 8080), 65535)),
"is_active": bool(payload.is_active),
"extra_config": normalize_channel_extra(payload.extra_config),
"locked": False,
}
config_data, channels_cfg = _read_bot_channels_cfg(bot_id)
channels_cfg[ctype] = channel_api_to_config(new_row)
_write_bot_config_state(session, bot_id=bot_id, config_data=config_data, sync_workspace=True)
return new_row
def update_bot_channel_config(
session: Session,
*,
bot_id: str,
channel_id: str,
payload: ChannelConfigUpdateRequest,
) -> Dict[str, Any]:
bot = _get_bot_or_404(session, bot_id)
rows = list_bot_channels_from_config(bot)
row = _find_channel_row(rows, channel_id)
if str(row.get("channel_type") or "").strip().lower() == "dashboard" or bool(row.get("locked")):
raise HTTPException(status_code=400, detail="dashboard channel is built-in and cannot be modified")
update_data = payload.model_dump(exclude_unset=True)
existing_type = str(row.get("channel_type") or "").strip().lower()
new_type = existing_type
if "channel_type" in update_data and update_data["channel_type"] is not None:
new_type = str(update_data["channel_type"]).strip().lower()
if not new_type:
raise HTTPException(status_code=400, detail="channel_type cannot be empty")
if existing_type == "dashboard" and new_type != "dashboard":
raise HTTPException(status_code=400, detail="dashboard channel type cannot be changed")
if new_type != existing_type and any(str(r.get("channel_type") or "").lower() == new_type for r in rows):
raise HTTPException(status_code=400, detail=f"Channel already exists: {new_type}")
if "external_app_id" in update_data and update_data["external_app_id"] is not None:
row["external_app_id"] = str(update_data["external_app_id"]).strip()
if "app_secret" in update_data and update_data["app_secret"] is not None:
row["app_secret"] = str(update_data["app_secret"]).strip()
if "internal_port" in update_data and update_data["internal_port"] is not None:
row["internal_port"] = max(1, min(int(update_data["internal_port"]), 65535))
if "is_active" in update_data and update_data["is_active"] is not None:
next_active = bool(update_data["is_active"])
if existing_type == "dashboard" and not next_active:
raise HTTPException(status_code=400, detail="dashboard channel must remain enabled")
row["is_active"] = next_active
if "extra_config" in update_data:
row["extra_config"] = normalize_channel_extra(update_data.get("extra_config"))
row["channel_type"] = new_type
row["id"] = new_type
row["locked"] = new_type == "dashboard"
config_data, channels_cfg = _read_bot_channels_cfg(bot_id)
current_send_progress, current_send_tool_hints = read_global_delivery_flags(channels_cfg)
if new_type == "dashboard":
extra = normalize_channel_extra(row.get("extra_config"))
channels_cfg["sendProgress"] = bool(extra.get("sendProgress", current_send_progress))
channels_cfg["sendToolHints"] = bool(extra.get("sendToolHints", current_send_tool_hints))
else:
channels_cfg["sendProgress"] = current_send_progress
channels_cfg["sendToolHints"] = current_send_tool_hints
channels_cfg.pop("dashboard", None)
if existing_type != "dashboard" and existing_type in channels_cfg and existing_type != new_type:
channels_cfg.pop(existing_type, None)
if new_type != "dashboard":
channels_cfg[new_type] = channel_api_to_config(row)
_write_bot_config_state(session, bot_id=bot_id, config_data=config_data, sync_workspace=True)
return row
def delete_bot_channel_config(
session: Session,
*,
bot_id: str,
channel_id: str,
) -> Dict[str, Any]:
bot = _get_bot_or_404(session, bot_id)
rows = list_bot_channels_from_config(bot)
row = _find_channel_row(rows, channel_id)
if str(row.get("channel_type") or "").lower() == "dashboard":
raise HTTPException(status_code=400, detail="dashboard channel cannot be deleted")
config_data, channels_cfg = _read_bot_channels_cfg(bot_id)
channels_cfg.pop(str(row.get("channel_type") or "").lower(), None)
_write_bot_config_state(session, bot_id=bot_id, config_data=config_data, sync_workspace=True)
return {"status": "deleted"}

View File

@ -0,0 +1,159 @@
import asyncio
import os
import shutil
from typing import Any, Dict
from sqlmodel import Session, select
from core.docker_instance import docker_manager
from core.settings import BOTS_WORKSPACE_ROOT
from models.bot import BotInstance, BotMessage
from models.platform import BotActivityEvent, BotRequestUsage
from models.skill import BotSkillInstall
from models.topic import TopicItem, TopicTopic
from services.bot_service import (
_safe_float,
_safe_int,
read_bot_runtime_snapshot,
resolve_bot_runtime_env_params,
sync_bot_workspace_channels,
)
from services.bot_storage_service import write_bot_env_params
from services.cache_service import _invalidate_bot_detail_cache, _invalidate_bot_messages_cache
from services.platform_activity_service import record_activity_event
from services.runtime_service import docker_callback, record_agent_loop_ready_warning
def _get_bot_or_404(session: Session, bot_id: str) -> BotInstance:
bot = session.get(BotInstance, bot_id)
if not bot:
raise ValueError("Bot not found")
return bot
async def start_bot_instance(session: Session, bot_id: str) -> Dict[str, Any]:
bot = _get_bot_or_404(session, bot_id)
if not bool(getattr(bot, "enabled", True)):
raise PermissionError("Bot is disabled. Enable it first.")
sync_bot_workspace_channels(session, bot_id)
runtime_snapshot = read_bot_runtime_snapshot(bot)
env_params = resolve_bot_runtime_env_params(bot_id)
write_bot_env_params(bot_id, env_params)
success = docker_manager.start_bot(
bot_id,
image_tag=bot.image_tag,
on_state_change=docker_callback,
env_vars=env_params,
cpu_cores=_safe_float(runtime_snapshot.get("cpu_cores"), 1.0),
memory_mb=_safe_int(runtime_snapshot.get("memory_mb"), 1024),
storage_gb=_safe_int(runtime_snapshot.get("storage_gb"), 10),
)
if not success:
bot.docker_status = "STOPPED"
session.add(bot)
session.commit()
raise RuntimeError(f"Failed to start container with image {bot.image_tag}")
actual_status = docker_manager.get_bot_status(bot_id)
bot.docker_status = actual_status
if actual_status != "RUNNING":
session.add(bot)
session.commit()
_invalidate_bot_detail_cache(bot_id)
raise RuntimeError("Bot container failed shortly after startup. Check bot logs/config.")
asyncio.create_task(record_agent_loop_ready_warning(bot_id))
session.add(bot)
record_activity_event(session, bot_id, "bot_started", channel="system", detail=f"Container started for {bot_id}")
session.commit()
_invalidate_bot_detail_cache(bot_id)
return {"status": "started"}
def stop_bot_instance(session: Session, bot_id: str) -> Dict[str, Any]:
bot = _get_bot_or_404(session, bot_id)
if not bool(getattr(bot, "enabled", True)):
raise PermissionError("Bot is disabled. Enable it first.")
docker_manager.stop_bot(bot_id)
bot.docker_status = "STOPPED"
session.add(bot)
record_activity_event(session, bot_id, "bot_stopped", channel="system", detail=f"Container stopped for {bot_id}")
session.commit()
_invalidate_bot_detail_cache(bot_id)
return {"status": "stopped"}
def enable_bot_instance(session: Session, bot_id: str) -> Dict[str, Any]:
bot = _get_bot_or_404(session, bot_id)
bot.enabled = True
session.add(bot)
record_activity_event(session, bot_id, "bot_enabled", channel="system", detail=f"Bot {bot_id} enabled")
session.commit()
_invalidate_bot_detail_cache(bot_id)
return {"status": "enabled", "enabled": True}
def disable_bot_instance(session: Session, bot_id: str) -> Dict[str, Any]:
bot = _get_bot_or_404(session, bot_id)
docker_manager.stop_bot(bot_id)
bot.enabled = False
bot.docker_status = "STOPPED"
if str(bot.current_state or "").upper() not in {"ERROR"}:
bot.current_state = "IDLE"
session.add(bot)
record_activity_event(session, bot_id, "bot_disabled", channel="system", detail=f"Bot {bot_id} disabled")
session.commit()
_invalidate_bot_detail_cache(bot_id)
return {"status": "disabled", "enabled": False}
def deactivate_bot_instance(session: Session, bot_id: str) -> Dict[str, Any]:
bot = _get_bot_or_404(session, bot_id)
docker_manager.stop_bot(bot_id)
bot.enabled = False
bot.docker_status = "STOPPED"
if str(bot.current_state or "").upper() not in {"ERROR"}:
bot.current_state = "IDLE"
session.add(bot)
record_activity_event(session, bot_id, "bot_deactivated", channel="system", detail=f"Bot {bot_id} deactivated")
session.commit()
_invalidate_bot_detail_cache(bot_id)
return {"status": "deactivated"}
def delete_bot_instance(session: Session, bot_id: str, delete_workspace: bool = True) -> Dict[str, Any]:
bot = _get_bot_or_404(session, bot_id)
docker_manager.stop_bot(bot_id, remove=True)
messages = session.exec(select(BotMessage).where(BotMessage.bot_id == bot_id)).all()
for row in messages:
session.delete(row)
topic_items = session.exec(select(TopicItem).where(TopicItem.bot_id == bot_id)).all()
for row in topic_items:
session.delete(row)
topics = session.exec(select(TopicTopic).where(TopicTopic.bot_id == bot_id)).all()
for row in topics:
session.delete(row)
usage_rows = session.exec(select(BotRequestUsage).where(BotRequestUsage.bot_id == bot_id)).all()
for row in usage_rows:
session.delete(row)
activity_rows = session.exec(select(BotActivityEvent).where(BotActivityEvent.bot_id == bot_id)).all()
for row in activity_rows:
session.delete(row)
skill_install_rows = session.exec(select(BotSkillInstall).where(BotSkillInstall.bot_id == bot_id)).all()
for row in skill_install_rows:
session.delete(row)
session.delete(bot)
session.commit()
if delete_workspace:
workspace_root = os.path.join(BOTS_WORKSPACE_ROOT, bot_id)
if os.path.isdir(workspace_root):
shutil.rmtree(workspace_root, ignore_errors=True)
_invalidate_bot_detail_cache(bot_id)
_invalidate_bot_messages_cache(bot_id)
return {"status": "deleted", "workspace_deleted": bool(delete_workspace)}

View File

@ -0,0 +1,339 @@
import os
import re
import shutil
from typing import Any, Dict, List, Optional
from fastapi import HTTPException
from sqlmodel import Session, select
from core.cache import cache
from core.docker_instance import docker_manager
from core.settings import BOTS_WORKSPACE_ROOT
from models.bot import BotInstance, NanobotImage
from schemas.bot import BotCreateRequest, BotUpdateRequest
from services.bot_service import (
normalize_initial_bot_channels,
normalize_bot_system_timezone,
resolve_bot_runtime_env_params,
serialize_bot_detail,
serialize_bot_list_entry,
sync_bot_workspace_channels,
)
from services.bot_storage_service import (
normalize_bot_env_params,
normalize_bot_resource_limits,
write_bot_env_params,
write_bot_resource_limits,
)
from services.cache_service import _cache_key_bot_detail, _cache_key_bots_list, _invalidate_bot_detail_cache
from services.platform_activity_service import record_activity_event
BOT_ID_PATTERN = re.compile(r"^[A-Za-z0-9_]+$")
MANAGED_WORKSPACE_FILENAMES = ("AGENTS.md", "SOUL.md", "USER.md", "TOOLS.md", "IDENTITY.md")
def _managed_bot_file_paths(bot_id: str) -> Dict[str, str]:
bot_root = os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot")
workspace_root = os.path.join(bot_root, "workspace")
paths = {
"config": os.path.join(bot_root, "config.json"),
"env": os.path.join(bot_root, "env.json"),
"resources": os.path.join(bot_root, "resources.json"),
}
for filename in MANAGED_WORKSPACE_FILENAMES:
paths[f"workspace:{filename}"] = os.path.join(workspace_root, filename)
return paths
def _snapshot_managed_bot_files(bot_id: str) -> Dict[str, Optional[bytes]]:
snapshot: Dict[str, Optional[bytes]] = {}
for key, path in _managed_bot_file_paths(bot_id).items():
if os.path.isfile(path):
with open(path, "rb") as file:
snapshot[key] = file.read()
else:
snapshot[key] = None
return snapshot
def _restore_managed_bot_files(bot_id: str, snapshot: Dict[str, Optional[bytes]]) -> None:
for key, path in _managed_bot_file_paths(bot_id).items():
payload = snapshot.get(key)
if payload is None:
if os.path.exists(path):
os.remove(path)
continue
os.makedirs(os.path.dirname(path), exist_ok=True)
tmp_path = f"{path}.tmp"
with open(tmp_path, "wb") as file:
file.write(payload)
os.replace(tmp_path, path)
def _cleanup_bot_workspace_root(bot_id: str) -> None:
bot_root = os.path.join(BOTS_WORKSPACE_ROOT, bot_id)
if os.path.isdir(bot_root):
shutil.rmtree(bot_root, ignore_errors=True)
def _require_runtime_text(raw: Any, *, field: str) -> str:
value = str(raw if raw is not None else "").strip()
if not value:
raise HTTPException(status_code=400, detail=f"{field} is required")
return value
def create_bot_record(session: Session, *, payload: BotCreateRequest) -> Dict[str, Any]:
normalized_bot_id = str(payload.id or "").strip()
if not normalized_bot_id:
raise HTTPException(status_code=400, detail="Bot ID is required")
if not BOT_ID_PATTERN.fullmatch(normalized_bot_id):
raise HTTPException(status_code=400, detail="Bot ID can only contain letters, numbers, and underscores")
if session.get(BotInstance, normalized_bot_id):
raise HTTPException(status_code=409, detail=f"Bot ID already exists: {normalized_bot_id}")
image_row = session.get(NanobotImage, payload.image_tag)
if not image_row:
raise HTTPException(status_code=400, detail=f"Image not registered in DB: {payload.image_tag}")
if image_row.status != "READY":
raise HTTPException(status_code=400, detail=f"Image status is not READY: {payload.image_tag} ({image_row.status})")
if not docker_manager.has_image(payload.image_tag):
raise HTTPException(status_code=400, detail=f"Docker image not found locally: {payload.image_tag}")
normalized_env_params = normalize_bot_env_params(payload.env_params)
try:
normalized_env_params["TZ"] = normalize_bot_system_timezone(payload.system_timezone)
except ValueError as exc:
raise HTTPException(status_code=400, detail=str(exc)) from exc
llm_provider = _require_runtime_text(payload.llm_provider, field="llm_provider")
llm_model = _require_runtime_text(payload.llm_model, field="llm_model")
api_key = _require_runtime_text(payload.api_key, field="api_key")
api_base = _require_runtime_text(payload.api_base, field="api_base")
bot = BotInstance(
id=normalized_bot_id,
name=payload.name,
enabled=bool(payload.enabled) if payload.enabled is not None else True,
access_password=str(payload.access_password or ""),
image_tag=payload.image_tag,
workspace_dir=os.path.join(BOTS_WORKSPACE_ROOT, normalized_bot_id),
)
resource_limits = normalize_bot_resource_limits(payload.cpu_cores, payload.memory_mb, payload.storage_gb)
try:
session.add(bot)
session.flush()
write_bot_env_params(normalized_bot_id, normalized_env_params)
write_bot_resource_limits(
normalized_bot_id,
resource_limits["cpu_cores"],
resource_limits["memory_mb"],
resource_limits["storage_gb"],
)
sync_bot_workspace_channels(
session,
normalized_bot_id,
channels_override=normalize_initial_bot_channels(normalized_bot_id, payload.channels),
global_delivery_override={
"sendProgress": bool(payload.send_progress) if payload.send_progress is not None else False,
"sendToolHints": bool(payload.send_tool_hints) if payload.send_tool_hints is not None else False,
},
runtime_overrides={
"llm_provider": llm_provider,
"llm_model": llm_model,
"api_key": api_key,
"api_base": api_base,
"temperature": payload.temperature,
"top_p": payload.top_p,
"max_tokens": payload.max_tokens,
"cpu_cores": resource_limits["cpu_cores"],
"memory_mb": resource_limits["memory_mb"],
"storage_gb": resource_limits["storage_gb"],
"soul_md": payload.soul_md,
"agents_md": payload.agents_md,
"user_md": payload.user_md,
"tools_md": payload.tools_md,
"identity_md": payload.identity_md,
"send_progress": bool(payload.send_progress) if payload.send_progress is not None else False,
"send_tool_hints": bool(payload.send_tool_hints) if payload.send_tool_hints is not None else False,
},
)
record_activity_event(
session,
normalized_bot_id,
"bot_created",
channel="system",
detail=f"Bot {normalized_bot_id} created",
metadata={"image_tag": payload.image_tag},
)
session.commit()
session.refresh(bot)
except Exception:
session.rollback()
_cleanup_bot_workspace_root(normalized_bot_id)
raise
_invalidate_bot_detail_cache(normalized_bot_id)
return serialize_bot_detail(bot)
def list_bots_with_cache(session: Session) -> List[Dict[str, Any]]:
cached = cache.get_json(_cache_key_bots_list())
if isinstance(cached, list):
return cached
bots = session.exec(
select(BotInstance).order_by(BotInstance.created_at.desc(), BotInstance.id.asc())
).all()
dirty = False
for bot in bots:
actual_status = docker_manager.get_bot_status(bot.id)
if bot.docker_status != actual_status:
bot.docker_status = actual_status
if actual_status != "RUNNING" and str(bot.current_state or "").upper() not in {"ERROR"}:
bot.current_state = "IDLE"
session.add(bot)
dirty = True
if dirty:
session.commit()
for bot in bots:
session.refresh(bot)
rows = [serialize_bot_list_entry(bot) for bot in bots]
cache.set_json(_cache_key_bots_list(), rows, ttl=30)
return rows
def get_bot_detail_cached(session: Session, *, bot_id: str) -> Dict[str, Any]:
cached = cache.get_json(_cache_key_bot_detail(bot_id))
if isinstance(cached, dict):
return cached
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
row = serialize_bot_detail(bot)
cache.set_json(_cache_key_bot_detail(bot_id), row, ttl=30)
return row
def authenticate_bot_page_access(session: Session, *, bot_id: str, password: str) -> Dict[str, Any]:
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
configured = str(bot.access_password or "").strip()
if not configured:
return {"ok": True, "enabled": False, "bot_id": bot_id}
candidate = str(password or "").strip()
if not candidate:
raise HTTPException(status_code=401, detail="Bot access password required")
if candidate != configured:
raise HTTPException(status_code=401, detail="Invalid bot access password")
return {"ok": True, "enabled": True, "bot_id": bot_id}
def update_bot_record(session: Session, *, bot_id: str, payload: BotUpdateRequest) -> Dict[str, Any]:
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
managed_file_snapshot = _snapshot_managed_bot_files(bot_id)
update_data = payload.model_dump(exclude_unset=True)
if "image_tag" in update_data and update_data["image_tag"]:
image_tag = str(update_data["image_tag"]).strip()
image_row = session.get(NanobotImage, image_tag)
if not image_row:
raise HTTPException(status_code=400, detail=f"Image not registered in DB: {image_tag}")
if image_row.status != "READY":
raise HTTPException(status_code=400, detail=f"Image status is not READY: {image_tag} ({image_row.status})")
if not docker_manager.has_image(image_tag):
raise HTTPException(status_code=400, detail=f"Docker image not found locally: {image_tag}")
env_params = update_data.pop("env_params", None) if isinstance(update_data, dict) else None
system_timezone = update_data.pop("system_timezone", None) if isinstance(update_data, dict) else None
normalized_system_timezone: Optional[str] = None
if system_timezone is not None:
try:
normalized_system_timezone = normalize_bot_system_timezone(system_timezone)
except ValueError as exc:
raise HTTPException(status_code=400, detail=str(exc)) from exc
runtime_fields = {
"llm_provider",
"llm_model",
"api_key",
"api_base",
"temperature",
"top_p",
"max_tokens",
"cpu_cores",
"memory_mb",
"storage_gb",
"soul_md",
"agents_md",
"user_md",
"tools_md",
"identity_md",
"send_progress",
"send_tool_hints",
}
runtime_overrides: Dict[str, Any] = {}
update_data.pop("tools_config", None) if isinstance(update_data, dict) else None
for field in runtime_fields:
if field in update_data:
runtime_overrides[field] = update_data.pop(field)
for text_field in ("llm_provider", "llm_model", "api_key", "api_base"):
if text_field in runtime_overrides:
runtime_overrides[text_field] = _require_runtime_text(
runtime_overrides.get(text_field),
field=text_field,
)
if {"cpu_cores", "memory_mb", "storage_gb"} & set(runtime_overrides.keys()):
runtime_overrides.update(
normalize_bot_resource_limits(
runtime_overrides.get("cpu_cores"),
runtime_overrides.get("memory_mb"),
runtime_overrides.get("storage_gb"),
)
)
for key, value in update_data.items():
if key in {"name", "image_tag", "access_password", "enabled"}:
setattr(bot, key, value)
try:
session.add(bot)
session.flush()
if env_params is not None or normalized_system_timezone is not None:
next_env_params = resolve_bot_runtime_env_params(bot_id)
if env_params is not None:
next_env_params = normalize_bot_env_params(env_params)
if normalized_system_timezone is not None:
next_env_params["TZ"] = normalized_system_timezone
write_bot_env_params(bot_id, next_env_params)
global_delivery_override: Optional[Dict[str, Any]] = None
if "send_progress" in runtime_overrides or "send_tool_hints" in runtime_overrides:
global_delivery_override = {}
if "send_progress" in runtime_overrides:
global_delivery_override["sendProgress"] = bool(runtime_overrides.get("send_progress"))
if "send_tool_hints" in runtime_overrides:
global_delivery_override["sendToolHints"] = bool(runtime_overrides.get("send_tool_hints"))
sync_bot_workspace_channels(
session,
bot_id,
runtime_overrides=runtime_overrides if runtime_overrides else None,
global_delivery_override=global_delivery_override,
)
session.commit()
session.refresh(bot)
except Exception:
session.rollback()
_restore_managed_bot_files(bot_id, managed_file_snapshot)
refreshed_bot = session.get(BotInstance, bot_id)
if refreshed_bot:
session.refresh(refreshed_bot)
bot = refreshed_bot
raise
_invalidate_bot_detail_cache(bot_id)
return serialize_bot_detail(bot)
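
For reference, a hypothetical create call. Only the fields this module actually validates are shown, with placeholder values, and it assumes the remaining BotCreateRequest fields are optional:

payload = BotCreateRequest(
    id="demo_bot",                    # must match ^[A-Za-z0-9_]+$ and be unused
    name="Demo Bot",
    image_tag="nanobot:latest",       # must be registered, READY, and present locally
    system_timezone="Asia/Shanghai",  # validated as an IANA zone, stored as TZ
    llm_provider="openai",
    llm_model="gpt-4o-mini",
    api_key="sk-placeholder",
    api_base="https://api.openai.com/v1",
)
detail = create_bot_record(session, payload=payload)  # raises HTTPException on bad input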

View File

@ -0,0 +1,71 @@
import re
from typing import Any, Dict
_MCP_SERVER_NAME_RE = re.compile(r"[A-Za-z0-9][A-Za-z0-9._-]{0,63}")
def _normalize_mcp_servers(raw: Any) -> Dict[str, Dict[str, Any]]:
if not isinstance(raw, dict):
return {}
rows: Dict[str, Dict[str, Any]] = {}
for server_name, server_cfg in raw.items():
name = str(server_name or "").strip()
if not name or not _MCP_SERVER_NAME_RE.fullmatch(name):
continue
if not isinstance(server_cfg, dict):
continue
url = str(server_cfg.get("url") or "").strip()
if not url:
continue
transport_type = str(server_cfg.get("type") or "streamableHttp").strip()
if transport_type not in {"streamableHttp", "sse"}:
transport_type = "streamableHttp"
headers_raw = server_cfg.get("headers")
headers: Dict[str, str] = {}
if isinstance(headers_raw, dict):
for key, value in headers_raw.items():
header_key = str(key or "").strip()
if not header_key:
continue
headers[header_key] = str(value or "").strip()
timeout_raw = server_cfg.get("toolTimeout", 60)
try:
timeout = int(timeout_raw)
except Exception:
timeout = 60
rows[name] = {
"type": transport_type,
"url": url,
"headers": headers,
"toolTimeout": max(1, min(timeout, 600)),
}
return rows
def _merge_mcp_servers_preserving_extras(
current_raw: Any,
normalized: Dict[str, Dict[str, Any]],
) -> Dict[str, Dict[str, Any]]:
current_map = current_raw if isinstance(current_raw, dict) else {}
merged: Dict[str, Dict[str, Any]] = {}
for name, normalized_cfg in normalized.items():
base = current_map.get(name)
base_cfg = dict(base) if isinstance(base, dict) else {}
next_cfg = dict(base_cfg)
next_cfg.update(normalized_cfg)
merged[name] = next_cfg
return merged
def _sanitize_mcp_servers_in_config_data(config_data: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
if not isinstance(config_data, dict):
return {}
tools_cfg = config_data.get("tools")
if not isinstance(tools_cfg, dict):
tools_cfg = {}
current_raw = tools_cfg.get("mcpServers")
normalized = _normalize_mcp_servers(current_raw)
merged = _merge_mcp_servers_preserving_extras(current_raw, normalized)
tools_cfg["mcpServers"] = merged
config_data["tools"] = tools_cfg
return merged
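
Example input/output for _normalize_mcp_servers (illustrative values): entries with invalid names or no url are dropped, unknown transports fall back to streamableHttp, and toolTimeout is clamped to 1..600 seconds:

raw = {
    "search": {"url": "https://mcp.example.com/sse", "type": "sse", "toolTimeout": 9999},
    "bad name!": {"url": "https://ignored.example.com"},  # dropped: name fails the regex
    "no_url": {"type": "streamableHttp"},                 # dropped: missing url
}
assert _normalize_mcp_servers(raw) == {
    "search": {"type": "sse", "url": "https://mcp.example.com/sse", "headers": {}, "toolTimeout": 600},
}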

View File

@ -0,0 +1,214 @@
from __future__ import annotations
import time
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Optional
from zoneinfo import ZoneInfo
from fastapi import WebSocket
from sqlmodel import Session
from core.docker_instance import docker_manager
from core.settings import BOTS_WORKSPACE_ROOT
from models.bot import BotInstance
from services.bot_lifecycle_service import start_bot_instance, stop_bot_instance
from services.bot_service import list_bot_channels_from_config
from services.bot_storage_service import (
read_bot_config_data,
read_bot_cron_jobs_store,
write_bot_config_data,
write_bot_cron_jobs_store,
)
from services.platform_auth_service import resolve_bot_websocket_auth, resolve_panel_websocket_auth
def _now_ms() -> int:
return int(time.time() * 1000)
def _get_bot_or_raise(session: Session, bot_id: str) -> BotInstance:
bot = session.get(BotInstance, bot_id)
if not bot:
raise LookupError("Bot not found")
return bot
def _weixin_state_file_path(bot_id: str) -> Path:
return Path(BOTS_WORKSPACE_ROOT) / bot_id / ".nanobot" / "weixin" / "account.json"
def _compute_cron_next_run(schedule: Dict[str, Any], now_ms: Optional[int] = None) -> Optional[int]:
current_ms = int(now_ms or _now_ms())
kind = str(schedule.get("kind") or "").strip().lower()
if kind == "at":
at_ms = int(schedule.get("atMs") or 0)
return at_ms if at_ms > current_ms else None
if kind == "every":
every_ms = int(schedule.get("everyMs") or 0)
return current_ms + every_ms if every_ms > 0 else None
if kind == "cron":
expr = str(schedule.get("expr") or "").strip()
if not expr:
return None
try:
from croniter import croniter
tz_name = str(schedule.get("tz") or "").strip()
tz = ZoneInfo(tz_name) if tz_name else datetime.now().astimezone().tzinfo
base_dt = datetime.fromtimestamp(current_ms / 1000, tz=tz)
next_dt = croniter(expr, base_dt).get_next(datetime)
return int(next_dt.timestamp() * 1000)
except Exception:
return None
return None
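# Worked example for _compute_cron_next_run (hypothetical values): for
# {"kind": "cron", "expr": "0 9 * * *", "tz": "Asia/Shanghai"} evaluated at 10:00
# local time, croniter yields 09:00 the next day, returned as epoch milliseconds.
# croniter is imported lazily above, so scheduling degrades to None when it is missing.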
def get_bot_logs(
session: Session,
*,
bot_id: str,
tail: Optional[int] = 300,
offset: int = 0,
limit: Optional[int] = None,
reverse: bool = False,
) -> Dict[str, Any]:
_get_bot_or_raise(session, bot_id)
if limit is not None:
page = docker_manager.get_logs_page(
bot_id,
offset=max(0, int(offset)),
limit=max(1, int(limit)),
reverse=bool(reverse),
)
return {"bot_id": bot_id, **page}
effective_tail = max(1, int(tail or 300))
return {
"bot_id": bot_id,
"logs": docker_manager.get_recent_logs(bot_id, tail=effective_tail),
}
async def relogin_weixin(session: Session, *, bot_id: str) -> Dict[str, Any]:
bot = _get_bot_or_raise(session, bot_id)
weixin_channel = next(
(
row
for row in list_bot_channels_from_config(bot)
if str(row.get("channel_type") or "").strip().lower() == "weixin"
),
None,
)
if not weixin_channel:
raise ValueError("Weixin channel not found")
state_file = _weixin_state_file_path(bot_id)
removed = False
try:
if state_file.is_file():
state_file.unlink()
removed = True
except Exception as exc:
raise RuntimeError(f"Failed to remove weixin state: {exc}") from exc
config_data = read_bot_config_data(bot_id)
channels_cfg = config_data.get("channels") if isinstance(config_data, dict) else {}
weixin_cfg = channels_cfg.get("weixin") if isinstance(channels_cfg, dict) else None
if isinstance(weixin_cfg, dict) and "token" in weixin_cfg:
weixin_cfg.pop("token", None)
write_bot_config_data(bot_id, config_data)
restarted = False
if str(bot.docker_status or "").upper() == "RUNNING":
stop_bot_instance(session, bot_id)
await start_bot_instance(session, bot_id)
restarted = True
return {
"status": "relogin_started",
"bot_id": bot_id,
"removed_state": removed,
"restarted": restarted,
}
def list_cron_jobs(session: Session, *, bot_id: str, include_disabled: bool = True) -> Dict[str, Any]:
_get_bot_or_raise(session, bot_id)
store = read_bot_cron_jobs_store(bot_id)
rows = []
for row in store.get("jobs", []):
if not isinstance(row, dict):
continue
enabled = bool(row.get("enabled", True))
if not include_disabled and not enabled:
continue
rows.append(row)
rows.sort(key=lambda value: int(((value.get("state") or {}).get("nextRunAtMs")) or 2**62))
return {"bot_id": bot_id, "version": int(store.get("version", 1) or 1), "jobs": rows}
def stop_cron_job(session: Session, *, bot_id: str, job_id: str) -> Dict[str, Any]:
_get_bot_or_raise(session, bot_id)
store = read_bot_cron_jobs_store(bot_id)
jobs = store.get("jobs", [])
if not isinstance(jobs, list):
jobs = []
found = next((row for row in jobs if isinstance(row, dict) and str(row.get("id")) == job_id), None)
if not found:
raise LookupError("Cron job not found")
found["enabled"] = False
found["updatedAtMs"] = _now_ms()
state = found.get("state")
if not isinstance(state, dict):
state = {}
found["state"] = state
state["nextRunAtMs"] = None
write_bot_cron_jobs_store(bot_id, {"version": int(store.get("version", 1) or 1), "jobs": jobs})
return {"status": "stopped", "job_id": job_id}
def start_cron_job(session: Session, *, bot_id: str, job_id: str) -> Dict[str, Any]:
_get_bot_or_raise(session, bot_id)
store = read_bot_cron_jobs_store(bot_id)
jobs = store.get("jobs", [])
if not isinstance(jobs, list):
jobs = []
found = next((row for row in jobs if isinstance(row, dict) and str(row.get("id")) == job_id), None)
if not found:
raise LookupError("Cron job not found")
found["enabled"] = True
found["updatedAtMs"] = _now_ms()
state = found.get("state")
if not isinstance(state, dict):
state = {}
found["state"] = state
schedule = found.get("schedule")
state["nextRunAtMs"] = _compute_cron_next_run(schedule if isinstance(schedule, dict) else {})
write_bot_cron_jobs_store(bot_id, {"version": int(store.get("version", 1) or 1), "jobs": jobs})
return {"status": "started", "job_id": job_id}
def delete_cron_job(session: Session, *, bot_id: str, job_id: str) -> Dict[str, Any]:
_get_bot_or_raise(session, bot_id)
store = read_bot_cron_jobs_store(bot_id)
jobs = store.get("jobs", [])
if not isinstance(jobs, list):
jobs = []
kept = [row for row in jobs if not (isinstance(row, dict) and str(row.get("id")) == job_id)]
if len(kept) == len(jobs):
raise LookupError("Cron job not found")
write_bot_cron_jobs_store(bot_id, {"version": int(store.get("version", 1) or 1), "jobs": kept})
return {"status": "deleted", "job_id": job_id}
def ensure_monitor_websocket_access(session: Session, websocket: WebSocket, bot_id: str) -> BotInstance:
principal = resolve_panel_websocket_auth(session, websocket)
if not principal.authenticated:
principal = resolve_bot_websocket_auth(session, websocket, bot_id)
if not principal.authenticated:
raise PermissionError("Bot or panel authentication required")
return _get_bot_or_raise(session, bot_id)
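
A hypothetical stop/start round trip (session, bot id, and job id are placeholders): stop_cron_job disables the job and clears state["nextRunAtMs"], and start_cron_job re-enables it and recomputes the next run from the stored schedule:

stop_cron_job(session, bot_id="demo_bot", job_id="job-1")
resp = start_cron_job(session, bot_id="demo_bot", job_id="job-1")
assert resp == {"status": "started", "job_id": "job-1"}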

View File

@ -0,0 +1,549 @@
import os
from pathlib import Path
from typing import Any, Dict, List, Optional
from zoneinfo import ZoneInfo
from sqlmodel import Session
from core.settings import BOTS_WORKSPACE_ROOT
from models.bot import BotInstance
from providers.bot_workspace_provider import BotWorkspaceProvider
from schemas.bot import ChannelConfigRequest
from services.bot_storage_service import (
_normalize_env_params,
_read_bot_config,
_read_bot_resources,
_read_env_store,
_safe_float,
_safe_int,
_workspace_root,
normalize_bot_resource_limits,
write_bot_resource_limits,
)
workspace_provider = BotWorkspaceProvider(host_data_root=BOTS_WORKSPACE_ROOT)
def normalize_bot_system_timezone(raw: Any) -> str:
value = str(raw or "").strip()
if not value:
raise ValueError("System timezone is required")
try:
ZoneInfo(value)
except Exception as exc:
raise ValueError("Invalid system timezone. Use an IANA timezone such as Asia/Shanghai.") from exc
return value
def resolve_bot_runtime_env_params(bot_id: str, raw: Optional[Dict[str, str]] = None) -> Dict[str, str]:
env_params = _normalize_env_params(raw if isinstance(raw, dict) else _read_env_store(bot_id))
if "TZ" not in env_params:
raise RuntimeError(f"Missing required TZ in bot env settings: {bot_id}")
env_params["TZ"] = normalize_bot_system_timezone(env_params.get("TZ"))
return env_params
def normalize_channel_extra(raw: Any) -> Dict[str, Any]:
if not isinstance(raw, dict):
return {}
return raw
def _normalize_allow_from(raw: Any) -> List[str]:
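    # An empty or missing allow-list normalizes to ["*"], i.e. allow all senders.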
rows: List[str] = []
if isinstance(raw, list):
for item in raw:
text = str(item or "").strip()
if text and text not in rows:
rows.append(text)
return rows or ["*"]
def read_global_delivery_flags(channels_cfg: Any) -> tuple[bool, bool]:
if not isinstance(channels_cfg, dict):
return False, False
return bool(channels_cfg.get("sendProgress")), bool(channels_cfg.get("sendToolHints"))
def channel_config_to_api(bot_id: str, channel_type: str, cfg: Dict[str, Any]) -> Dict[str, Any]:
ctype = str(channel_type or "").strip().lower()
enabled = bool(cfg.get("enabled", True))
port = max(1, min(int(cfg.get("port", 8080) or 8080), 65535))
extra: Dict[str, Any] = {}
external_app_id = ""
app_secret = ""
if ctype == "feishu":
external_app_id = str(cfg.get("appId") or "")
app_secret = str(cfg.get("appSecret") or "")
extra = {
"encryptKey": cfg.get("encryptKey", ""),
"verificationToken": cfg.get("verificationToken", ""),
"allowFrom": _normalize_allow_from(cfg.get("allowFrom", [])),
}
elif ctype == "dingtalk":
external_app_id = str(cfg.get("clientId") or "")
app_secret = str(cfg.get("clientSecret") or "")
extra = {"allowFrom": _normalize_allow_from(cfg.get("allowFrom", []))}
elif ctype == "telegram":
app_secret = str(cfg.get("token") or "")
extra = {
"proxy": cfg.get("proxy", ""),
"replyToMessage": bool(cfg.get("replyToMessage", False)),
"allowFrom": _normalize_allow_from(cfg.get("allowFrom", [])),
}
elif ctype == "slack":
external_app_id = str(cfg.get("botToken") or "")
app_secret = str(cfg.get("appToken") or "")
extra = {
"mode": cfg.get("mode", "socket"),
"replyInThread": bool(cfg.get("replyInThread", True)),
"groupPolicy": cfg.get("groupPolicy", "mention"),
"groupAllowFrom": cfg.get("groupAllowFrom", []),
"reactEmoji": cfg.get("reactEmoji", "eyes"),
}
elif ctype == "qq":
external_app_id = str(cfg.get("appId") or "")
app_secret = str(cfg.get("secret") or "")
extra = {"allowFrom": _normalize_allow_from(cfg.get("allowFrom", []))}
elif ctype == "wecom":
external_app_id = str(cfg.get("botId") or "")
app_secret = str(cfg.get("secret") or "")
extra = {
"allowFrom": _normalize_allow_from(cfg.get("allowFrom", [])),
"welcomeMessage": str(cfg.get("welcomeMessage") or ""),
}
elif ctype == "weixin":
app_secret = ""
extra = {
"hasSavedState": (Path(BOTS_WORKSPACE_ROOT) / bot_id / ".nanobot" / "weixin" / "account.json").is_file(),
}
elif ctype == "email":
extra = {
"consentGranted": bool(cfg.get("consentGranted", False)),
"imapHost": str(cfg.get("imapHost") or ""),
"imapPort": int(cfg.get("imapPort") or 993),
"imapUsername": str(cfg.get("imapUsername") or ""),
"imapPassword": str(cfg.get("imapPassword") or ""),
"imapMailbox": str(cfg.get("imapMailbox") or "INBOX"),
"imapUseSsl": bool(cfg.get("imapUseSsl", True)),
"smtpHost": str(cfg.get("smtpHost") or ""),
"smtpPort": int(cfg.get("smtpPort") or 587),
"smtpUsername": str(cfg.get("smtpUsername") or ""),
"smtpPassword": str(cfg.get("smtpPassword") or ""),
"smtpUseTls": bool(cfg.get("smtpUseTls", True)),
"smtpUseSsl": bool(cfg.get("smtpUseSsl", False)),
"fromAddress": str(cfg.get("fromAddress") or ""),
"autoReplyEnabled": bool(cfg.get("autoReplyEnabled", True)),
"pollIntervalSeconds": int(cfg.get("pollIntervalSeconds") or 30),
"markSeen": bool(cfg.get("markSeen", True)),
"maxBodyChars": int(cfg.get("maxBodyChars") or 12000),
"subjectPrefix": str(cfg.get("subjectPrefix") or "Re: "),
"allowFrom": _normalize_allow_from(cfg.get("allowFrom", [])),
}
else:
external_app_id = str(
cfg.get("appId") or cfg.get("clientId") or cfg.get("botToken") or cfg.get("externalAppId") or ""
)
app_secret = str(
cfg.get("appSecret")
or cfg.get("clientSecret")
or cfg.get("secret")
or cfg.get("token")
or cfg.get("appToken")
or ""
)
extra = {
key: value
for key, value in cfg.items()
if key
not in {
"enabled",
"port",
"appId",
"clientId",
"botToken",
"externalAppId",
"appSecret",
"clientSecret",
"secret",
"token",
"appToken",
}
}
return {
"id": ctype,
"bot_id": bot_id,
"channel_type": ctype,
"external_app_id": external_app_id,
"app_secret": app_secret,
"internal_port": port,
"is_active": enabled,
"extra_config": extra,
"locked": ctype == "dashboard",
}
def channel_api_to_config(row: Dict[str, Any]) -> Dict[str, Any]:
ctype = str(row.get("channel_type") or "").strip().lower()
enabled = bool(row.get("is_active", True))
extra = normalize_channel_extra(row.get("extra_config"))
external_app_id = str(row.get("external_app_id") or "")
app_secret = str(row.get("app_secret") or "")
port = max(1, min(int(row.get("internal_port") or 8080), 65535))
if ctype == "feishu":
return {
"enabled": enabled,
"appId": external_app_id,
"appSecret": app_secret,
"encryptKey": extra.get("encryptKey", ""),
"verificationToken": extra.get("verificationToken", ""),
"allowFrom": _normalize_allow_from(extra.get("allowFrom", [])),
}
if ctype == "dingtalk":
return {
"enabled": enabled,
"clientId": external_app_id,
"clientSecret": app_secret,
"allowFrom": _normalize_allow_from(extra.get("allowFrom", [])),
}
if ctype == "telegram":
return {
"enabled": enabled,
"token": app_secret,
"proxy": extra.get("proxy", ""),
"replyToMessage": bool(extra.get("replyToMessage", False)),
"allowFrom": _normalize_allow_from(extra.get("allowFrom", [])),
}
if ctype == "slack":
return {
"enabled": enabled,
"mode": extra.get("mode", "socket"),
"botToken": external_app_id,
"appToken": app_secret,
"replyInThread": bool(extra.get("replyInThread", True)),
"groupPolicy": extra.get("groupPolicy", "mention"),
"groupAllowFrom": extra.get("groupAllowFrom", []),
"reactEmoji": extra.get("reactEmoji", "eyes"),
}
if ctype == "qq":
return {
"enabled": enabled,
"appId": external_app_id,
"secret": app_secret,
"allowFrom": _normalize_allow_from(extra.get("allowFrom", [])),
}
if ctype == "wecom":
return {
"enabled": enabled,
"botId": external_app_id,
"secret": app_secret,
"allowFrom": _normalize_allow_from(extra.get("allowFrom", [])),
"welcomeMessage": str(extra.get("welcomeMessage") or ""),
}
if ctype == "weixin":
return {
"enabled": enabled,
"token": app_secret,
}
if ctype == "email":
return {
"enabled": enabled,
"consentGranted": bool(extra.get("consentGranted", False)),
"imapHost": str(extra.get("imapHost") or ""),
"imapPort": max(1, min(int(extra.get("imapPort") or 993), 65535)),
"imapUsername": str(extra.get("imapUsername") or ""),
"imapPassword": str(extra.get("imapPassword") or ""),
"imapMailbox": str(extra.get("imapMailbox") or "INBOX"),
"imapUseSsl": bool(extra.get("imapUseSsl", True)),
"smtpHost": str(extra.get("smtpHost") or ""),
"smtpPort": max(1, min(int(extra.get("smtpPort") or 587), 65535)),
"smtpUsername": str(extra.get("smtpUsername") or ""),
"smtpPassword": str(extra.get("smtpPassword") or ""),
"smtpUseTls": bool(extra.get("smtpUseTls", True)),
"smtpUseSsl": bool(extra.get("smtpUseSsl", False)),
"fromAddress": str(extra.get("fromAddress") or ""),
"autoReplyEnabled": bool(extra.get("autoReplyEnabled", True)),
"pollIntervalSeconds": max(5, int(extra.get("pollIntervalSeconds") or 30)),
"markSeen": bool(extra.get("markSeen", True)),
"maxBodyChars": max(1, int(extra.get("maxBodyChars") or 12000)),
"subjectPrefix": str(extra.get("subjectPrefix") or "Re: "),
"allowFrom": _normalize_allow_from(extra.get("allowFrom", [])),
}
merged = dict(extra)
merged.update(
{
"enabled": enabled,
"appId": external_app_id,
"appSecret": app_secret,
"port": port,
}
)
return merged
def list_bot_channels_from_config(bot: BotInstance) -> List[Dict[str, Any]]:
config_data = _read_bot_config(bot.id)
channels_cfg = config_data.get("channels")
if not isinstance(channels_cfg, dict):
channels_cfg = {}
send_progress, send_tool_hints = read_global_delivery_flags(channels_cfg)
rows: List[Dict[str, Any]] = [
{
"id": "dashboard",
"bot_id": bot.id,
"channel_type": "dashboard",
"external_app_id": f"dashboard-{bot.id}",
"app_secret": "",
"internal_port": 9000,
"is_active": True,
"extra_config": {
"sendProgress": send_progress,
"sendToolHints": send_tool_hints,
},
"locked": True,
}
]
for ctype, cfg in channels_cfg.items():
if ctype in {"sendProgress", "sendToolHints", "dashboard"} or not isinstance(cfg, dict):
continue
rows.append(channel_config_to_api(bot.id, ctype, cfg))
return rows
def normalize_initial_bot_channels(bot_id: str, channels: Optional[List[ChannelConfigRequest]]) -> List[Dict[str, Any]]:
rows: List[Dict[str, Any]] = []
seen_types: set[str] = set()
for channel in channels or []:
ctype = (channel.channel_type or "").strip().lower()
if not ctype or ctype == "dashboard" or ctype in seen_types:
continue
seen_types.add(ctype)
rows.append(
{
"id": ctype,
"bot_id": bot_id,
"channel_type": ctype,
"external_app_id": (channel.external_app_id or "").strip() or f"{ctype}-{bot_id}",
"app_secret": (channel.app_secret or "").strip(),
"internal_port": max(1, min(int(channel.internal_port or 8080), 65535)),
"is_active": bool(channel.is_active),
"extra_config": normalize_channel_extra(channel.extra_config),
"locked": False,
}
)
return rows
def _read_workspace_md(bot_id: str, filename: str) -> str:
path = os.path.join(_workspace_root(bot_id), filename)
if not os.path.isfile(path):
raise RuntimeError(f"Missing required workspace file: {path}")
try:
with open(path, "r", encoding="utf-8") as file:
return file.read().strip()
except Exception as exc:
raise RuntimeError(f"Failed to read workspace file: {path}") from exc
def read_bot_runtime_snapshot(bot: BotInstance) -> Dict[str, Any]:
config_data = _read_bot_config(bot.id)
env_params = resolve_bot_runtime_env_params(bot.id)
provider_name = ""
provider_cfg: Dict[str, Any] = {}
providers_cfg = config_data.get("providers")
if isinstance(providers_cfg, dict):
for p_name, p_cfg in providers_cfg.items():
provider_name = str(p_name or "").strip()
if isinstance(p_cfg, dict):
provider_cfg = p_cfg
break
if not provider_name or not provider_cfg:
raise RuntimeError(f"Missing provider configuration in bot config: {bot.id}")
agents_defaults: Dict[str, Any] = {}
agents_cfg = config_data.get("agents")
if isinstance(agents_cfg, dict):
defaults = agents_cfg.get("defaults")
if isinstance(defaults, dict):
agents_defaults = defaults
if not agents_defaults:
raise RuntimeError(f"Missing agents.defaults in bot config: {bot.id}")
channels_cfg = config_data.get("channels")
send_progress, send_tool_hints = read_global_delivery_flags(channels_cfg)
llm_provider = provider_name or ""
llm_model = str(agents_defaults.get("model") or "")
api_key = str(provider_cfg.get("apiKey") or "").strip()
api_base = str(provider_cfg.get("apiBase") or "").strip()
if not llm_model:
raise RuntimeError(f"Missing model in bot config: {bot.id}")
if not api_key:
raise RuntimeError(f"Missing apiKey in bot config: {bot.id}")
if not api_base:
raise RuntimeError(f"Missing apiBase in bot config: {bot.id}")
api_base_lower = api_base.lower()
provider_alias = str(provider_cfg.get("dashboardProviderAlias") or "").strip().lower()
if llm_provider == "openai" and provider_alias in {"xunfei", "iflytek", "xfyun", "vllm"}:
llm_provider = "xunfei" if provider_alias in {"iflytek", "xfyun"} else provider_alias
elif llm_provider == "openai" and ("spark-api-open.xf-yun.com" in api_base_lower or "xf-yun.com" in api_base_lower):
llm_provider = "xunfei"
tools_cfg = config_data.get("tools")
if tools_cfg is not None and not isinstance(tools_cfg, dict):
raise RuntimeError(f"Invalid tools configuration in bot config: {bot.id}")
mcp_servers = tools_cfg.get("mcpServers") if isinstance(tools_cfg, dict) else None
soul_md = _read_workspace_md(bot.id, "SOUL.md")
resources = _read_bot_resources(bot.id, config_data=config_data)
return {
"llm_provider": llm_provider,
"llm_model": llm_model,
"api_key": api_key,
"api_base": api_base,
"temperature": _safe_float(agents_defaults.get("temperature"), 0.2),
"top_p": _safe_float(agents_defaults.get("topP"), 1.0),
"max_tokens": _safe_int(agents_defaults.get("maxTokens"), 8192),
"cpu_cores": resources["cpu_cores"],
"memory_mb": resources["memory_mb"],
"storage_gb": resources["storage_gb"],
"system_timezone": env_params["TZ"],
"send_progress": send_progress,
"send_tool_hints": send_tool_hints,
"soul_md": soul_md,
"agents_md": _read_workspace_md(bot.id, "AGENTS.md"),
"user_md": _read_workspace_md(bot.id, "USER.md"),
"tools_md": _read_workspace_md(bot.id, "TOOLS.md"),
"identity_md": _read_workspace_md(bot.id, "IDENTITY.md"),
"mcp_servers": mcp_servers if isinstance(mcp_servers, dict) else None,
}
def serialize_bot_detail(bot: BotInstance) -> Dict[str, Any]:
runtime = read_bot_runtime_snapshot(bot)
created_at = bot.created_at.isoformat() + "Z" if bot.created_at else None
updated_at = bot.updated_at.isoformat() + "Z" if bot.updated_at else None
return {
"id": bot.id,
"name": bot.name,
"enabled": bool(getattr(bot, "enabled", True)),
"access_password": bot.access_password or "",
"has_access_password": bool(str(bot.access_password or "").strip()),
"avatar_model": "base",
"avatar_skin": "blue_suit",
"image_tag": bot.image_tag,
"llm_provider": runtime["llm_provider"],
"llm_model": runtime["llm_model"],
"api_key": runtime["api_key"],
"api_base": runtime["api_base"],
"temperature": runtime["temperature"],
"top_p": runtime["top_p"],
"max_tokens": runtime["max_tokens"],
"cpu_cores": runtime["cpu_cores"],
"memory_mb": runtime["memory_mb"],
"storage_gb": runtime["storage_gb"],
"system_timezone": runtime["system_timezone"],
"send_progress": runtime["send_progress"],
"send_tool_hints": runtime["send_tool_hints"],
"soul_md": runtime["soul_md"],
"agents_md": runtime["agents_md"],
"user_md": runtime["user_md"],
"tools_md": runtime["tools_md"],
"identity_md": runtime["identity_md"],
"workspace_dir": bot.workspace_dir,
"docker_status": bot.docker_status,
"current_state": bot.current_state,
"last_action": bot.last_action,
"created_at": created_at,
"updated_at": updated_at,
}
def serialize_bot_list_entry(bot: BotInstance) -> Dict[str, Any]:
created_at = bot.created_at.isoformat() + "Z" if bot.created_at else None
updated_at = bot.updated_at.isoformat() + "Z" if bot.updated_at else None
return {
"id": bot.id,
"name": bot.name,
"enabled": bool(getattr(bot, "enabled", True)),
"has_access_password": bool(str(bot.access_password or "").strip()),
"image_tag": bot.image_tag,
"docker_status": bot.docker_status,
"current_state": bot.current_state,
"last_action": bot.last_action,
"created_at": created_at,
"updated_at": updated_at,
}
def _has_bot_workspace_config(bot_id: str) -> bool:
return (Path(BOTS_WORKSPACE_ROOT) / bot_id / ".nanobot" / "config.json").is_file()
def sync_bot_workspace_channels(
session: Session,
bot_id: str,
channels_override: Optional[List[Dict[str, Any]]] = None,
global_delivery_override: Optional[Dict[str, Any]] = None,
runtime_overrides: Optional[Dict[str, Any]] = None,
) -> None:
bot = session.get(BotInstance, bot_id)
if not bot:
raise RuntimeError(f"Bot not found: {bot_id}")
has_existing_config = _has_bot_workspace_config(bot_id)
if has_existing_config:
snapshot = read_bot_runtime_snapshot(bot)
bot_data: Dict[str, Any] = dict(snapshot)
else:
if not isinstance(runtime_overrides, dict):
raise RuntimeError(f"Missing required bot config for workspace sync: {bot_id}")
bot_data = {}
if isinstance(runtime_overrides, dict):
bot_data.update(runtime_overrides)
resources = normalize_bot_resource_limits(
bot_data.get("cpu_cores"),
bot_data.get("memory_mb"),
bot_data.get("storage_gb"),
)
bot_data.update(resources)
send_progress = bool(bot_data.get("send_progress", False))
send_tool_hints = bool(bot_data.get("send_tool_hints", False))
if isinstance(global_delivery_override, dict):
if "sendProgress" in global_delivery_override:
send_progress = bool(global_delivery_override.get("sendProgress"))
if "sendToolHints" in global_delivery_override:
send_tool_hints = bool(global_delivery_override.get("sendToolHints"))
if channels_override is not None:
channels_data = channels_override
elif has_existing_config:
channels_data = list_bot_channels_from_config(bot)
else:
channels_data = []
bot_data["send_progress"] = send_progress
bot_data["send_tool_hints"] = send_tool_hints
normalized_channels: List[Dict[str, Any]] = []
for row in channels_data:
ctype = str(row.get("channel_type") or "").strip().lower()
if not ctype or ctype == "dashboard":
continue
normalized_channels.append(
{
"channel_type": ctype,
"external_app_id": str(row.get("external_app_id") or ""),
"app_secret": str(row.get("app_secret") or ""),
"internal_port": max(1, min(int(row.get("internal_port") or 8080), 65535)),
"is_active": bool(row.get("is_active", True)),
"extra_config": normalize_channel_extra(row.get("extra_config")),
}
)
workspace_provider.write_workspace(bot_id=bot_id, bot_data=bot_data, channels=normalized_channels)
write_bot_resource_limits(bot_id, bot_data.get("cpu_cores"), bot_data.get("memory_mb"), bot_data.get("storage_gb"))
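
A round-trip example for the telegram mapping (illustrative token): the config-side token travels in the API row's app_secret, and an empty allowFrom normalizes to the allow-all wildcard:

row = channel_config_to_api("demo_bot", "telegram", {"enabled": True, "token": "123:abc"})
assert row["app_secret"] == "123:abc" and row["channel_type"] == "telegram"
cfg = channel_api_to_config(row)
assert cfg["token"] == "123:abc" and cfg["allowFrom"] == ["*"]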

View File

@ -0,0 +1,305 @@
from __future__ import annotations
import json
import os
import re
from typing import Any, Dict, Optional
from core.utils import _calc_dir_size_bytes
from core.settings import BOTS_WORKSPACE_ROOT
_ENV_KEY_RE = re.compile(r"^[A-Z_][A-Z0-9_]{0,127}$")
_BYTES_PER_GB = 1024 * 1024 * 1024
__all__ = [
"get_bot_data_root",
"normalize_bot_env_params",
"normalize_bot_resource_limits",
"read_bot_config_data",
"read_bot_cron_jobs_store",
"read_bot_env_params",
"get_bot_resource_limits",
"get_bot_workspace_root",
"get_bot_workspace_snapshot",
"get_bot_workspace_usage_bytes",
"write_bot_config_data",
"write_bot_cron_jobs_store",
"write_bot_env_params",
"write_bot_resource_limits",
"_bot_data_root",
"_clear_bot_dashboard_direct_session",
"_clear_bot_sessions",
"_normalize_env_params",
"_normalize_resource_limits",
"_read_bot_config",
"_read_bot_resources",
"_read_cron_store",
"_read_env_store",
"_safe_float",
"_safe_int",
"_workspace_root",
"_write_bot_config",
"_write_bot_resources",
"_write_cron_store",
"_write_env_store",
]
def get_bot_workspace_root(bot_id: str) -> str:
return _workspace_root(bot_id)
def _workspace_root(bot_id: str) -> str:
return os.path.abspath(os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot", "workspace"))
def get_bot_data_root(bot_id: str) -> str:
return _bot_data_root(bot_id)
def _bot_data_root(bot_id: str) -> str:
return os.path.abspath(os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot"))
def _safe_float(raw: Any, default: float) -> float:
try:
return float(raw)
except Exception:
return default
def _safe_int(raw: Any, default: int) -> int:
try:
return int(raw)
except Exception:
return default
def _normalize_resource_limits(cpu_cores: Any, memory_mb: Any, storage_gb: Any) -> Dict[str, Any]:
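    # Convention: 0 means "unlimited"; negative or unparsable values fall back to the
    # defaults (1.0 CPU, 1024 MB, 10 GB) before clamping to the allowed ranges.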
cpu = _safe_float(cpu_cores, 1.0)
mem = _safe_int(memory_mb, 1024)
storage = _safe_int(storage_gb, 10)
if cpu < 0:
cpu = 1.0
if mem < 0:
mem = 1024
if storage < 0:
storage = 10
normalized_cpu = 0.0 if cpu == 0 else min(16.0, max(0.1, cpu))
normalized_mem = 0 if mem == 0 else min(65536, max(256, mem))
normalized_storage = 0 if storage == 0 else min(1024, max(1, storage))
return {
"cpu_cores": normalized_cpu,
"memory_mb": normalized_mem,
"storage_gb": normalized_storage,
}
def normalize_bot_resource_limits(cpu_cores: Any, memory_mb: Any, storage_gb: Any) -> Dict[str, Any]:
return _normalize_resource_limits(cpu_cores, memory_mb, storage_gb)
def _normalize_env_params(raw: Any) -> Dict[str, str]:
if not isinstance(raw, dict):
return {}
rows: Dict[str, str] = {}
for key, value in raw.items():
normalized_key = str(key or "").strip().upper()
if not normalized_key or not _ENV_KEY_RE.fullmatch(normalized_key):
continue
rows[normalized_key] = str(value or "").strip()
return rows
def normalize_bot_env_params(raw: Any) -> Dict[str, str]:
return _normalize_env_params(raw)
def _read_json_object(path: str) -> Dict[str, Any]:
if not os.path.isfile(path):
raise RuntimeError(f"Missing required JSON file: {path}")
try:
with open(path, "r", encoding="utf-8") as file:
data = json.load(file)
except Exception as exc:
raise RuntimeError(f"Invalid JSON file: {path}") from exc
if not isinstance(data, dict):
raise RuntimeError(f"JSON file must contain an object: {path}")
return data
def _read_json_value(path: str) -> Any:
if not os.path.isfile(path):
return None
try:
with open(path, "r", encoding="utf-8") as file:
return json.load(file)
except Exception:
return None
def _write_json_atomic(path: str, payload: Dict[str, Any]) -> None:
os.makedirs(os.path.dirname(path), exist_ok=True)
tmp_path = f"{path}.tmp"
with open(tmp_path, "w", encoding="utf-8") as file:
json.dump(payload, file, ensure_ascii=False, indent=2)
os.replace(tmp_path, path)
def _config_json_path(bot_id: str) -> str:
return os.path.join(_bot_data_root(bot_id), "config.json")
def _read_bot_config(bot_id: str) -> Dict[str, Any]:
return _read_json_object(_config_json_path(bot_id))
def read_bot_config_data(bot_id: str) -> Dict[str, Any]:
return _read_bot_config(bot_id)
def _write_bot_config(bot_id: str, config_data: Dict[str, Any]) -> None:
_write_json_atomic(_config_json_path(bot_id), config_data)
def write_bot_config_data(bot_id: str, config_data: Dict[str, Any]) -> None:
_write_bot_config(bot_id, config_data)
def _resources_json_path(bot_id: str) -> str:
return os.path.join(_bot_data_root(bot_id), "resources.json")
def _write_bot_resources(bot_id: str, cpu_cores: Any, memory_mb: Any, storage_gb: Any) -> None:
normalized = _normalize_resource_limits(cpu_cores, memory_mb, storage_gb)
_write_json_atomic(
_resources_json_path(bot_id),
{
"cpuCores": normalized["cpu_cores"],
"memoryMB": normalized["memory_mb"],
"storageGB": normalized["storage_gb"],
},
)
def write_bot_resource_limits(bot_id: str, cpu_cores: Any, memory_mb: Any, storage_gb: Any) -> None:
_write_bot_resources(bot_id, cpu_cores, memory_mb, storage_gb)
def _read_bot_resources(bot_id: str, config_data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
_ = config_data
path = _resources_json_path(bot_id)
data = _read_json_object(path)
return _normalize_resource_limits(
data.get("cpuCores", data.get("cpu_cores")),
data.get("memoryMB", data.get("memory_mb")),
data.get("storageGB", data.get("storage_gb")),
)
def get_bot_resource_limits(bot_id: str, config_data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
return _read_bot_resources(bot_id, config_data=config_data)
def get_bot_workspace_usage_bytes(bot_id: str) -> int:
return _calc_dir_size_bytes(_workspace_root(bot_id))
def get_bot_workspace_snapshot(bot_id: str, config_data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
resources = get_bot_resource_limits(bot_id, config_data=config_data)
configured_limit_bytes = int(resources.get("storage_gb") or 0) * _BYTES_PER_GB
return {
"path": get_bot_workspace_root(bot_id),
"usage_bytes": get_bot_workspace_usage_bytes(bot_id),
"configured_limit_bytes": configured_limit_bytes if configured_limit_bytes > 0 else None,
}
def _env_store_path(bot_id: str) -> str:
return os.path.join(_bot_data_root(bot_id), "env.json")
def _read_env_store(bot_id: str) -> Dict[str, str]:
return _normalize_env_params(_read_json_object(_env_store_path(bot_id)))
def read_bot_env_params(bot_id: str) -> Dict[str, str]:
return _read_env_store(bot_id)
def _write_env_store(bot_id: str, env_params: Dict[str, str]) -> None:
_write_json_atomic(_env_store_path(bot_id), _normalize_env_params(env_params))
def write_bot_env_params(bot_id: str, env_params: Dict[str, str]) -> None:
_write_env_store(bot_id, env_params)
def _cron_store_path(bot_id: str) -> str:
return os.path.join(_workspace_root(bot_id), "cron", "jobs.json")
def _normalize_cron_store_payload(raw: Any) -> Dict[str, Any]:
if isinstance(raw, list):
return {"version": 1, "jobs": [row for row in raw if isinstance(row, dict)]}
if not isinstance(raw, dict):
return {"version": 1, "jobs": []}
jobs = raw.get("jobs")
if isinstance(jobs, list):
normalized_jobs = [row for row in jobs if isinstance(row, dict)]
else:
normalized_jobs = []
return {
"version": _safe_int(raw.get("version"), 1),
"jobs": normalized_jobs,
}
def _read_cron_store(bot_id: str) -> Dict[str, Any]:
return _normalize_cron_store_payload(_read_json_value(_cron_store_path(bot_id)))
def read_bot_cron_jobs_store(bot_id: str) -> Dict[str, Any]:
return _read_cron_store(bot_id)
def _write_cron_store(bot_id: str, store: Dict[str, Any]) -> None:
normalized = _normalize_cron_store_payload(store)
_write_json_atomic(_cron_store_path(bot_id), normalized)
def write_bot_cron_jobs_store(bot_id: str, store: Dict[str, Any]) -> None:
_write_cron_store(bot_id, store)
def _sessions_root(bot_id: str) -> str:
return os.path.join(_workspace_root(bot_id), "sessions")
def _clear_bot_sessions(bot_id: str) -> int:
root = _sessions_root(bot_id)
if not os.path.isdir(root):
return 0
deleted = 0
for name in os.listdir(root):
path = os.path.join(root, name)
if not os.path.isfile(path):
continue
if not name.lower().endswith(".jsonl"):
continue
try:
os.remove(path)
deleted += 1
except Exception:
continue
return deleted
def _clear_bot_dashboard_direct_session(bot_id: str) -> Dict[str, Any]:
root = _sessions_root(bot_id)
os.makedirs(root, exist_ok=True)
path = os.path.join(root, "dashboard_direct.jsonl")
existed = os.path.exists(path)
with open(path, "w", encoding="utf-8"):
pass
return {"path": path, "existed": existed}

View File

@ -0,0 +1,28 @@
from typing import Optional
from core.cache import cache
def _cache_key_bots_list() -> str:
return "bot:list:v3"
def _cache_key_bot_detail(bot_id: str) -> str:
return f"bot:detail:v3:{bot_id}"
def _cache_key_bot_messages(bot_id: str, limit: int) -> str:
return f"bot:messages:list:v2:{bot_id}:limit:{limit}"
def _cache_key_bot_messages_page(bot_id: str, limit: int, before_id: Optional[int]) -> str:
cursor = str(int(before_id)) if isinstance(before_id, int) and before_id > 0 else "latest"
return f"bot:messages:page:v2:{bot_id}:before:{cursor}:limit:{limit}"
def _cache_key_images() -> str:
return "images:list"
def _invalidate_bot_detail_cache(bot_id: str) -> None:
cache.delete(_cache_key_bots_list(), _cache_key_bot_detail(bot_id))
def _invalidate_bot_messages_cache(bot_id: str) -> None:
cache.delete_prefix(f"bot:messages:list:v2:{bot_id}:")
cache.delete_prefix(f"bot:messages:page:v2:{bot_id}:")
def _invalidate_images_cache() -> None:
cache.delete(_cache_key_images())
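
Typical usage from the write paths above (bot id is a placeholder):

_invalidate_bot_detail_cache("demo_bot")    # drops bot:list:v3 and bot:detail:v3:demo_bot
_invalidate_bot_messages_cache("demo_bot")  # drops every cached message list/page for the bot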

View File

@ -0,0 +1,202 @@
import logging
import os
from typing import Any, Dict, List
from fastapi import HTTPException
from sqlmodel import Session
from core.docker_instance import docker_manager
from core.utils import _is_video_attachment_path, _is_visual_attachment_path
from models.bot import BotInstance
from services.bot_service import read_bot_runtime_snapshot
from services.platform_activity_service import record_activity_event
from services.platform_usage_service import create_usage_request, fail_latest_usage
from services.runtime_service import broadcast_runtime_packet, persist_runtime_packet
from services.workspace_service import resolve_workspace_path
logger = logging.getLogger("dashboard.backend")
def _normalize_message_media_item(value: Any) -> str:
return str(value or "").strip().replace("\\", "/").lstrip("/")
def _normalize_message_media_list(raw: Any) -> List[str]:
if not isinstance(raw, list):
return []
rows: List[str] = []
for value in raw:
normalized = _normalize_message_media_item(value)
if normalized:
rows.append(normalized)
return rows
def _build_delivery_command(command: str, checked_attachments: List[str]) -> str:
if not checked_attachments:
return command
attachment_block = "\n".join(f"- {path}" for path in checked_attachments)
if all(_is_visual_attachment_path(path) for path in checked_attachments):
has_video = any(_is_video_attachment_path(path) for path in checked_attachments)
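        # The user-facing strings below are intentionally Chinese. Roughly: the label is
        # "image/video" vs "image", and the numbered rules tell the agent that attachments
        # accompany the request, images may be read directly as multimodal input, videos go
        # by path, tools are only a fallback, the reply language follows USER.md, and the
        # agent must not guess beyond what is visible.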
media_label = "图片/视频" if has_video else "图片"
capability_hint = (
"1) 附件已随请求附带;图片在可用时可直接作为多模态输入理解,视频请按附件路径处理。\n"
if has_video
else "1) 附件中的图片已作为多模态输入提供,优先直接理解并回答。\n"
)
if command:
return (
f"{command}\n\n"
"[Attached files]\n"
f"{attachment_block}\n\n"
"【附件处理要求】\n"
f"{capability_hint}"
"2) 若当前模型或接口不支持直接理解该附件,请明确说明后再调用工具解析。\n"
"3) 除非用户明确要求,不要先调用工具读取附件文件。\n"
"4) 回复语言必须遵循 USER.md若未指定则与用户当前输入语言保持一致。\n"
"5) 仅基于可见内容回答;看不清或无法确认的部分请明确说明,不要猜测。"
)
return (
"请先处理已附带的附件列表:\n"
f"{attachment_block}\n\n"
f"请直接分析已附带的{media_label}并总结关键信息。\n"
f"{'图片在可用时可直接作为多模态输入理解,视频请按附件路径处理。' if has_video else ''}\n"
"若当前模型或接口不支持直接理解该附件,请明确说明后再调用工具解析。\n"
"回复语言必须遵循 USER.md若未指定则与用户当前输入语言保持一致。\n"
"仅基于可见内容回答;看不清或无法确认的部分请明确说明,不要猜测。"
)
command_has_paths = all(path in command for path in checked_attachments) if command else False
if command and not command_has_paths:
return (
f"{command}\n\n"
"[Attached files]\n"
f"{attachment_block}\n\n"
"Please process the attached file(s) listed above when answering this request.\n"
"Reply language must follow USER.md. If not specified, use the same language as the user input."
)
if not command:
return (
"Please process the uploaded file(s) listed below:\n"
f"{attachment_block}\n\n"
"Reply language must follow USER.md. If not specified, use the same language as the user input."
)
return command
def send_bot_command(session: Session, bot_id: str, command: str, attachments: Any) -> Dict[str, Any]:
request_id = ""
try:
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
runtime_snapshot = read_bot_runtime_snapshot(bot)
normalized_attachments = _normalize_message_media_list(attachments)
text_command = str(command or "").strip()
if not text_command and not normalized_attachments:
raise HTTPException(status_code=400, detail="Command or attachments is required")
checked_attachments: List[str] = []
for rel_path in normalized_attachments:
_, target = resolve_workspace_path(bot_id, rel_path)
if not os.path.isfile(target):
raise HTTPException(status_code=400, detail=f"attachment not found: {rel_path}")
checked_attachments.append(rel_path)
delivery_media = [f"/root/.nanobot/workspace/{path.lstrip('/')}" for path in checked_attachments]
display_command = text_command if text_command else "[attachment message]"
delivery_command = _build_delivery_command(text_command, checked_attachments)
request_id = create_usage_request(
session,
bot_id,
display_command,
attachments=checked_attachments,
channel="dashboard",
metadata={"attachment_count": len(checked_attachments)},
provider=str(runtime_snapshot.get("llm_provider") or "").strip() or None,
model=str(runtime_snapshot.get("llm_model") or "").strip() or None,
)
record_activity_event(
session,
bot_id,
"command_submitted",
request_id=request_id,
channel="dashboard",
detail="command submitted",
metadata={"attachment_count": len(checked_attachments), "has_text": bool(text_command)},
)
session.commit()
outbound_user_packet: Dict[str, Any] | None = None
if display_command or checked_attachments:
outbound_user_packet = {
"type": "USER_COMMAND",
"channel": "dashboard",
"text": display_command,
"media": checked_attachments,
"request_id": request_id,
}
persist_runtime_packet(bot_id, outbound_user_packet)
if outbound_user_packet:
broadcast_runtime_packet(bot_id, outbound_user_packet)
success = docker_manager.send_command(bot_id, delivery_command, media=delivery_media)
if success:
return {"success": True}
detail = docker_manager.get_last_delivery_error(bot_id)
fail_latest_usage(session, bot_id, detail or "command delivery failed")
record_activity_event(
session,
bot_id,
"command_failed",
request_id=request_id,
channel="dashboard",
detail=(detail or "command delivery failed")[:400],
)
session.commit()
broadcast_runtime_packet(
bot_id,
{
"type": "AGENT_STATE",
"channel": "dashboard",
"payload": {
"state": "ERROR",
"action_msg": detail or "command delivery failed",
},
},
)
raise HTTPException(
status_code=502,
detail=f"Failed to deliver command to bot dashboard channel{': ' + detail if detail else ''}",
)
except HTTPException:
raise
except Exception as exc:
logger.exception("send_bot_command failed for bot_id=%s", bot_id)
try:
session.rollback()
except Exception:
pass
if request_id:
try:
fail_latest_usage(session, bot_id, str(exc))
record_activity_event(
session,
bot_id,
"command_failed",
request_id=request_id,
channel="dashboard",
detail=str(exc)[:400],
)
session.commit()
except Exception:
try:
session.rollback()
except Exception:
pass
raise HTTPException(status_code=500, detail=f"Failed to process bot command: {exc}") from exc
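For reference, a minimal caller sketch (the `engine` fixture and the workspace-relative attachment path are illustrative assumptions, not part of this diff):

# Hypothetical caller: deliver a text command plus one workspace attachment.
from sqlmodel import Session

def demo_send_command(engine, bot_id: str) -> None:
    with Session(engine) as session:
        # Attachment paths are relative to the bot workspace and must exist,
        # otherwise send_bot_command raises HTTP 400.
        result = send_bot_command(
            session,
            bot_id,
            command="请总结这份报告的关键结论",
            attachments=["uploads/report.pdf"],
        )
        assert result == {"success": True}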

View File

@@ -0,0 +1,371 @@
import json
import os
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from fastapi import HTTPException
from sqlmodel import Session, select
from core.cache import cache
from core.docker_instance import docker_manager
from core.utils import _resolve_local_day_range
from models.bot import BotInstance, BotMessage
from services.bot_storage_service import (
_clear_bot_dashboard_direct_session,
_clear_bot_sessions,
get_bot_workspace_root,
)
from services.cache_service import (
_cache_key_bot_messages,
_cache_key_bot_messages_page,
_invalidate_bot_detail_cache,
_invalidate_bot_messages_cache,
)
from services.platform_activity_service import record_activity_event
from services.platform_settings_service import get_chat_pull_page_size
def _get_bot_or_404(session: Session, bot_id: str) -> BotInstance:
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return bot
def _normalize_message_media_item(bot_id: str, value: Any) -> str:
raw = str(value or "").strip().replace("\\", "/")
if not raw:
return ""
if raw.startswith("/root/.nanobot/workspace/"):
return raw[len("/root/.nanobot/workspace/") :].lstrip("/")
root = get_bot_workspace_root(bot_id)
if os.path.isabs(raw):
try:
if os.path.commonpath([root, raw]) == root:
return os.path.relpath(raw, root).replace("\\", "/")
except Exception:
pass
return raw.lstrip("/")
def _normalize_message_media_list(raw: Any, bot_id: str) -> List[str]:
if not isinstance(raw, list):
return []
rows: List[str] = []
for value in raw:
normalized = _normalize_message_media_item(bot_id, value)
if normalized:
rows.append(normalized)
return rows
def _parse_message_media(bot_id: str, media_raw: Optional[str]) -> List[str]:
if not media_raw:
return []
try:
parsed = json.loads(media_raw)
except Exception:
return []
return _normalize_message_media_list(parsed, bot_id)
def serialize_bot_message_row(bot_id: str, row: BotMessage) -> Dict[str, Any]:
created_at = row.created_at
if created_at.tzinfo is None:
created_at = created_at.replace(tzinfo=timezone.utc)
return {
"id": row.id,
"bot_id": row.bot_id,
"role": row.role,
"text": row.text,
"media": _parse_message_media(bot_id, getattr(row, "media_json", None)),
"feedback": str(getattr(row, "feedback", "") or "").strip() or None,
"ts": int(created_at.timestamp() * 1000),
}
def list_bot_messages_payload(session: Session, bot_id: str, limit: int = 200) -> List[Dict[str, Any]]:
_get_bot_or_404(session, bot_id)
safe_limit = max(1, min(int(limit), 500))
cached = cache.get_json(_cache_key_bot_messages(bot_id, safe_limit))
if isinstance(cached, list):
return cached
rows = session.exec(
select(BotMessage)
.where(BotMessage.bot_id == bot_id)
.order_by(BotMessage.created_at.desc(), BotMessage.id.desc())
.limit(safe_limit)
).all()
payload = [serialize_bot_message_row(bot_id, row) for row in reversed(rows)]
cache.set_json(_cache_key_bot_messages(bot_id, safe_limit), payload, ttl=30)
return payload
def list_bot_messages_page_payload(
session: Session,
bot_id: str,
limit: Optional[int],
before_id: Optional[int],
) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
configured_limit = get_chat_pull_page_size()
safe_limit = max(1, min(int(limit if limit is not None else configured_limit), 500))
safe_before_id = int(before_id) if isinstance(before_id, int) and before_id > 0 else None
cache_key = _cache_key_bot_messages_page(bot_id, safe_limit, safe_before_id)
cached = cache.get_json(cache_key)
if isinstance(cached, dict) and isinstance(cached.get("items"), list):
return cached
stmt = (
select(BotMessage)
.where(BotMessage.bot_id == bot_id)
.order_by(BotMessage.created_at.desc(), BotMessage.id.desc())
.limit(safe_limit + 1)
)
if safe_before_id is not None:
stmt = stmt.where(BotMessage.id < safe_before_id)
rows = session.exec(stmt).all()
has_more = len(rows) > safe_limit
if has_more:
rows = rows[:safe_limit]
ordered = list(reversed(rows))
payload = {
"items": [serialize_bot_message_row(bot_id, row) for row in ordered],
"has_more": bool(has_more),
"next_before_id": rows[-1].id if rows else None,
"limit": safe_limit,
}
cache.set_json(cache_key, payload, ttl=30)
return payload
def list_bot_messages_by_date_payload(
session: Session,
bot_id: str,
date: str,
tz_offset_minutes: Optional[int],
limit: Optional[int],
) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
utc_start, utc_end = _resolve_local_day_range(date, tz_offset_minutes)
configured_limit = max(60, get_chat_pull_page_size())
safe_limit = max(12, min(int(limit if limit is not None else configured_limit), 240))
before_limit = max(3, min(18, safe_limit // 4))
after_limit = max(0, safe_limit - before_limit - 1)
exact_anchor = session.exec(
select(BotMessage)
.where(BotMessage.bot_id == bot_id, BotMessage.created_at >= utc_start, BotMessage.created_at < utc_end)
.order_by(BotMessage.created_at.asc(), BotMessage.id.asc())
.limit(1)
).first()
anchor = exact_anchor
matched_exact_date = exact_anchor is not None
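# No message inside the requested local day: fall back to the nearest
# neighbour message on either side of the day window.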
if anchor is None:
next_row = session.exec(
select(BotMessage)
.where(BotMessage.bot_id == bot_id, BotMessage.created_at >= utc_end)
.order_by(BotMessage.created_at.asc(), BotMessage.id.asc())
.limit(1)
).first()
prev_row = session.exec(
select(BotMessage)
.where(BotMessage.bot_id == bot_id, BotMessage.created_at < utc_start)
.order_by(BotMessage.created_at.desc(), BotMessage.id.desc())
.limit(1)
).first()
if next_row and prev_row:
gap_after = next_row.created_at - utc_end
gap_before = utc_start - prev_row.created_at
anchor = next_row if gap_after <= gap_before else prev_row
else:
anchor = next_row or prev_row
if anchor is None or anchor.id is None:
return {
"items": [],
"anchor_id": None,
"resolved_ts": None,
"matched_exact_date": False,
"has_more_before": False,
"has_more_after": False,
}
before_rows = session.exec(
select(BotMessage)
.where(BotMessage.bot_id == bot_id, BotMessage.id < anchor.id)
.order_by(BotMessage.created_at.desc(), BotMessage.id.desc())
.limit(before_limit)
).all()
after_rows = session.exec(
select(BotMessage)
.where(BotMessage.bot_id == bot_id, BotMessage.id > anchor.id)
.order_by(BotMessage.created_at.asc(), BotMessage.id.asc())
.limit(after_limit)
).all()
ordered = list(reversed(before_rows)) + [anchor] + after_rows
first_row = ordered[0] if ordered else None
last_row = ordered[-1] if ordered else None
has_more_before = False
if first_row is not None and first_row.id is not None:
has_more_before = (
session.exec(
select(BotMessage.id)
.where(BotMessage.bot_id == bot_id, BotMessage.id < first_row.id)
.order_by(BotMessage.id.desc())
.limit(1)
).first()
is not None
)
has_more_after = False
if last_row is not None and last_row.id is not None:
has_more_after = (
session.exec(
select(BotMessage.id)
.where(BotMessage.bot_id == bot_id, BotMessage.id > last_row.id)
.order_by(BotMessage.id.asc())
.limit(1)
).first()
is not None
)
return {
"items": [serialize_bot_message_row(bot_id, row) for row in ordered],
"anchor_id": anchor.id,
"resolved_ts": int(anchor.created_at.timestamp() * 1000),
"matched_exact_date": matched_exact_date,
"has_more_before": has_more_before,
"has_more_after": has_more_after,
}
def update_bot_message_feedback_payload(
session: Session,
bot_id: str,
message_id: int,
feedback: Optional[str],
) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
row = session.get(BotMessage, message_id)
if not row or row.bot_id != bot_id:
raise HTTPException(status_code=404, detail="Message not found")
if row.role != "assistant":
raise HTTPException(status_code=400, detail="Only assistant messages support feedback")
raw = str(feedback or "").strip().lower()
if raw in {"", "none", "null"}:
row.feedback = None
row.feedback_at = None
elif raw in {"up", "down"}:
row.feedback = raw
row.feedback_at = datetime.utcnow()
else:
raise HTTPException(status_code=400, detail="feedback must be 'up' or 'down'")
session.add(row)
session.commit()
_invalidate_bot_messages_cache(bot_id)
return {
"status": "updated",
"bot_id": bot_id,
"message_id": row.id,
"feedback": row.feedback,
"feedback_at": row.feedback_at.isoformat() if row.feedback_at else None,
}
def delete_bot_message_payload(
session: Session,
bot_id: str,
message_id: int,
) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
row = session.get(BotMessage, message_id)
if not row or row.bot_id != bot_id:
raise HTTPException(status_code=404, detail="Message not found")
deleted_role = str(row.role or "").strip() or "assistant"
session.delete(row)
record_activity_event(
session,
bot_id,
"message_deleted",
channel="dashboard",
detail=f"Deleted {deleted_role} message #{message_id}",
metadata={"message_id": message_id, "role": deleted_role},
)
session.commit()
_invalidate_bot_detail_cache(bot_id)
_invalidate_bot_messages_cache(bot_id)
return {
"status": "deleted",
"bot_id": bot_id,
"message_id": message_id,
"role": deleted_role,
}
def clear_bot_messages_payload(session: Session, bot_id: str) -> Dict[str, Any]:
bot = _get_bot_or_404(session, bot_id)
rows = session.exec(select(BotMessage).where(BotMessage.bot_id == bot_id)).all()
deleted = 0
for row in rows:
session.delete(row)
deleted += 1
cleared_sessions = _clear_bot_sessions(bot_id)
if str(bot.docker_status or "").upper() == "RUNNING":
try:
docker_manager.send_command(bot_id, "/new")
except Exception:
pass
bot.last_action = ""
bot.current_state = "IDLE"
bot.updated_at = datetime.utcnow()
session.add(bot)
record_activity_event(
session,
bot_id,
"history_cleared",
channel="system",
detail=f"Cleared {deleted} stored messages",
metadata={"deleted_messages": deleted, "cleared_sessions": cleared_sessions},
)
session.commit()
_invalidate_bot_detail_cache(bot_id)
_invalidate_bot_messages_cache(bot_id)
return {"bot_id": bot_id, "deleted": deleted, "cleared_sessions": cleared_sessions}
def clear_dashboard_direct_session_payload(session: Session, bot_id: str) -> Dict[str, Any]:
bot = _get_bot_or_404(session, bot_id)
result = _clear_bot_dashboard_direct_session(bot_id)
if str(bot.docker_status or "").upper() == "RUNNING":
try:
docker_manager.send_command(bot_id, "/new")
except Exception:
pass
bot.updated_at = datetime.utcnow()
session.add(bot)
record_activity_event(
session,
bot_id,
"dashboard_session_cleared",
channel="dashboard",
detail="Cleared dashboard_direct session file",
metadata={"session_file": result["path"], "previously_existed": result["existed"]},
)
session.commit()
_invalidate_bot_detail_cache(bot_id)
return {
"bot_id": bot_id,
"cleared": True,
"session_file": result["path"],
"previously_existed": result["existed"],
}
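A usage sketch for the cursor pagination above (assumed caller code, not part of this file). Pages move backwards in time via next_before_id, while the items inside each page are returned oldest-first:

def iter_all_messages(session: Session, bot_id: str):
    # Walk from the newest page to the oldest; list_bot_messages_page_payload
    # already caps the page size and handles the has_more probe row.
    before_id: Optional[int] = None
    while True:
        page = list_bot_messages_page_payload(session, bot_id, limit=None, before_id=before_id)
        yield from page["items"]
        if not page["has_more"]:
            break
        before_id = page["next_before_id"]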

View File

@@ -0,0 +1,42 @@
from __future__ import annotations
from pathlib import Path
from typing import Dict
from core.settings import (
AGENT_MD_TEMPLATES_FILE,
DATA_ROOT,
RUNTIME_SKILLS_ROOT,
RUNTIME_TEMPLATES_ROOT,
TOPIC_PRESETS_TEMPLATES_FILE,
)
def _require_dir(path: Path, *, label: str) -> str:
resolved = path.resolve()
if not resolved.exists() or not resolved.is_dir():
raise RuntimeError(
f"Missing required {label} directory: {resolved}. "
"Please mount the project-root data directory to /app/data before starting the backend."
)
return str(resolved)
def _require_file(path: Path, *, label: str) -> str:
resolved = path.resolve()
if not resolved.exists() or not resolved.is_file():
raise RuntimeError(
f"Missing required {label} file: {resolved}. "
"Please restore the tracked files under data/templates before starting the backend."
)
return str(resolved)
def validate_runtime_data_assets() -> Dict[str, str]:
return {
"data_root": _require_dir(Path(DATA_ROOT), label="data"),
"templates_root": _require_dir(RUNTIME_TEMPLATES_ROOT, label="templates"),
"skills_root": _require_dir(RUNTIME_SKILLS_ROOT, label="skills"),
"agent_md_templates_file": _require_file(AGENT_MD_TEMPLATES_FILE, label="agent templates"),
"topic_presets_file": _require_file(TOPIC_PRESETS_TEMPLATES_FILE, label="topic presets"),
}
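A startup sketch (the wiring is an assumption; the validator itself is defined above). Calling it before the app starts serving turns a missing data mount into an immediate, descriptive failure instead of scattered runtime errors:

def on_startup() -> None:
    # Raises RuntimeError with a mount hint when any required directory or file is missing.
    paths = validate_runtime_data_assets()
    print("runtime data assets OK:", paths["data_root"])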

View File

@@ -0,0 +1,116 @@
import logging
from typing import Any, Dict, List
from fastapi import HTTPException
from sqlmodel import Session, select
from core.cache import cache
from core.docker_instance import docker_manager
from models.bot import BotInstance, NanobotImage
from services.cache_service import _cache_key_images, _invalidate_images_cache
logger = logging.getLogger("dashboard.backend")
def _serialize_image(row: NanobotImage) -> Dict[str, Any]:
created_at = row.created_at.isoformat() + "Z" if row.created_at else None
return {
"tag": row.tag,
"image_id": row.image_id,
"version": row.version,
"status": row.status,
"source_dir": row.source_dir,
"created_at": created_at,
}
def _reconcile_registered_images(session: Session) -> None:
rows = session.exec(select(NanobotImage)).all()
dirty = False
for row in rows:
docker_exists = docker_manager.has_image(row.tag)
next_status = "READY" if docker_exists else "ERROR"
next_image_id = row.image_id
if docker_exists and docker_manager.client:
try:
next_image_id = docker_manager.client.images.get(row.tag).id
except Exception:
next_image_id = row.image_id
if row.status != next_status or row.image_id != next_image_id:
row.status = next_status
row.image_id = next_image_id
session.add(row)
dirty = True
if dirty:
session.commit()
def list_registered_images(session: Session) -> List[Dict[str, Any]]:
cached = cache.get_json(_cache_key_images())
if isinstance(cached, list) and all(isinstance(row, dict) for row in cached):
return cached
if isinstance(cached, list):
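# A cached payload exists but is malformed; drop it and rebuild below.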
_invalidate_images_cache()
try:
_reconcile_registered_images(session)
except Exception as exc:
logger.warning("image reconcile skipped: %s", exc)
rows = session.exec(select(NanobotImage).order_by(NanobotImage.created_at.desc())).all()
payload = [_serialize_image(row) for row in rows]
cache.set_json(_cache_key_images(), payload, ttl=60)
return payload
def delete_registered_image(session: Session, *, tag: str) -> Dict[str, Any]:
image = session.get(NanobotImage, tag)
if not image:
raise HTTPException(status_code=404, detail="Image not found")
bots_using = session.exec(select(BotInstance).where(BotInstance.image_tag == tag)).all()
if bots_using:
raise HTTPException(status_code=400, detail=f"Cannot delete image: {len(bots_using)} bots are using it.")
session.delete(image)
session.commit()
_invalidate_images_cache()
return {"status": "deleted"}
def list_docker_images_by_repository(repository: str = "nanobot-base") -> List[Dict[str, Any]]:
return docker_manager.list_images_by_repo(repository)
def register_image(session: Session, payload: Dict[str, Any]) -> Dict[str, Any]:
tag = str(payload.get("tag") or "").strip()
source_dir = str(payload.get("source_dir") or "manual").strip() or "manual"
if not tag:
raise HTTPException(status_code=400, detail="tag is required")
if not docker_manager.has_image(tag):
raise HTTPException(status_code=404, detail=f"Docker image not found: {tag}")
version = tag.split(":")[-1].removeprefix("v") if ":" in tag else tag
try:
docker_img = docker_manager.client.images.get(tag) if docker_manager.client else None
image_id = docker_img.id if docker_img else None
except Exception:
image_id = None
row = session.get(NanobotImage, tag)
if not row:
row = NanobotImage(
tag=tag,
version=version,
status="READY",
source_dir=source_dir,
image_id=image_id,
)
else:
row.version = version
row.status = "READY"
row.source_dir = source_dir
row.image_id = image_id
session.add(row)
session.commit()
session.refresh(row)
_invalidate_images_cache()
return _serialize_image(row)
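Usage sketch (the tag is an illustrative assumption; the Docker image must already exist locally, otherwise register_image raises HTTP 404):

def demo_register_image(session: Session) -> None:
    row = register_image(session, {"tag": "nanobot-base:v0.1.5", "source_dir": "manual"})
    # The version is derived from the tag suffix with a leading "v" stripped.
    assert row["version"] == "0.1.5"
    assert row["status"] == "READY"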

View File

@@ -0,0 +1,141 @@
import json
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional
from sqlalchemy import delete as sql_delete
from sqlmodel import Session, select
from models.platform import BotActivityEvent
from schemas.platform import PlatformActivityItem
from services.platform_settings_service import get_activity_event_retention_days
ACTIVITY_EVENT_PRUNE_INTERVAL = timedelta(minutes=10)
OPERATIONAL_ACTIVITY_EVENT_TYPES = {
"bot_created",
"bot_started",
"bot_stopped",
"bot_warning",
"bot_enabled",
"bot_disabled",
"bot_deactivated",
"command_submitted",
"command_failed",
"history_cleared",
}
_last_activity_event_prune_at: Optional[datetime] = None
def _utcnow() -> datetime:
return datetime.utcnow()
def prune_expired_activity_events(session: Session, force: bool = False) -> int:
global _last_activity_event_prune_at
now = _utcnow()
if not force and _last_activity_event_prune_at and now - _last_activity_event_prune_at < ACTIVITY_EVENT_PRUNE_INTERVAL:
return 0
retention_days = get_activity_event_retention_days(session)
cutoff = now - timedelta(days=retention_days)
result = session.exec(sql_delete(BotActivityEvent).where(BotActivityEvent.created_at < cutoff))
_last_activity_event_prune_at = now
return int(getattr(result, "rowcount", 0) or 0)
def record_activity_event(
session: Session,
bot_id: str,
event_type: str,
request_id: Optional[str] = None,
channel: str = "dashboard",
detail: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
) -> None:
normalized_event_type = str(event_type or "unknown").strip().lower() or "unknown"
if normalized_event_type not in OPERATIONAL_ACTIVITY_EVENT_TYPES:
return
prune_expired_activity_events(session, force=False)
row = BotActivityEvent(
bot_id=bot_id,
request_id=request_id,
event_type=normalized_event_type,
channel=str(channel or "dashboard").strip().lower() or "dashboard",
detail=(str(detail or "").strip() or None),
metadata_json=json.dumps(metadata or {}, ensure_ascii=False) if metadata else None,
created_at=_utcnow(),
)
session.add(row)
def list_activity_events(
session: Session,
bot_id: Optional[str] = None,
limit: int = 100,
) -> List[Dict[str, Any]]:
deleted = prune_expired_activity_events(session, force=False)
if deleted > 0:
session.commit()
safe_limit = max(1, min(int(limit), 500))
stmt = select(BotActivityEvent).order_by(BotActivityEvent.created_at.desc(), BotActivityEvent.id.desc()).limit(safe_limit)
if bot_id:
stmt = stmt.where(BotActivityEvent.bot_id == bot_id)
rows = session.exec(stmt).all()
items: List[Dict[str, Any]] = []
for row in rows:
try:
metadata = json.loads(row.metadata_json or "{}")
except Exception:
metadata = {}
items.append(
PlatformActivityItem(
id=int(row.id or 0),
bot_id=row.bot_id,
request_id=row.request_id,
event_type=row.event_type,
channel=row.channel,
detail=row.detail,
metadata=metadata if isinstance(metadata, dict) else {},
created_at=row.created_at.isoformat() + "Z",
).model_dump()
)
return items
def get_bot_activity_stats(session: Session) -> List[Dict[str, Any]]:
from sqlalchemy import func
from models.bot import BotInstance
from models.platform import BotRequestUsage
today = _utcnow().date()
first_day = today - timedelta(days=6)
first_started_at = datetime.combine(first_day, datetime.min.time())
activity_counts = (
select(
BotRequestUsage.bot_id.label("bot_id"),
func.count(BotRequestUsage.id).label("count"),
)
.where(BotRequestUsage.started_at >= first_started_at)
.group_by(BotRequestUsage.bot_id)
.subquery()
)
stmt = (
select(
BotInstance.id,
BotInstance.name,
func.coalesce(activity_counts.c.count, 0).label("count"),
)
.select_from(BotInstance)
.join(activity_counts, activity_counts.c.bot_id == BotInstance.id, isouter=True)
.where(BotInstance.enabled.is_(True))
.order_by(func.coalesce(activity_counts.c.count, 0).desc(), BotInstance.name.asc(), BotInstance.id.asc())
)
results = session.exec(stmt).all()
return [
{"bot_id": row[0], "name": row[1] or row[0], "count": row[2]}
for row in results
]
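Caller sketch (assumed): record_activity_event only stages the row and silently drops event types outside OPERATIONAL_ACTIVITY_EVENT_TYPES; the caller owns the transaction and must commit:

def demo_record(session: Session, bot_id: str) -> None:
    record_activity_event(session, bot_id, "bot_started", channel="system", detail="manual start")
    record_activity_event(session, bot_id, "debug_ping")  # ignored: not an operational event type
    session.commit()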

View File

@@ -0,0 +1,604 @@
from __future__ import annotations
import hashlib
import re
import secrets
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import Any, Mapping, Optional
from fastapi import Request, Response, WebSocket
from sqlmodel import Session, select
from core.cache import auth_cache
from core.settings import PANEL_ACCESS_PASSWORD
from models.auth import AuthLoginLog
from models.bot import BotInstance
from services.platform_settings_service import get_auth_token_max_active, get_auth_token_ttl_hours
PANEL_TOKEN_COOKIE = "nanobot_panel_token"
BOT_TOKEN_COOKIE_PREFIX = "nanobot_bot_token_"
PANEL_SUBJECT_ID = "panel_admin"
AUTH_STORE_SET_TTL_BUFFER_SECONDS = 300
SESSION_TOUCH_INTERVAL_SECONDS = 300
@dataclass(frozen=True)
class AuthPrincipal:
auth_type: str
subject_id: str
bot_id: Optional[str]
authenticated: bool
auth_source: str
audit_id: Optional[int] = None
def _utcnow() -> datetime:
return datetime.utcnow()
def _normalize_token(raw: str) -> str:
return str(raw or "").strip()
def _hash_session_token(raw: str) -> str:
return hashlib.sha256(_normalize_token(raw).encode("utf-8")).hexdigest()
def _normalize_bot_cookie_name(bot_id: str) -> str:
safe_bot_id = re.sub(r"[^a-zA-Z0-9_-]+", "_", str(bot_id or "").strip())
return f"{BOT_TOKEN_COOKIE_PREFIX}{safe_bot_id or 'bot'}"
def _token_key(token_hash: str) -> str:
return f"token:{str(token_hash or '').strip()}"
def _principal_tokens_key(auth_type: str, subject_id: str, bot_id: Optional[str] = None) -> str:
normalized_type = str(auth_type or "").strip().lower() or "unknown"
normalized_subject = re.sub(r"[^a-zA-Z0-9_.:-]+", "_", str(subject_id or "").strip() or "anonymous")
normalized_bot_id = re.sub(r"[^a-zA-Z0-9_.:-]+", "_", str(bot_id or "").strip()) if bot_id else ""
return f"principal:{normalized_type}:{normalized_subject}:{normalized_bot_id or '-'}"
def _auth_token_ttl_seconds(session: Session) -> int:
return max(1, int(get_auth_token_ttl_hours(session))) * 60 * 60
def _auth_token_max_active(session: Session) -> int:
return max(1, int(get_auth_token_max_active(session)))
def _touch_session(session: Session, row: AuthLoginLog) -> None:
now = _utcnow()
last_seen = row.last_seen_at or row.created_at or now
if (now - last_seen).total_seconds() < SESSION_TOUCH_INTERVAL_SECONDS:
return
row.last_seen_at = now
session.add(row)
session.commit()
def _summarize_device(user_agent: str) -> str:
normalized = str(user_agent or "").strip().lower()
if not normalized:
return "Unknown Device"
browser = "Unknown Browser"
if "edg/" in normalized:
browser = "Edge"
elif "chrome/" in normalized and "edg/" not in normalized:
browser = "Chrome"
elif "safari/" in normalized and "chrome/" not in normalized:
browser = "Safari"
elif "firefox/" in normalized:
browser = "Firefox"
platform = "Desktop"
if "iphone" in normalized:
platform = "iPhone"
elif "ipad" in normalized:
platform = "iPad"
elif "android" in normalized:
platform = "Android"
elif "mac os x" in normalized or "macintosh" in normalized:
platform = "macOS"
elif "windows" in normalized:
platform = "Windows"
elif "linux" in normalized:
platform = "Linux"
return f"{platform} / {browser}"
def _extract_client_ip(request: Request) -> str:
forwarded = str(request.headers.get("x-forwarded-for") or "").strip()
if forwarded:
return forwarded.split(",")[0].strip()[:120]
return str(getattr(request.client, "host", "") or "")[:120]
def _get_bearer_token(headers: Mapping[str, Any]) -> str:
authorization = str(headers.get("authorization") or headers.get("Authorization") or "").strip()
if not authorization.lower().startswith("bearer "):
return ""
return _normalize_token(authorization[7:])
def _read_panel_token(request: Request) -> str:
cookie_token = _normalize_token(request.cookies.get(PANEL_TOKEN_COOKIE) or "")
return cookie_token or _get_bearer_token(request.headers)
def _read_bot_token(request: Request, bot_id: str) -> str:
cookie_token = _normalize_token(request.cookies.get(_normalize_bot_cookie_name(bot_id)) or "")
return cookie_token or _get_bearer_token(request.headers)
def _read_panel_token_ws(websocket: WebSocket) -> str:
cookie_token = _normalize_token(websocket.cookies.get(PANEL_TOKEN_COOKIE) or "")
return cookie_token or _get_bearer_token(websocket.headers)
def _read_bot_token_ws(websocket: WebSocket, bot_id: str) -> str:
cookie_token = _normalize_token(websocket.cookies.get(_normalize_bot_cookie_name(bot_id)) or "")
return cookie_token or _get_bearer_token(websocket.headers)
def _is_panel_auth_enabled() -> bool:
return bool(str(PANEL_ACCESS_PASSWORD or "").strip())
def _get_bot_access_password(session: Session, bot_id: str) -> str:
bot = session.get(BotInstance, bot_id)
if not bot:
return ""
return str(bot.access_password or "").strip()
def _is_bot_access_enabled(session: Session, bot_id: str) -> bool:
return bool(_get_bot_access_password(session, bot_id))
def _resolve_bot_auth_source(session: Session, bot_id: str) -> str:
return "bot_password" if _is_bot_access_enabled(session, bot_id) else "bot_public"
def _active_token_payload(token_hash: str) -> Optional[dict[str, Any]]:
payload = auth_cache.get_json(_token_key(token_hash))
return payload if isinstance(payload, dict) else None
def _principal_from_payload(payload: dict[str, Any]) -> tuple[str, str, Optional[str]]:
auth_type = str(payload.get("auth_type") or "").strip().lower()
subject_id = str(payload.get("subject_id") or "").strip()
bot_id = str(payload.get("bot_id") or "").strip() or None
return auth_type, subject_id, bot_id
def _find_audit_row_by_token_hash(session: Session, token_hash: str) -> Optional[AuthLoginLog]:
normalized_hash = str(token_hash or "").strip()
if not normalized_hash:
return None
return session.exec(
select(AuthLoginLog).where(AuthLoginLog.token_hash == normalized_hash).limit(1)
).first()
def _purge_cached_token(*, token_hash: str, auth_type: str, subject_id: str, bot_id: Optional[str]) -> None:
if not auth_cache.enabled:
return
auth_cache.delete(_token_key(token_hash))
auth_cache.srem(_principal_tokens_key(auth_type, subject_id, bot_id), token_hash)
def _active_token_row(
session: Session,
*,
token_hash: str,
expected_type: str,
bot_id: Optional[str] = None,
) -> Optional[AuthLoginLog]:
row = _find_audit_row_by_token_hash(session, token_hash)
if row is None:
return None
normalized_bot_id = str(bot_id or "").strip() or None
if row.auth_type != expected_type:
return None
if expected_type == "bot" and (str(row.bot_id or "").strip() or None) != normalized_bot_id:
return None
if row.revoked_at is not None:
return None
if row.expires_at <= _utcnow():
now = _utcnow()
row.last_seen_at = now
row.revoked_at = now
row.revoke_reason = "expired"
session.add(row)
session.commit()
_purge_cached_token(
token_hash=token_hash,
auth_type=row.auth_type,
subject_id=row.subject_id,
bot_id=row.bot_id,
)
return None
return row
def _list_active_token_rows(
session: Session,
*,
auth_type: str,
subject_id: str,
bot_id: Optional[str],
) -> list[AuthLoginLog]:
statement = select(AuthLoginLog).where(
AuthLoginLog.auth_type == auth_type,
AuthLoginLog.subject_id == subject_id,
AuthLoginLog.revoked_at.is_(None),
)
normalized_bot_id = str(bot_id or "").strip() or None
if normalized_bot_id is None:
statement = statement.where(AuthLoginLog.bot_id.is_(None))
else:
statement = statement.where(AuthLoginLog.bot_id == normalized_bot_id)
rows = list(session.exec(statement.order_by(AuthLoginLog.created_at.asc(), AuthLoginLog.id.asc())).all())
now = _utcnow()
expired_rows: list[AuthLoginLog] = []
active_rows: list[AuthLoginLog] = []
for row in rows:
if row.expires_at <= now:
row.last_seen_at = now
row.revoked_at = now
row.revoke_reason = "expired"
session.add(row)
expired_rows.append(row)
continue
active_rows.append(row)
if expired_rows:
session.commit()
for row in expired_rows:
_purge_cached_token(
token_hash=row.token_hash,
auth_type=row.auth_type,
subject_id=row.subject_id,
bot_id=row.bot_id,
)
return active_rows
def _mark_audit_revoked(session: Session, token_hash: str, *, reason: str) -> None:
row = _find_audit_row_by_token_hash(session, token_hash)
if not row:
return
now = _utcnow()
row.last_seen_at = now
if row.revoked_at is None:
row.revoked_at = now
row.revoke_reason = str(reason or "").strip()[:120] or row.revoke_reason
session.add(row)
session.commit()
def _revoke_token_hash(session: Session, token_hash: str, *, reason: str) -> None:
normalized_hash = str(token_hash or "").strip()
if not normalized_hash:
return
payload = _active_token_payload(normalized_hash)
if payload:
auth_type, subject_id, bot_id = _principal_from_payload(payload)
auth_cache.delete(_token_key(normalized_hash))
auth_cache.srem(_principal_tokens_key(auth_type, subject_id, bot_id), normalized_hash)
_mark_audit_revoked(session, normalized_hash, reason=reason)
def _revoke_raw_token(session: Session, raw_token: str, *, reason: str) -> None:
token = _normalize_token(raw_token)
if not token:
return
_revoke_token_hash(session, _hash_session_token(token), reason=reason)
def _cleanup_principal_set(session: Session, principal_key: str) -> list[tuple[int, str]]:
active_rows: list[tuple[int, str]] = []
stale_hashes: list[str] = []
for token_hash in auth_cache.smembers(principal_key):
payload = _active_token_payload(token_hash)
if not payload:
stale_hashes.append(token_hash)
continue
issued_at_ts = int(payload.get("issued_at_ts") or 0)
active_rows.append((issued_at_ts, token_hash))
if stale_hashes:
auth_cache.srem(principal_key, *stale_hashes)
for stale_hash in stale_hashes:
_mark_audit_revoked(session, stale_hash, reason="expired")
return sorted(active_rows, key=lambda row: (row[0], row[1]))
def _ensure_auth_store_available() -> None:
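# Intentionally a no-op: token resolution falls back to the audit table when
# the cache is unavailable, so there is no hard store dependency to check.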
return
def _persist_token_payload(
session: Session,
*,
row: AuthLoginLog,
raw_token: str,
ttl_seconds: int,
) -> None:
if not auth_cache.enabled:
return
token_hash = _hash_session_token(raw_token)
payload = {
"auth_type": row.auth_type,
"subject_id": row.subject_id,
"bot_id": row.bot_id,
"auth_source": row.auth_source,
"issued_at": row.created_at.isoformat() + "Z",
"issued_at_ts": int(row.created_at.timestamp()),
"expires_at": row.expires_at.isoformat() + "Z",
"audit_id": int(row.id or 0),
}
principal_key = _principal_tokens_key(row.auth_type, row.subject_id, row.bot_id)
auth_cache.set_json(_token_key(token_hash), payload, ttl=ttl_seconds)
auth_cache.sadd(principal_key, token_hash)
auth_cache.expire(principal_key, ttl_seconds + AUTH_STORE_SET_TTL_BUFFER_SECONDS)
if not _active_token_payload(token_hash):
row.revoked_at = _utcnow()
row.revoke_reason = "store_write_failed"
session.add(row)
session.commit()
raise RuntimeError("Failed to persist authentication token")
def _enforce_token_limit(
session: Session,
*,
auth_type: str,
subject_id: str,
bot_id: Optional[str],
max_active: int,
) -> None:
rows = [
(int(row.created_at.timestamp()), row.token_hash)
for row in _list_active_token_rows(
session,
auth_type=auth_type,
subject_id=subject_id,
bot_id=bot_id,
)
]
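# Reserve one slot for the token about to be issued: revoke the oldest
# sessions until at most (max_active - 1) remain active.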
overflow = max(0, len(rows) - max_active + 1)
if overflow <= 0:
return
for _, token_hash in rows[:overflow]:
_revoke_token_hash(session, token_hash, reason="concurrency_limit")
def _create_audit_row(
session: Session,
*,
request: Request,
auth_type: str,
subject_id: str,
bot_id: Optional[str],
raw_token: str,
expires_at: datetime,
auth_source: str,
) -> AuthLoginLog:
now = _utcnow()
row = AuthLoginLog(
auth_type=auth_type,
token_hash=_hash_session_token(raw_token),
subject_id=subject_id,
bot_id=bot_id,
auth_source=auth_source,
created_at=now,
expires_at=expires_at,
last_seen_at=now,
client_ip=_extract_client_ip(request),
user_agent=str(request.headers.get("user-agent") or "")[:500],
device_info=_summarize_device(str(request.headers.get("user-agent") or ""))[:255],
)
session.add(row)
session.commit()
session.refresh(row)
return row
def _create_auth_token(
session: Session,
*,
request: Request,
auth_type: str,
subject_id: str,
bot_id: Optional[str],
auth_source: str,
) -> str:
_ensure_auth_store_available()
ttl_seconds = _auth_token_ttl_seconds(session)
max_active = _auth_token_max_active(session)
_enforce_token_limit(
session,
auth_type=auth_type,
subject_id=subject_id,
bot_id=bot_id,
max_active=max_active,
)
raw_token = secrets.token_urlsafe(32)
row = _create_audit_row(
session,
request=request,
auth_type=auth_type,
subject_id=subject_id,
bot_id=bot_id,
raw_token=raw_token,
expires_at=_utcnow() + timedelta(seconds=ttl_seconds),
auth_source=auth_source,
)
_persist_token_payload(session, row=row, raw_token=raw_token, ttl_seconds=ttl_seconds)
return raw_token
def create_panel_token(session: Session, request: Request) -> str:
revoke_panel_token(session, request, reason="superseded")
return _create_auth_token(
session,
request=request,
auth_type="panel",
subject_id=PANEL_SUBJECT_ID,
bot_id=None,
auth_source="panel_password",
)
def create_bot_token(session: Session, request: Request, bot_id: str) -> str:
normalized_bot_id = str(bot_id or "").strip()
revoke_bot_token(session, request, normalized_bot_id, reason="superseded")
return _create_auth_token(
session,
request=request,
auth_type="bot",
subject_id=normalized_bot_id,
bot_id=normalized_bot_id,
auth_source=_resolve_bot_auth_source(session, normalized_bot_id),
)
def revoke_panel_token(session: Session, request: Request, reason: str = "logout") -> None:
_revoke_raw_token(session, _read_panel_token(request), reason=reason)
def revoke_bot_token(session: Session, request: Request, bot_id: str, reason: str = "logout") -> None:
_revoke_raw_token(session, _read_bot_token(request, bot_id), reason=reason)
def _set_cookie(response: Response, request: Request, name: str, raw_token: str, max_age: int) -> None:
response.set_cookie(
name,
raw_token,
max_age=max_age,
httponly=True,
samesite="lax",
secure=str(request.url.scheme).lower() == "https",
path="/",
)
def set_panel_token_cookie(response: Response, request: Request, raw_token: str, session: Session) -> None:
_set_cookie(response, request, PANEL_TOKEN_COOKIE, raw_token, _auth_token_ttl_seconds(session))
def set_bot_token_cookie(response: Response, request: Request, bot_id: str, raw_token: str, session: Session) -> None:
_set_cookie(response, request, _normalize_bot_cookie_name(bot_id), raw_token, _auth_token_ttl_seconds(session))
def clear_panel_token_cookie(response: Response) -> None:
response.delete_cookie(PANEL_TOKEN_COOKIE, path="/")
def clear_bot_token_cookie(response: Response, bot_id: str) -> None:
response.delete_cookie(_normalize_bot_cookie_name(bot_id), path="/")
def _resolve_token_auth(
session: Session,
*,
raw_token: str,
expected_type: str,
bot_id: Optional[str] = None,
) -> AuthPrincipal:
token = _normalize_token(raw_token)
normalized_bot_id = str(bot_id or "").strip() or None
if not token:
return AuthPrincipal(expected_type, "", normalized_bot_id, False, "missing")
token_hash = _hash_session_token(token)
payload = _active_token_payload(token_hash) if auth_cache.enabled else None
if not payload:
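# Cache miss (or cache disabled): authenticate against the durable audit table.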
row = _active_token_row(session, token_hash=token_hash, expected_type=expected_type, bot_id=normalized_bot_id)
if row is None:
return AuthPrincipal(expected_type, "", normalized_bot_id, False, "missing")
_touch_session(session, row)
return AuthPrincipal(expected_type, row.subject_id, row.bot_id, True, f"{expected_type}_token", row.id)
auth_type, subject_id, payload_bot_id = _principal_from_payload(payload)
if auth_type != expected_type or (expected_type == "bot" and payload_bot_id != normalized_bot_id):
row = _active_token_row(session, token_hash=token_hash, expected_type=expected_type, bot_id=normalized_bot_id)
if row is None:
return AuthPrincipal(expected_type, "", normalized_bot_id, False, "missing")
_touch_session(session, row)
return AuthPrincipal(expected_type, row.subject_id, row.bot_id, True, f"{expected_type}_token", row.id)
expires_at_raw = str(payload.get("expires_at") or "").strip()
if expires_at_raw:
try:
expires_at = datetime.fromisoformat(expires_at_raw.replace("Z", ""))
if expires_at <= _utcnow():
_revoke_token_hash(session, token_hash, reason="expired")
return AuthPrincipal(expected_type, "", normalized_bot_id, False, "missing")
except Exception:
pass
row_id = int(payload.get("audit_id") or 0) or None
if row_id is not None:
row = session.get(AuthLoginLog, row_id)
if row is None or row.revoked_at is not None:
fallback_row = _active_token_row(
session,
token_hash=token_hash,
expected_type=expected_type,
bot_id=normalized_bot_id,
)
if fallback_row is None:
return AuthPrincipal(expected_type, "", normalized_bot_id, False, "missing")
_touch_session(session, fallback_row)
return AuthPrincipal(
expected_type,
fallback_row.subject_id,
fallback_row.bot_id,
True,
f"{expected_type}_token",
fallback_row.id,
)
if row.expires_at <= _utcnow():
_revoke_token_hash(session, token_hash, reason="expired")
return AuthPrincipal(expected_type, "", normalized_bot_id, False, "missing")
_touch_session(session, row)
return AuthPrincipal(expected_type, subject_id, payload_bot_id, True, f"{expected_type}_token", row_id)
def resolve_panel_request_auth(session: Session, request: Request) -> AuthPrincipal:
if not _is_panel_auth_enabled():
return AuthPrincipal("panel", PANEL_SUBJECT_ID, None, True, "unprotected")
return _resolve_token_auth(session, raw_token=_read_panel_token(request), expected_type="panel")
def resolve_bot_request_auth(session: Session, request: Request, bot_id: str) -> AuthPrincipal:
normalized_bot_id = str(bot_id or "").strip()
if not normalized_bot_id:
return AuthPrincipal("bot", "", None, False, "missing")
return _resolve_token_auth(
session,
raw_token=_read_bot_token(request, normalized_bot_id),
expected_type="bot",
bot_id=normalized_bot_id,
)
def resolve_panel_websocket_auth(session: Session, websocket: WebSocket) -> AuthPrincipal:
if not _is_panel_auth_enabled():
return AuthPrincipal("panel", PANEL_SUBJECT_ID, None, True, "unprotected")
return _resolve_token_auth(session, raw_token=_read_panel_token_ws(websocket), expected_type="panel")
def resolve_bot_websocket_auth(session: Session, websocket: WebSocket, bot_id: str) -> AuthPrincipal:
normalized_bot_id = str(bot_id or "").strip()
if not normalized_bot_id:
return AuthPrincipal("bot", "", None, False, "missing")
return _resolve_token_auth(
session,
raw_token=_read_bot_token_ws(websocket, normalized_bot_id),
expected_type="bot",
bot_id=normalized_bot_id,
)
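A route-dependency sketch (the FastAPI wiring and get_session are assumptions, not part of this file):

from fastapi import Depends, HTTPException

def require_panel_auth(request: Request, session: Session = Depends(get_session)) -> AuthPrincipal:
    # Rejects with 401 when a panel password is configured and no valid token accompanies the request.
    principal = resolve_panel_request_auth(session, request)
    if not principal.authenticated:
        raise HTTPException(status_code=401, detail="Panel authentication required")
    return principal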

View File

@@ -0,0 +1,100 @@
from __future__ import annotations
from datetime import datetime
from typing import Optional
from sqlalchemy import func, or_
from sqlmodel import Session, select
from models.auth import AuthLoginLog
from schemas.platform import PlatformLoginLogItem, PlatformLoginLogResponse
def _to_iso(value: Optional[datetime]) -> Optional[str]:
if value is None:
return None
return value.isoformat() + "Z"
def _normalize_status(value: str) -> str:
normalized = str(value or "").strip().lower()
if normalized in {"active", "revoked"}:
return normalized
return "all"
def list_login_logs(
session: Session,
*,
search: str = "",
auth_type: str = "",
status: str = "all",
limit: int = 50,
offset: int = 0,
) -> PlatformLoginLogResponse:
normalized_search = str(search or "").strip()
normalized_type = str(auth_type or "").strip().lower()
normalized_status = _normalize_status(status)
normalized_limit = max(1, min(200, int(limit or 50)))
normalized_offset = max(0, int(offset or 0))
stmt = select(AuthLoginLog)
count_stmt = select(func.count()).select_from(AuthLoginLog)
if normalized_type in {"panel", "bot"}:
stmt = stmt.where(AuthLoginLog.auth_type == normalized_type)
count_stmt = count_stmt.where(AuthLoginLog.auth_type == normalized_type)
if normalized_status == "active":
stmt = stmt.where(AuthLoginLog.revoked_at == None) # noqa: E711
count_stmt = count_stmt.where(AuthLoginLog.revoked_at == None) # noqa: E711
elif normalized_status == "revoked":
stmt = stmt.where(AuthLoginLog.revoked_at != None) # noqa: E711
count_stmt = count_stmt.where(AuthLoginLog.revoked_at != None) # noqa: E711
if normalized_search:
like_value = f"%{normalized_search}%"
search_filter = or_(
AuthLoginLog.subject_id.ilike(like_value),
AuthLoginLog.bot_id.ilike(like_value),
AuthLoginLog.client_ip.ilike(like_value),
AuthLoginLog.device_info.ilike(like_value),
AuthLoginLog.user_agent.ilike(like_value),
AuthLoginLog.auth_source.ilike(like_value),
AuthLoginLog.revoke_reason.ilike(like_value),
)
stmt = stmt.where(search_filter)
count_stmt = count_stmt.where(search_filter)
total = int(session.exec(count_stmt).one() or 0)
rows = session.exec(
stmt.order_by(AuthLoginLog.created_at.desc(), AuthLoginLog.id.desc()).offset(normalized_offset).limit(normalized_limit)
).all()
items = [
PlatformLoginLogItem(
id=int(row.id or 0),
auth_type=row.auth_type,
subject_id=row.subject_id,
bot_id=row.bot_id,
auth_source=str(row.auth_source or ""),
client_ip=row.client_ip,
user_agent=row.user_agent,
device_info=row.device_info,
created_at=_to_iso(row.created_at) or "",
last_seen_at=_to_iso(row.last_seen_at),
expires_at=_to_iso(row.expires_at),
revoked_at=_to_iso(row.revoked_at),
revoke_reason=row.revoke_reason,
status="revoked" if row.revoked_at else "active",
)
for row in rows
]
return PlatformLoginLogResponse(
items=items,
total=total,
limit=normalized_limit,
offset=normalized_offset,
has_more=normalized_offset + len(items) < total,
)
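Usage sketch (assumed caller): page through active panel sessions with the offset cursor until has_more turns false:

def iter_active_panel_logs(session: Session):
    offset = 0
    while True:
        page = list_login_logs(session, auth_type="panel", status="active", limit=50, offset=offset)
        yield from page.items
        if not page.has_more:
            break
        offset += page.limit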

View File

@@ -0,0 +1,109 @@
from typing import Any, Dict
from sqlmodel import Session, select
from models.bot import BotInstance, NanobotImage
from services.bot_storage_service import get_bot_resource_limits, get_bot_workspace_snapshot
from services.platform_activity_service import (
get_bot_activity_stats,
list_activity_events,
prune_expired_activity_events,
)
from services.platform_settings_service import get_platform_settings
from services.platform_usage_service import list_usage
def build_platform_overview(session: Session, docker_manager: Any) -> Dict[str, Any]:
deleted = prune_expired_activity_events(session, force=False)
if deleted > 0:
session.commit()
bots = session.exec(
select(BotInstance).order_by(BotInstance.created_at.desc(), BotInstance.id.asc())
).all()
images = session.exec(select(NanobotImage).order_by(NanobotImage.created_at.desc())).all()
settings = get_platform_settings(session)
running = 0
stopped = 0
disabled = 0
configured_cpu_total = 0.0
configured_memory_total = 0
configured_storage_total = 0
workspace_used_total = 0
workspace_limit_total = 0
live_cpu_percent_total = 0.0
live_memory_used_total = 0
live_memory_limit_total = 0
for bot in bots:
enabled = bool(getattr(bot, "enabled", True))
runtime_status = docker_manager.get_bot_status(bot.id) if docker_manager else str(bot.docker_status or "STOPPED")
resources = get_bot_resource_limits(bot.id)
runtime = (
docker_manager.get_bot_resource_snapshot(bot.id)
if docker_manager
else {"usage": {}, "limits": {}, "docker_status": runtime_status}
)
workspace = get_bot_workspace_snapshot(bot.id, config_data=None)
workspace_used = int(workspace.get("usage_bytes") or 0)
workspace_limit = int(workspace.get("configured_limit_bytes") or 0)
configured_cpu_total += float(resources["cpu_cores"] or 0)
configured_memory_total += int(resources["memory_mb"] or 0) * 1024 * 1024
configured_storage_total += workspace_limit
workspace_used_total += workspace_used
workspace_limit_total += workspace_limit
live_cpu_percent_total += float((runtime.get("usage") or {}).get("cpu_percent") or 0.0)
live_memory_used_total += int((runtime.get("usage") or {}).get("memory_bytes") or 0)
live_memory_limit_total += int((runtime.get("usage") or {}).get("memory_limit_bytes") or 0)
if not enabled:
disabled += 1
elif runtime_status == "RUNNING":
running += 1
else:
stopped += 1
usage = list_usage(session, limit=20)
events = list_activity_events(session, limit=20)
activity_stats = get_bot_activity_stats(session)
return {
"summary": {
"bots": {
"total": len(bots),
"running": running,
"stopped": stopped,
"disabled": disabled,
},
"images": {
"total": len(images),
"ready": len([row for row in images if row.status == "READY"]),
"abnormal": len([row for row in images if row.status != "READY"]),
},
"resources": {
"configured_cpu_cores": round(configured_cpu_total, 2),
"configured_memory_bytes": configured_memory_total,
"configured_storage_bytes": configured_storage_total,
"live_cpu_percent": round(live_cpu_percent_total, 2),
"live_memory_used_bytes": live_memory_used_total,
"live_memory_limit_bytes": live_memory_limit_total,
"workspace_used_bytes": workspace_used_total,
"workspace_limit_bytes": workspace_limit_total,
},
},
"images": [
{
"tag": row.tag,
"version": row.version,
"status": row.status,
"source_dir": row.source_dir,
"created_at": row.created_at.isoformat() + "Z",
}
for row in images
],
"settings": settings.model_dump(),
"usage": usage,
"events": events,
"activity_stats": activity_stats,
}
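A minimal consumer sketch (assumed caller) showing how the headline counters hang together:

def print_bot_headline(session: Session, docker_manager: Any) -> None:
    overview = build_platform_overview(session, docker_manager)
    bots = overview["summary"]["bots"]
    print(f"{bots['running']}/{bots['total']} running, {bots['disabled']} disabled")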

File diff suppressed because it is too large

View File

@@ -0,0 +1,233 @@
import json
import re
from datetime import datetime
from typing import Any, Dict, List
from sqlmodel import Session
from core.settings import (
DEFAULT_AUTH_TOKEN_MAX_ACTIVE,
DEFAULT_AUTH_TOKEN_TTL_HOURS,
DEFAULT_CHAT_PULL_PAGE_SIZE,
DEFAULT_PAGE_SIZE,
DEFAULT_UPLOAD_MAX_MB,
DEFAULT_WORKSPACE_DOWNLOAD_EXTENSIONS,
STT_ENABLED_DEFAULT,
)
from models.platform import PlatformSetting
from schemas.platform import SystemSettingItem
DEFAULT_ALLOWED_ATTACHMENT_EXTENSIONS: tuple[str, ...] = ()
DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS = 7
ACTIVITY_EVENT_RETENTION_SETTING_KEY = "activity_event_retention_days"
SETTING_KEYS = (
"page_size",
"chat_pull_page_size",
"auth_token_ttl_hours",
"auth_token_max_active",
"upload_max_mb",
"allowed_attachment_extensions",
"workspace_download_extensions",
"speech_enabled",
)
PROTECTED_SETTING_KEYS = set(SETTING_KEYS) | {ACTIVITY_EVENT_RETENTION_SETTING_KEY}
SYSTEM_SETTING_DEFINITIONS: Dict[str, Dict[str, Any]] = {
"page_size": {
"name": "分页大小",
"category": "ui",
"description": "平台各类列表默认每页条数。",
"value_type": "integer",
"value": DEFAULT_PAGE_SIZE,
"is_public": True,
"sort_order": 5,
},
"chat_pull_page_size": {
"name": "对话懒加载条数",
"category": "chat",
"description": "Bot 对话区向上懒加载时每次读取的消息条数。",
"value_type": "integer",
"value": DEFAULT_CHAT_PULL_PAGE_SIZE,
"is_public": True,
"sort_order": 8,
},
"auth_token_ttl_hours": {
"name": "认证 Token 过期小时数",
"category": "auth",
"description": "Panel 与 Bot 登录 Token 的统一有效时长,单位小时。",
"value_type": "integer",
"value": DEFAULT_AUTH_TOKEN_TTL_HOURS,
"is_public": False,
"sort_order": 10,
},
"auth_token_max_active": {
"name": "认证 Token 最大并发数",
"category": "auth",
"description": "同一主体允许同时活跃的 Token 数量,超过时自动撤销最旧 Token。",
"value_type": "integer",
"value": DEFAULT_AUTH_TOKEN_MAX_ACTIVE,
"is_public": False,
"sort_order": 11,
},
"upload_max_mb": {
"name": "上传大小限制",
"category": "upload",
"description": "单文件上传大小限制,单位 MB。",
"value_type": "integer",
"value": DEFAULT_UPLOAD_MAX_MB,
"is_public": False,
"sort_order": 20,
},
"allowed_attachment_extensions": {
"name": "允许附件后缀",
"category": "upload",
"description": "允许上传的附件后缀列表,留空表示不限制。",
"value_type": "json",
"value": list(DEFAULT_ALLOWED_ATTACHMENT_EXTENSIONS),
"is_public": False,
"sort_order": 20,
},
"workspace_download_extensions": {
"name": "工作区下载后缀",
"category": "workspace",
"description": "命中后缀的工作区文件默认走下载模式。",
"value_type": "json",
"value": list(DEFAULT_WORKSPACE_DOWNLOAD_EXTENSIONS),
"is_public": False,
"sort_order": 30,
},
"speech_enabled": {
"name": "语音识别开关",
"category": "speech",
"description": "控制 Bot 语音转写功能是否启用。",
"value_type": "boolean",
"value": STT_ENABLED_DEFAULT,
"is_public": True,
"sort_order": 32,
},
ACTIVITY_EVENT_RETENTION_SETTING_KEY: {
"name": "活动事件保留天数",
"category": "maintenance",
"description": "bot_activity_event 运维事件的保留天数,超期记录会自动清理。",
"value_type": "integer",
"value": DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS,
"is_public": False,
"sort_order": 34,
},
}
def _utcnow() -> datetime:
return datetime.utcnow()
def _normalize_extension(raw: Any) -> str:
text = str(raw or "").strip().lower()
if not text:
return ""
if text.startswith("*."):
text = text[1:]
if not text.startswith("."):
text = f".{text}"
if not re.fullmatch(r"\.[a-z0-9][a-z0-9._+-]{0,31}", text):
return ""
return text
def _normalize_extension_list(rows: Any) -> List[str]:
if not isinstance(rows, list):
return []
normalized: List[str] = []
for item in rows:
ext = _normalize_extension(item)
if ext and ext not in normalized:
normalized.append(ext)
return normalized
def _normalize_setting_key(raw: Any) -> str:
text = str(raw or "").strip()
return re.sub(r"[^a-zA-Z0-9_.-]+", "_", text).strip("._-").lower()
def _normalize_setting_value(value: Any, value_type: str) -> Any:
normalized_type = str(value_type or "json").strip().lower() or "json"
if normalized_type == "integer":
return int(value or 0)
if normalized_type == "float":
return float(value or 0)
if normalized_type == "boolean":
if isinstance(value, bool):
return value
return str(value or "").strip().lower() in {"1", "true", "yes", "on"}
if normalized_type == "string":
return str(value or "")
if normalized_type == "json":
return value
raise ValueError(f"Unsupported value_type: {normalized_type}")
def _read_setting_value(row: PlatformSetting) -> Any:
try:
value = json.loads(row.value_json or "null")
except Exception:
value = None
return _normalize_setting_value(value, row.value_type)
def _setting_item_from_row(row: PlatformSetting) -> Dict[str, Any]:
return SystemSettingItem(
key=row.key,
name=row.name,
category=row.category,
description=row.description,
value_type=row.value_type,
value=_read_setting_value(row),
is_public=bool(row.is_public),
sort_order=int(row.sort_order or 100),
created_at=row.created_at.isoformat() + "Z",
updated_at=row.updated_at.isoformat() + "Z",
).model_dump()
def _upsert_setting_row(
session: Session,
key: str,
*,
name: str,
category: str,
description: str,
value_type: str,
value: Any,
is_public: bool,
sort_order: int,
) -> PlatformSetting:
normalized_key = _normalize_setting_key(key)
if not normalized_key:
raise ValueError("Setting key is required")
normalized_type = str(value_type or "json").strip().lower() or "json"
normalized_value = _normalize_setting_value(value, normalized_type)
now = _utcnow()
row = session.get(PlatformSetting, normalized_key)
if row is None:
row = PlatformSetting(
key=normalized_key,
name=str(name or normalized_key),
category=str(category or "general"),
description=str(description or ""),
value_type=normalized_type,
value_json=json.dumps(normalized_value, ensure_ascii=False),
is_public=bool(is_public),
sort_order=int(sort_order or 100),
created_at=now,
updated_at=now,
)
else:
row.name = str(name or row.name or normalized_key)
row.category = str(category or row.category or "general")
row.description = str(description or row.description or "")
row.value_type = normalized_type
row.value_json = json.dumps(normalized_value, ensure_ascii=False)
row.is_public = bool(is_public)
row.sort_order = int(sort_order or row.sort_order or 100)
row.updated_at = now
session.add(row)
return row
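Expected normalizer behavior, per the rules above (illustrative assertions, not part of this file):

def _demo_normalizers() -> None:
    assert _normalize_extension("PDF") == ".pdf"
    assert _normalize_extension("*.JPG") == ".jpg"       # "*.ext" shorthand is accepted
    assert _normalize_extension("not an ext!") == ""     # rejected by the pattern check
    assert _normalize_extension_list(["pdf", ".pdf", "PNG"]) == [".pdf", ".png"]  # de-duplicated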

View File

@@ -0,0 +1,142 @@
from typing import Any, Dict, List
from sqlmodel import Session, select
from core.database import engine
from core.settings import (
DEFAULT_STT_AUDIO_FILTER,
DEFAULT_STT_AUDIO_PREPROCESS,
DEFAULT_STT_DEFAULT_LANGUAGE,
DEFAULT_STT_FORCE_SIMPLIFIED,
DEFAULT_STT_INITIAL_PROMPT,
DEFAULT_STT_MAX_AUDIO_SECONDS,
STT_DEVICE,
STT_MODEL,
)
from models.platform import PlatformSetting
from schemas.platform import PlatformSettingsPayload
from services.platform_settings_core import (
ACTIVITY_EVENT_RETENTION_SETTING_KEY,
DEFAULT_ACTIVITY_EVENT_RETENTION_DAYS,
DEFAULT_ALLOWED_ATTACHMENT_EXTENSIONS,
SETTING_KEYS,
SYSTEM_SETTING_DEFINITIONS,
_normalize_extension_list,
_read_setting_value,
_upsert_setting_row,
)
from services.platform_system_settings_service import (
create_or_update_system_setting,
delete_system_setting,
get_activity_event_retention_days,
list_system_settings,
validate_required_system_settings,
)
def get_platform_settings(session: Session) -> PlatformSettingsPayload:
validate_required_system_settings(session)
rows = session.exec(select(PlatformSetting).where(PlatformSetting.key.in_(SETTING_KEYS))).all()
data: Dict[str, Any] = {row.key: _read_setting_value(row) for row in rows}
missing = [key for key in SETTING_KEYS if key not in data]
if missing:
raise RuntimeError(
"Database seed data is not initialized. "
f"Missing sys_setting keys: {', '.join(missing)}. "
"Run scripts/init-full-db.sh or apply scripts/sql/init-data.sql before starting the backend."
)
try:
return PlatformSettingsPayload.model_validate(
{
"page_size": max(1, min(100, int(data["page_size"]))),
"chat_pull_page_size": max(10, min(500, int(data["chat_pull_page_size"]))),
"auth_token_ttl_hours": max(1, min(720, int(data["auth_token_ttl_hours"]))),
"auth_token_max_active": max(1, min(20, int(data["auth_token_max_active"]))),
"upload_max_mb": int(data["upload_max_mb"]),
"allowed_attachment_extensions": _normalize_extension_list(data["allowed_attachment_extensions"]),
"workspace_download_extensions": _normalize_extension_list(data["workspace_download_extensions"]),
"speech_enabled": bool(data["speech_enabled"]),
}
)
except Exception as exc:
raise RuntimeError(
"sys_setting contains invalid platform configuration values. "
"Fix the rows manually or reapply scripts/sql/init-data.sql."
) from exc
def save_platform_settings(session: Session, payload: PlatformSettingsPayload) -> PlatformSettingsPayload:
normalized = PlatformSettingsPayload(
page_size=max(1, min(100, int(payload.page_size))),
chat_pull_page_size=max(10, min(500, int(payload.chat_pull_page_size))),
auth_token_ttl_hours=max(1, min(720, int(payload.auth_token_ttl_hours))),
auth_token_max_active=max(1, min(20, int(payload.auth_token_max_active))),
upload_max_mb=payload.upload_max_mb,
allowed_attachment_extensions=_normalize_extension_list(payload.allowed_attachment_extensions),
workspace_download_extensions=_normalize_extension_list(payload.workspace_download_extensions),
speech_enabled=bool(payload.speech_enabled),
)
payload_by_key = normalized.model_dump()
for key in SETTING_KEYS:
definition = SYSTEM_SETTING_DEFINITIONS[key]
_upsert_setting_row(
session,
key,
name=str(definition["name"]),
category=str(definition["category"]),
description=str(definition["description"]),
value_type=str(definition["value_type"]),
value=payload_by_key[key],
is_public=bool(definition["is_public"]),
sort_order=int(definition["sort_order"]),
)
session.commit()
return normalized
def get_platform_settings_snapshot() -> PlatformSettingsPayload:
with Session(engine) as session:
return get_platform_settings(session)
def get_upload_max_mb() -> int:
return get_platform_settings_snapshot().upload_max_mb
def get_allowed_attachment_extensions() -> List[str]:
return get_platform_settings_snapshot().allowed_attachment_extensions
def get_workspace_download_extensions() -> List[str]:
return get_platform_settings_snapshot().workspace_download_extensions
def get_page_size() -> int:
return get_platform_settings_snapshot().page_size
def get_chat_pull_page_size() -> int:
return get_platform_settings_snapshot().chat_pull_page_size
def get_auth_token_ttl_hours(session: Session) -> int:
return get_platform_settings(session).auth_token_ttl_hours
def get_auth_token_max_active(session: Session) -> int:
return get_platform_settings(session).auth_token_max_active
def get_speech_runtime_settings() -> Dict[str, Any]:
settings = get_platform_settings_snapshot()
return {
"enabled": bool(settings.speech_enabled),
"max_audio_seconds": int(DEFAULT_STT_MAX_AUDIO_SECONDS),
"default_language": str(DEFAULT_STT_DEFAULT_LANGUAGE or "zh").strip().lower() or "zh",
"force_simplified": bool(DEFAULT_STT_FORCE_SIMPLIFIED),
"audio_preprocess": bool(DEFAULT_STT_AUDIO_PREPROCESS),
"audio_filter": str(DEFAULT_STT_AUDIO_FILTER or "").strip(),
"initial_prompt": str(DEFAULT_STT_INITIAL_PROMPT or "").strip(),
"model": STT_MODEL,
"device": STT_DEVICE,
}
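Round-trip sketch (assumed caller): read, adjust a single field, persist. save_platform_settings re-clamps every value, so out-of-range inputs are corrected rather than rejected:

def demo_update_upload_limit(session: Session) -> None:
    settings = get_platform_settings(session)
    updated = settings.model_copy(update={"upload_max_mb": 50})
    saved = save_platform_settings(session, updated)
    assert saved.upload_max_mb == 50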

View File

@@ -0,0 +1,120 @@
from typing import Any, Dict, List
from sqlmodel import Session, select
from models.platform import PlatformSetting
from schemas.platform import SystemSettingPayload
from services.platform_settings_core import (
ACTIVITY_EVENT_RETENTION_SETTING_KEY,
PROTECTED_SETTING_KEYS,
SYSTEM_SETTING_DEFINITIONS,
_normalize_setting_key,
_read_setting_value,
_setting_item_from_row,
_upsert_setting_row,
)
REQUIRED_SYSTEM_SETTING_KEYS = tuple(SYSTEM_SETTING_DEFINITIONS.keys())
DEPRECATED_SYSTEM_SETTING_KEYS = ("command_auto_unlock_seconds",)
def _prune_deprecated_system_settings(session: Session) -> None:
removed = False
for key in DEPRECATED_SYSTEM_SETTING_KEYS:
row = session.get(PlatformSetting, key)
if row is None:
continue
session.delete(row)
removed = True
if removed:
session.commit()
def validate_required_system_settings(session: Session) -> None:
_prune_deprecated_system_settings(session)
stmt = select(PlatformSetting.key).where(PlatformSetting.key.in_(REQUIRED_SYSTEM_SETTING_KEYS))
present = {
str(key or "").strip()
for key in session.exec(stmt).all()
if str(key or "").strip()
}
missing = [key for key in REQUIRED_SYSTEM_SETTING_KEYS if key not in present]
if missing:
raise RuntimeError(
"Database seed data is not initialized. "
f"Missing sys_setting keys: {', '.join(missing)}. "
"Run scripts/init-full-db.sh or apply scripts/sql/init-data.sql before starting the backend."
)
def list_system_settings(session: Session, search: str = "") -> List[Dict[str, Any]]:
validate_required_system_settings(session)
stmt = select(PlatformSetting).order_by(PlatformSetting.sort_order.asc(), PlatformSetting.key.asc())
rows = session.exec(stmt).all()
keyword = str(search or "").strip().lower()
items = [_setting_item_from_row(row) for row in rows]
if not keyword:
return items
return [
item
for item in items
if keyword in str(item["key"]).lower()
or keyword in str(item["name"]).lower()
or keyword in str(item["category"]).lower()
or keyword in str(item["description"]).lower()
]
def create_or_update_system_setting(session: Session, payload: SystemSettingPayload) -> Dict[str, Any]:
normalized_key = _normalize_setting_key(payload.key)
if normalized_key in DEPRECATED_SYSTEM_SETTING_KEYS:
raise ValueError("Setting key has been removed")
definition = SYSTEM_SETTING_DEFINITIONS.get(normalized_key, {})
row = _upsert_setting_row(
session,
payload.key,
name=payload.name or str(definition.get("name") or payload.key),
category=payload.category or str(definition.get("category") or "general"),
description=payload.description or str(definition.get("description") or ""),
value_type=payload.value_type or str(definition.get("value_type") or "json"),
value=payload.value if payload.value is not None else definition.get("value"),
is_public=payload.is_public,
sort_order=payload.sort_order or int(definition.get("sort_order") or 100),
)
if normalized_key == ACTIVITY_EVENT_RETENTION_SETTING_KEY:
from services.platform_activity_service import prune_expired_activity_events
prune_expired_activity_events(session, force=True)
session.commit()
session.refresh(row)
return _setting_item_from_row(row)
def delete_system_setting(session: Session, key: str) -> None:
normalized_key = _normalize_setting_key(key)
if normalized_key in PROTECTED_SETTING_KEYS:
raise ValueError("Core platform settings cannot be deleted")
row = session.get(PlatformSetting, normalized_key)
if row is None:
raise ValueError("Setting not found")
session.delete(row)
session.commit()
def get_activity_event_retention_days(session: Session) -> int:
validate_required_system_settings(session)
row = session.get(PlatformSetting, ACTIVITY_EVENT_RETENTION_SETTING_KEY)
if row is None:
raise RuntimeError(
"Database seed data is not initialized. "
f"Missing sys_setting key: {ACTIVITY_EVENT_RETENTION_SETTING_KEY}. "
"Run scripts/init-full-db.sh or apply scripts/sql/init-data.sql before starting the backend."
)
try:
value = int(_read_setting_value(row))
except Exception as exc:
raise RuntimeError(
f"sys_setting value is invalid for key: {ACTIVITY_EVENT_RETENTION_SETTING_KEY}. "
"Fix the row manually or reapply scripts/sql/init-data.sql."
) from exc
return max(1, min(3650, value))

View File

@@ -0,0 +1,305 @@
import json
import math
import re
import uuid
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional
from sqlalchemy import func
from sqlmodel import Session, select
from models.platform import BotRequestUsage
from schemas.platform import (
PlatformUsageAnalytics,
PlatformUsageAnalyticsSeries,
PlatformUsageItem,
PlatformUsageResponse,
PlatformUsageSummary,
)
def _utcnow() -> datetime:
return datetime.utcnow()
def estimate_tokens(text: str) -> int:
content = str(text or "").strip()
if not content:
return 0
pieces = re.findall(r"[\u4e00-\u9fff]|[A-Za-z0-9_]+|[^\s]", content)
total = 0
for piece in pieces:
if re.fullmatch(r"[\u4e00-\u9fff]", piece):
total += 1
elif re.fullmatch(r"[A-Za-z0-9_]+", piece):
total += max(1, math.ceil(len(piece) / 4))
else:
total += 1
return max(1, total)
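
A quick sanity check of the heuristic, following the three rules above (one token per CJK character, ceil(len/4) per ASCII word, one per other symbol):

assert estimate_tokens("") == 0
assert estimate_tokens("你好") == 2                 # two CJK characters
assert estimate_tokens("transformers") == 3         # ceil(12 / 4)
assert estimate_tokens("你好 world!") == 2 + 2 + 1  # CJK chars + word + punctuation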
def create_usage_request(
session: Session,
bot_id: str,
command: str,
attachments: Optional[List[str]] = None,
channel: str = "dashboard",
metadata: Optional[Dict[str, Any]] = None,
provider: Optional[str] = None,
model: Optional[str] = None,
) -> str:
request_id = uuid.uuid4().hex
rows = [str(item).strip() for item in (attachments or []) if str(item).strip()]
input_tokens = estimate_tokens(command)
usage = BotRequestUsage(
bot_id=bot_id,
request_id=request_id,
channel=channel,
status="PENDING",
provider=(str(provider or "").strip() or None),
model=(str(model or "").strip() or None),
token_source="estimated",
input_tokens=input_tokens,
output_tokens=0,
total_tokens=input_tokens,
input_text_preview=str(command or "")[:400],
attachments_json=json.dumps(rows, ensure_ascii=False) if rows else None,
metadata_json=json.dumps(metadata or {}, ensure_ascii=False),
started_at=_utcnow(),
created_at=_utcnow(),
updated_at=_utcnow(),
)
session.add(usage)
session.flush()
return request_id
def _find_latest_pending_usage(session: Session, bot_id: str) -> Optional[BotRequestUsage]:
stmt = (
select(BotRequestUsage)
.where(BotRequestUsage.bot_id == bot_id)
.where(BotRequestUsage.status == "PENDING")
.order_by(BotRequestUsage.started_at.desc(), BotRequestUsage.id.desc())
.limit(1)
)
return session.exec(stmt).first()
def _find_pending_usage_by_request_id(session: Session, bot_id: str, request_id: str) -> Optional[BotRequestUsage]:
if not request_id:
return None
stmt = (
select(BotRequestUsage)
.where(BotRequestUsage.bot_id == bot_id)
.where(BotRequestUsage.request_id == request_id)
.where(BotRequestUsage.status == "PENDING")
.order_by(BotRequestUsage.started_at.desc(), BotRequestUsage.id.desc())
.limit(1)
)
return session.exec(stmt).first()
def bind_usage_message(
session: Session,
bot_id: str,
request_id: str,
message_id: Optional[int],
) -> Optional[BotRequestUsage]:
if not request_id or not message_id:
return None
usage_row = _find_pending_usage_by_request_id(session, bot_id, request_id)
if not usage_row:
return None
usage_row.message_id = int(message_id)
usage_row.updated_at = _utcnow()
session.add(usage_row)
return usage_row
def finalize_usage_from_packet(session: Session, bot_id: str, packet: Dict[str, Any]) -> Optional[BotRequestUsage]:
request_id = str(packet.get("request_id") or "").strip()
usage_row = _find_pending_usage_by_request_id(session, bot_id, request_id) or _find_latest_pending_usage(session, bot_id)
if not usage_row:
return None
raw_usage = packet.get("usage")
input_tokens: Optional[int] = None
output_tokens: Optional[int] = None
source = "estimated"
if isinstance(raw_usage, dict):
for key in ("input_tokens", "prompt_tokens", "promptTokens"):
if raw_usage.get(key) is not None:
try:
input_tokens = int(raw_usage.get(key) or 0)
except Exception:
input_tokens = None
break
for key in ("output_tokens", "completion_tokens", "completionTokens"):
if raw_usage.get(key) is not None:
try:
output_tokens = int(raw_usage.get(key) or 0)
except Exception:
output_tokens = None
break
if input_tokens is not None or output_tokens is not None:
source = "exact"
text = str(packet.get("text") or packet.get("content") or "").strip()
provider = str(packet.get("provider") or "").strip()
model = str(packet.get("model") or "").strip()
message_id = packet.get("message_id")
if input_tokens is None:
input_tokens = usage_row.input_tokens
if output_tokens is None:
output_tokens = estimate_tokens(text)
if source == "exact":
source = "mixed"
if provider:
usage_row.provider = provider[:120]
if model:
usage_row.model = model[:255]
if message_id is not None:
try:
usage_row.message_id = int(message_id)
except Exception:
pass
usage_row.output_tokens = max(0, int(output_tokens or 0))
usage_row.input_tokens = max(0, int(input_tokens or 0))
usage_row.total_tokens = usage_row.input_tokens + usage_row.output_tokens
usage_row.output_text_preview = text[:400] if text else usage_row.output_text_preview
usage_row.status = "COMPLETED"
usage_row.token_source = source
usage_row.completed_at = _utcnow()
usage_row.updated_at = _utcnow()
session.add(usage_row)
return usage_row
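
How token_source is resolved for a packet that reports only prompt tokens; the packet below is hypothetical sample data, with field names matching what the function reads:

packet = {
    "request_id": "req-123",          # hypothetical id
    "text": "done",
    "usage": {"prompt_tokens": 120},  # exact input count, no output count
    "provider": "openai",
    "model": "gpt-4o-mini",
}
# input_tokens=120 is taken from the packet (initially "exact"); output_tokens
# is absent, so it falls back to estimate_tokens("done") and the row is
# finalized with token_source="mixed".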
def fail_latest_usage(session: Session, bot_id: str, detail: str) -> Optional[BotRequestUsage]:
usage_row = _find_latest_pending_usage(session, bot_id)
if not usage_row:
return None
usage_row.status = "ERROR"
usage_row.error_text = str(detail or "")[:500]
usage_row.completed_at = _utcnow()
usage_row.updated_at = _utcnow()
session.add(usage_row)
return usage_row
def _build_usage_analytics(
session: Session,
bot_id: Optional[str] = None,
window_days: int = 7,
) -> PlatformUsageAnalytics:
safe_window_days = max(1, int(window_days or 0))
today = _utcnow().date()
days = [today - timedelta(days=offset) for offset in range(safe_window_days - 1, -1, -1)]
day_keys = [day.isoformat() for day in days]
day_labels = [day.strftime("%m-%d") for day in days]
first_day = days[0]
first_started_at = datetime.combine(first_day, datetime.min.time())
stmt = select(BotRequestUsage.model, BotRequestUsage.started_at).where(BotRequestUsage.started_at >= first_started_at)
if bot_id:
stmt = stmt.where(BotRequestUsage.bot_id == bot_id)
counts_by_model: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
total_requests = 0
for model_name, started_at in session.exec(stmt).all():
if not started_at:
continue
day_key = started_at.date().isoformat()
if day_key not in day_keys:
continue
normalized_model = str(model_name or "").strip() or "Unknown"
counts_by_model[normalized_model][day_key] += 1
total_requests += 1
series = [
PlatformUsageAnalyticsSeries(
model=model_name,
total_requests=sum(day_counts.values()),
daily_counts=[int(day_counts.get(day_key, 0)) for day_key in day_keys],
)
for model_name, day_counts in counts_by_model.items()
]
series.sort(key=lambda item: (-item.total_requests, item.model.lower()))
return PlatformUsageAnalytics(
window_days=safe_window_days,
days=day_labels,
total_requests=total_requests,
series=series,
)
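
The day buckets run oldest-first; for example, window_days=3 evaluated on 2026-04-28 produces:

# days       -> [2026-04-26, 2026-04-27, 2026-04-28]
# day_keys   -> ["2026-04-26", "2026-04-27", "2026-04-28"]
# day_labels -> ["04-26", "04-27", "04-28"]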
def list_usage(
session: Session,
bot_id: Optional[str] = None,
limit: int = 100,
offset: int = 0,
) -> Dict[str, Any]:
safe_limit = max(1, min(int(limit), 500))
safe_offset = max(0, int(offset or 0))
stmt = (
select(BotRequestUsage)
.order_by(BotRequestUsage.started_at.desc(), BotRequestUsage.id.desc())
.offset(safe_offset)
.limit(safe_limit)
)
summary_stmt = select(
func.count(BotRequestUsage.id),
func.coalesce(func.sum(BotRequestUsage.input_tokens), 0),
func.coalesce(func.sum(BotRequestUsage.output_tokens), 0),
func.coalesce(func.sum(BotRequestUsage.total_tokens), 0),
)
total_stmt = select(func.count(BotRequestUsage.id))
if bot_id:
stmt = stmt.where(BotRequestUsage.bot_id == bot_id)
summary_stmt = summary_stmt.where(BotRequestUsage.bot_id == bot_id)
total_stmt = total_stmt.where(BotRequestUsage.bot_id == bot_id)
else:
since = _utcnow() - timedelta(days=1)
summary_stmt = summary_stmt.where(BotRequestUsage.created_at >= since)
rows = session.exec(stmt).all()
count, input_sum, output_sum, total_sum = session.exec(summary_stmt).one()
total = int(session.exec(total_stmt).one() or 0)
items = [
PlatformUsageItem(
id=int(row.id or 0),
bot_id=row.bot_id,
message_id=int(row.message_id) if row.message_id is not None else None,
request_id=row.request_id,
channel=row.channel,
status=row.status,
provider=row.provider,
model=row.model,
token_source=row.token_source,
content=row.input_text_preview or row.output_text_preview,
input_tokens=int(row.input_tokens or 0),
output_tokens=int(row.output_tokens or 0),
total_tokens=int(row.total_tokens or 0),
input_text_preview=row.input_text_preview,
output_text_preview=row.output_text_preview,
started_at=row.started_at.isoformat() + "Z",
completed_at=row.completed_at.isoformat() + "Z" if row.completed_at else None,
).model_dump()
for row in rows
]
return PlatformUsageResponse(
summary=PlatformUsageSummary(
request_count=int(count or 0),
input_tokens=int(input_sum or 0),
output_tokens=int(output_sum or 0),
total_tokens=int(total_sum or 0),
),
items=[PlatformUsageItem.model_validate(item) for item in items],
total=total,
limit=safe_limit,
offset=safe_offset,
has_more=safe_offset + len(items) < total,
analytics=_build_usage_analytics(session, bot_id=bot_id),
).model_dump()

View File

@@ -0,0 +1,113 @@
from typing import Any, Dict, List
import httpx
from fastapi import HTTPException
def get_provider_defaults(provider: str) -> tuple[str, str]:
normalized = str(provider or "").lower().strip()
if normalized in {"openai"}:
return "openai", "https://api.openai.com/v1"
if normalized in {"openrouter"}:
return "openrouter", "https://openrouter.ai/api/v1"
if normalized in {"dashscope", "aliyun", "qwen", "aliyun-qwen"}:
return "dashscope", "https://dashscope.aliyuncs.com/compatible-mode/v1"
if normalized in {"deepseek"}:
return "deepseek", "https://api.deepseek.com/v1"
if normalized in {"xunfei", "iflytek", "xfyun"}:
return "openai", "https://spark-api-open.xf-yun.com/v1"
if normalized in {"vllm"}:
return "openai", ""
if normalized in {"kimi", "moonshot"}:
return "kimi", "https://api.moonshot.cn/v1"
if normalized in {"minimax"}:
return "minimax", "https://api.minimax.chat/v1"
return normalized, ""
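
Usage follows directly from the alias table above:

assert get_provider_defaults("Qwen") == ("dashscope", "https://dashscope.aliyuncs.com/compatible-mode/v1")
assert get_provider_defaults("moonshot") == ("kimi", "https://api.moonshot.cn/v1")
assert get_provider_defaults("some-other") == ("some-other", "")  # unknown providers pass through with no base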
def _is_dashscope_coding_plan_base(api_base: str) -> bool:
return "coding.dashscope.aliyuncs.com" in str(api_base or "").strip().lower()
async def test_provider_connection(payload: Dict[str, Any]) -> Dict[str, Any]:
provider = str(payload.get("provider") or "").strip()
api_key = str(payload.get("api_key") or "").strip()
model = str(payload.get("model") or "").strip()
api_base = str(payload.get("api_base") or "").strip()
if not provider or not api_key:
raise HTTPException(status_code=400, detail="provider and api_key are required")
normalized_provider, _ = get_provider_defaults(provider)
base = api_base.rstrip("/")
if normalized_provider not in {"openrouter", "dashscope", "kimi", "minimax", "openai", "deepseek"}:
raise HTTPException(status_code=400, detail=f"provider not supported for test: {provider}")
if not base:
raise HTTPException(status_code=400, detail=f"api_base is required for provider: {provider}")
headers = {"Authorization": f"Bearer {api_key}"}
timeout = httpx.Timeout(20.0, connect=10.0)
models_url = f"{base}/models"
try:
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.get(models_url, headers=headers)
if response.status_code < 400:
data = response.json()
models_raw = data.get("data", []) if isinstance(data, dict) else []
model_ids: List[str] = [
str(item["id"]) for item in models_raw[:20] if isinstance(item, dict) and item.get("id")
]
if model_ids or not (_is_dashscope_coding_plan_base(base) and model):
return {
"ok": True,
"provider": normalized_provider,
"endpoint": models_url,
"models_preview": model_ids[:8],
"model_hint": (
"model_found"
if model and any(model in item for item in model_ids)
else ("model_not_listed" if model else "")
),
}
if _is_dashscope_coding_plan_base(base) and model:
completions_url = f"{base}/chat/completions"
completion_response = await client.post(
completions_url,
headers=headers,
json={
"model": model,
"messages": [{"role": "user", "content": "ping"}],
"max_tokens": 1,
"temperature": 0,
},
)
if completion_response.status_code < 400:
return {
"ok": True,
"provider": normalized_provider,
"endpoint": completions_url,
"models_preview": [model],
"model_hint": "model_found",
}
return {
"ok": False,
"provider": normalized_provider,
"status_code": completion_response.status_code,
"detail": completion_response.text[:500],
}
return {
"ok": False,
"provider": normalized_provider,
"status_code": response.status_code,
"detail": response.text[:500],
}
except Exception as exc:
return {
"ok": False,
"provider": normalized_provider,
"endpoint": models_url,
"detail": str(exc),
}
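
A call sketch (must run inside an async context; the key below is a placeholder, not a real credential):

result = await test_provider_connection({
    "provider": "deepseek",
    "api_key": "sk-...",                        # placeholder
    "model": "deepseek-chat",
    "api_base": "https://api.deepseek.com/v1",
})
# success shape: {"ok": True, "provider": ..., "endpoint": ".../models",
#                 "models_preview": [...], "model_hint": ...}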

View File

@@ -0,0 +1,295 @@
import asyncio
import json
import logging
import os
import re
import time
from datetime import datetime
from typing import Any, Dict, List, Optional
from sqlmodel import Session
from core.database import engine
from core.docker_instance import docker_manager
from core.websocket_manager import manager
from models.bot import BotInstance, BotMessage
from services.bot_storage_service import get_bot_workspace_root
from services.cache_service import _invalidate_bot_detail_cache, _invalidate_bot_messages_cache
from services.platform_activity_service import record_activity_event
from services.platform_usage_service import bind_usage_message, finalize_usage_from_packet
from services.topic_runtime import publish_runtime_topic_packet
logger = logging.getLogger("dashboard.backend")
_main_loop: Optional[asyncio.AbstractEventLoop] = None
_AGENT_LOOP_READY_MARKER = "Agent loop started"
_LAST_ACTION_CONTROL_RE = re.compile(r"[\u0000-\u0008\u000B\u000C\u000E-\u001F\u007F]")
def set_main_loop(loop: Optional[asyncio.AbstractEventLoop]) -> None:
global _main_loop
_main_loop = loop
def get_main_loop() -> Optional[asyncio.AbstractEventLoop]:
return _main_loop
def _queue_runtime_broadcast(bot_id: str, packet: Dict[str, Any]) -> None:
loop = get_main_loop()
if not loop or not loop.is_running():
return
asyncio.run_coroutine_threadsafe(manager.broadcast(bot_id, packet), loop)
def broadcast_runtime_packet(bot_id: str, packet: Dict[str, Any]) -> None:
_queue_runtime_broadcast(bot_id, packet)
def _normalize_packet_channel(packet: Dict[str, Any]) -> str:
raw = str(packet.get("channel") or packet.get("source") or "").strip().lower()
if raw in {"dashboard", "dashboard_channel", "dashboard-channel"}:
return "dashboard"
return raw
def _normalize_media_item(bot_id: str, value: Any) -> str:
raw = str(value or "").strip().replace("\\", "/")
if not raw:
return ""
if raw.startswith("/root/.nanobot/workspace/"):
return raw[len("/root/.nanobot/workspace/") :].lstrip("/")
root = get_bot_workspace_root(bot_id)
if os.path.isabs(raw):
try:
if os.path.commonpath([root, raw]) == root:
return os.path.relpath(raw, root).replace("\\", "/")
except Exception:
pass
return raw.lstrip("/")
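
The container-absolute prefix is rewritten to a workspace-relative path; a minimal check (the bot id is hypothetical, and this branch returns before the workspace root is consulted):

assert _normalize_media_item("bot-1", "/root/.nanobot/workspace/out/plot.png") == "out/plot.png"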
def _normalize_media_list(raw: Any, bot_id: str) -> List[str]:
if not isinstance(raw, list):
return []
rows: List[str] = []
for value in raw:
normalized = _normalize_media_item(bot_id, value)
if normalized:
rows.append(normalized)
return rows
def _normalize_last_action_text(value: Any) -> str:
text = str(value or "")
if not text:
return ""
text = _LAST_ACTION_CONTROL_RE.sub("", text)
text = text.replace("\r\n", "\n").replace("\r", "\n")
text = "\n".join(line.rstrip() for line in text.split("\n"))
text = re.sub(r"\n{4,}", "\n\n\n", text).strip()
return text[:4000]
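
The regex strips control characters while keeping tabs and newlines, then blank-line runs of four or more collapse to three; for example:

raw = "step 1\r\nstep 2\x00\x07   \n\n\n\n\ndone"
assert _normalize_last_action_text(raw) == "step 1\nstep 2\n\n\ndone"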
def _persist_runtime_packet(bot_id: str, packet: Dict[str, Any]) -> Optional[int]:
packet_type = str(packet.get("type", "")).upper()
if packet_type not in {"AGENT_STATE", "ASSISTANT_MESSAGE", "USER_COMMAND", "BUS_EVENT"}:
return None
source_channel = _normalize_packet_channel(packet)
if source_channel != "dashboard":
return None
persisted_message_id: Optional[int] = None
with Session(engine) as session:
bot = session.get(BotInstance, bot_id)
if not bot:
return None
if packet_type == "AGENT_STATE":
payload = packet.get("payload") or {}
state = str(payload.get("state") or "").strip()
action = _normalize_last_action_text(payload.get("action_msg") or payload.get("msg") or "")
if state:
bot.current_state = state
if action:
bot.last_action = action
elif packet_type == "ASSISTANT_MESSAGE":
bot.current_state = "IDLE"
text_msg = str(packet.get("text") or "").strip()
media_list = _normalize_media_list(packet.get("media"), bot_id)
if text_msg or media_list:
if text_msg:
bot.last_action = _normalize_last_action_text(text_msg)
message_row = BotMessage(
bot_id=bot_id,
role="assistant",
text=text_msg,
media_json=json.dumps(media_list, ensure_ascii=False) if media_list else None,
)
session.add(message_row)
session.flush()
persisted_message_id = message_row.id
finalize_usage_from_packet(
session,
bot_id,
{
**packet,
"message_id": persisted_message_id,
},
)
elif packet_type == "USER_COMMAND":
text_msg = str(packet.get("text") or "").strip()
media_list = _normalize_media_list(packet.get("media"), bot_id)
if text_msg or media_list:
message_row = BotMessage(
bot_id=bot_id,
role="user",
text=text_msg,
media_json=json.dumps(media_list, ensure_ascii=False) if media_list else None,
)
session.add(message_row)
session.flush()
persisted_message_id = message_row.id
bind_usage_message(
session,
bot_id,
str(packet.get("request_id") or "").strip(),
persisted_message_id,
)
elif packet_type == "BUS_EVENT":
is_progress = bool(packet.get("is_progress"))
detail_text = str(packet.get("content") or packet.get("text") or "").strip()
if not is_progress:
text_msg = detail_text
media_list = _normalize_media_list(packet.get("media"), bot_id)
if text_msg or media_list:
bot.current_state = "IDLE"
if text_msg:
bot.last_action = _normalize_last_action_text(text_msg)
message_row = BotMessage(
bot_id=bot_id,
role="assistant",
text=text_msg,
media_json=json.dumps(media_list, ensure_ascii=False) if media_list else None,
)
session.add(message_row)
session.flush()
persisted_message_id = message_row.id
finalize_usage_from_packet(
session,
bot_id,
{
"text": text_msg,
"usage": packet.get("usage"),
"request_id": packet.get("request_id"),
"provider": packet.get("provider"),
"model": packet.get("model"),
"message_id": persisted_message_id,
},
)
bot.updated_at = datetime.utcnow()
session.add(bot)
session.commit()
publish_runtime_topic_packet(
engine,
bot_id,
packet,
source_channel,
persisted_message_id,
logger,
)
if persisted_message_id:
packet["message_id"] = persisted_message_id
if packet_type in {"ASSISTANT_MESSAGE", "USER_COMMAND", "BUS_EVENT"}:
_invalidate_bot_messages_cache(bot_id)
_invalidate_bot_detail_cache(bot_id)
return persisted_message_id
def persist_runtime_packet(bot_id: str, packet: Dict[str, Any]) -> Optional[int]:
return _persist_runtime_packet(bot_id, packet)
def docker_callback(bot_id: str, packet: Dict[str, Any]) -> None:
packet_type = str(packet.get("type", "")).upper()
if packet_type == "RAW_LOG":
_queue_runtime_broadcast(bot_id, packet)
return
persisted_message_id = _persist_runtime_packet(bot_id, packet)
if persisted_message_id:
packet["message_id"] = persisted_message_id
_queue_runtime_broadcast(bot_id, packet)
async def _wait_for_agent_loop_ready(
bot_id: str,
timeout_seconds: float = 12.0,
poll_interval_seconds: float = 0.5,
) -> bool:
deadline = time.monotonic() + max(1.0, timeout_seconds)
marker = _AGENT_LOOP_READY_MARKER.lower()
while time.monotonic() < deadline:
logs = docker_manager.get_recent_logs(bot_id, tail=200)
if any(marker in str(line or "").lower() for line in logs):
return True
await asyncio.sleep(max(0.1, poll_interval_seconds))
return False
async def _record_agent_loop_ready_warning(
bot_id: str,
timeout_seconds: float = 12.0,
poll_interval_seconds: float = 0.5,
) -> None:
try:
agent_loop_ready = await _wait_for_agent_loop_ready(
bot_id,
timeout_seconds=timeout_seconds,
poll_interval_seconds=poll_interval_seconds,
)
if agent_loop_ready:
return
if docker_manager.get_bot_status(bot_id) != "RUNNING":
return
detail = (
"Bot container started, but ready marker was not found in logs within "
f"{int(timeout_seconds)}s. Check bot logs or MCP config if the bot stays unavailable."
)
logger.warning("bot_id=%s agent loop ready marker not found within %ss", bot_id, timeout_seconds)
with Session(engine) as background_session:
if not background_session.get(BotInstance, bot_id):
return
record_activity_event(
background_session,
bot_id,
"bot_warning",
channel="system",
detail=detail,
metadata={
"kind": "agent_loop_ready_timeout",
"marker": _AGENT_LOOP_READY_MARKER,
"timeout_seconds": timeout_seconds,
},
)
background_session.commit()
_invalidate_bot_detail_cache(bot_id)
except Exception:
logger.exception("Failed to record agent loop readiness warning for bot_id=%s", bot_id)
async def record_agent_loop_ready_warning(
bot_id: str,
timeout_seconds: float = 12.0,
poll_interval_seconds: float = 0.5,
) -> None:
await _record_agent_loop_ready_warning(
bot_id,
timeout_seconds=timeout_seconds,
poll_interval_seconds=poll_interval_seconds,
)

View File

@@ -0,0 +1,434 @@
import json
import os
import tempfile
import zipfile
from datetime import datetime
from typing import Any, Dict, List, Optional
from fastapi import HTTPException, UploadFile
from sqlmodel import Session, select
from core.settings import DATA_ROOT
from core.utils import (
_is_ignored_skill_zip_top_level,
_is_valid_top_level_skill_name,
_parse_json_string_list,
_read_description_from_text,
_sanitize_skill_market_key,
_sanitize_zip_filename,
)
from models.skill import BotSkillInstall, SkillMarketItem
from services.platform_settings_service import get_platform_settings_snapshot
from services.skill_service import get_bot_skills_root, install_skill_zip_into_workspace
def _skill_market_root() -> str:
return os.path.abspath(os.path.join(DATA_ROOT, "skills"))
def _extract_skill_zip_summary(zip_path: str) -> Dict[str, Any]:
entry_names: List[str] = []
description = ""
with zipfile.ZipFile(zip_path) as archive:
members = archive.infolist()
file_members = [member for member in members if not member.is_dir()]
for member in file_members:
raw_name = str(member.filename or "").replace("\\", "/").lstrip("/")
if not raw_name:
continue
first = raw_name.split("/", 1)[0].strip()
if _is_ignored_skill_zip_top_level(first):
continue
if _is_valid_top_level_skill_name(first) and first not in entry_names:
entry_names.append(first)
candidates = sorted(
[
str(member.filename or "").replace("\\", "/").lstrip("/")
for member in file_members
if str(member.filename or "").replace("\\", "/").rsplit("/", 1)[-1].lower()
in {"skill.md", "readme.md"}
],
key=lambda value: (value.count("/"), value.lower()),
)
for candidate in candidates:
try:
with archive.open(candidate, "r") as file:
preview = file.read(4096).decode("utf-8", errors="ignore")
description = _read_description_from_text(preview)
if description:
break
except Exception:
continue
return {
"entry_names": entry_names,
"description": description,
}
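
For a package laid out like the hypothetical archive below (and assuming __MACOSX is one of the ignored top levels), the summary keeps one entry per valid top-level name and takes the description from the first SKILL.md or README.md that yields one:

# my-skill/SKILL.md         -> entry "my-skill"; the first non-heading line
# my-skill/scripts/run.py      of SKILL.md becomes the description
# __MACOSX/._SKILL.md       -> skipped as an ignored top level (assumed)
# summary == {"entry_names": ["my-skill"], "description": "..."}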
def _resolve_unique_skill_market_key(session: Session, preferred_key: str, exclude_id: Optional[int] = None) -> str:
base_key = _sanitize_skill_market_key(preferred_key) or "skill"
candidate = base_key
counter = 2
while True:
stmt = select(SkillMarketItem).where(SkillMarketItem.skill_key == candidate)
rows = session.exec(stmt).all()
conflict = next((row for row in rows if exclude_id is None or row.id != exclude_id), None)
if not conflict:
return candidate
candidate = f"{base_key}-{counter}"
counter += 1
def _resolve_unique_skill_market_zip_filename(
session: Session,
filename: str,
*,
exclude_filename: Optional[str] = None,
exclude_id: Optional[int] = None,
) -> str:
root = _skill_market_root()
os.makedirs(root, exist_ok=True)
safe_name = _sanitize_zip_filename(filename)
if not safe_name.lower().endswith(".zip"):
raise HTTPException(status_code=400, detail="Only .zip skill package is supported")
candidate = safe_name
stem, ext = os.path.splitext(safe_name)
counter = 2
while True:
file_conflict = os.path.exists(os.path.join(root, candidate)) and candidate != str(exclude_filename or "").strip()
rows = session.exec(select(SkillMarketItem).where(SkillMarketItem.zip_filename == candidate)).all()
db_conflict = next((row for row in rows if exclude_id is None or row.id != exclude_id), None)
if not file_conflict and not db_conflict:
return candidate
candidate = f"{stem}-{counter}{ext}"
counter += 1
async def _store_skill_market_zip_upload(
session: Session,
upload: UploadFile,
*,
exclude_filename: Optional[str] = None,
exclude_id: Optional[int] = None,
) -> Dict[str, Any]:
root = _skill_market_root()
os.makedirs(root, exist_ok=True)
incoming_name = _sanitize_zip_filename(upload.filename or "")
if not incoming_name.lower().endswith(".zip"):
raise HTTPException(status_code=400, detail="Only .zip skill package is supported")
target_filename = _resolve_unique_skill_market_zip_filename(
session,
incoming_name,
exclude_filename=exclude_filename,
exclude_id=exclude_id,
)
max_bytes = get_platform_settings_snapshot().upload_max_mb * 1024 * 1024
total_size = 0
tmp_path: Optional[str] = None
try:
with tempfile.NamedTemporaryFile(prefix=".skill_market_", suffix=".zip", dir=root, delete=False) as tmp_zip:
tmp_path = tmp_zip.name
while True:
chunk = await upload.read(1024 * 1024)
if not chunk:
break
total_size += len(chunk)
if total_size > max_bytes:
raise HTTPException(
status_code=413,
detail=f"Zip package too large (max {max_bytes // (1024 * 1024)}MB)",
)
tmp_zip.write(chunk)
if total_size == 0:
raise HTTPException(status_code=400, detail="Zip package is empty")
summary = _extract_skill_zip_summary(tmp_path)
if not summary["entry_names"]:
raise HTTPException(status_code=400, detail="Zip package has no valid skill entries")
final_path = os.path.join(root, target_filename)
os.replace(tmp_path, final_path)
tmp_path = None
return {
"zip_filename": target_filename,
"zip_size_bytes": total_size,
"entry_names": summary["entry_names"],
"description": summary["description"],
}
except zipfile.BadZipFile as exc:
raise HTTPException(status_code=400, detail="Invalid zip file") from exc
finally:
await upload.close()
if tmp_path and os.path.exists(tmp_path):
os.remove(tmp_path)
def _serialize_skill_market_item(
item: SkillMarketItem,
*,
install_count: int = 0,
install_row: Optional[BotSkillInstall] = None,
workspace_installed: Optional[bool] = None,
installed_entries: Optional[List[str]] = None,
) -> Dict[str, Any]:
zip_path = os.path.join(_skill_market_root(), str(item.zip_filename or ""))
entry_names = _parse_json_string_list(item.entry_names_json)
payload = {
"id": item.id,
"skill_key": item.skill_key,
"display_name": item.display_name or item.skill_key,
"description": item.description or "",
"zip_filename": item.zip_filename,
"zip_size_bytes": int(item.zip_size_bytes or 0),
"entry_names": entry_names,
"entry_count": len(entry_names),
"zip_exists": os.path.isfile(zip_path),
"install_count": int(install_count or 0),
"created_at": item.created_at.isoformat() + "Z" if item.created_at else None,
"updated_at": item.updated_at.isoformat() + "Z" if item.updated_at else None,
}
if install_row is not None:
resolved_entries = (
installed_entries
if installed_entries is not None
else _parse_json_string_list(install_row.installed_entries_json)
)
resolved_installed = workspace_installed if workspace_installed is not None else install_row.status == "INSTALLED"
payload.update(
{
"installed": resolved_installed,
"install_status": install_row.status,
"installed_at": install_row.installed_at.isoformat() + "Z" if install_row.installed_at else None,
"installed_entries": resolved_entries,
"install_error": install_row.last_error,
}
)
return payload
def _build_install_count_by_skill(session: Session) -> Dict[int, int]:
installs = session.exec(select(BotSkillInstall)).all()
install_count_by_skill: Dict[int, int] = {}
for row in installs:
skill_id = int(row.skill_market_item_id or 0)
if skill_id <= 0 or row.status != "INSTALLED":
continue
install_count_by_skill[skill_id] = install_count_by_skill.get(skill_id, 0) + 1
return install_count_by_skill
def list_skill_market_items(session: Session) -> List[Dict[str, Any]]:
items = session.exec(select(SkillMarketItem).order_by(SkillMarketItem.display_name, SkillMarketItem.id)).all()
install_count_by_skill = _build_install_count_by_skill(session)
return [
_serialize_skill_market_item(item, install_count=install_count_by_skill.get(int(item.id or 0), 0))
for item in items
]
async def create_skill_market_item_record(
session: Session,
*,
skill_key: str,
display_name: str,
description: str,
upload: UploadFile,
) -> Dict[str, Any]:
upload_meta = await _store_skill_market_zip_upload(session, upload)
try:
preferred_key = skill_key or display_name or os.path.splitext(upload_meta["zip_filename"])[0]
next_key = _resolve_unique_skill_market_key(session, preferred_key)
item = SkillMarketItem(
skill_key=next_key,
display_name=str(display_name or next_key).strip() or next_key,
description=str(description or upload_meta["description"] or "").strip(),
zip_filename=upload_meta["zip_filename"],
zip_size_bytes=int(upload_meta["zip_size_bytes"] or 0),
entry_names_json=json.dumps(upload_meta["entry_names"], ensure_ascii=False),
)
session.add(item)
session.commit()
session.refresh(item)
return _serialize_skill_market_item(item, install_count=0)
except Exception:
target_path = os.path.join(_skill_market_root(), upload_meta["zip_filename"])
if os.path.exists(target_path):
os.remove(target_path)
raise
async def update_skill_market_item_record(
session: Session,
*,
skill_id: int,
skill_key: str,
display_name: str,
description: str,
upload: Optional[UploadFile] = None,
) -> Dict[str, Any]:
item = session.get(SkillMarketItem, skill_id)
if not item:
raise HTTPException(status_code=404, detail="Skill market item not found")
old_filename = str(item.zip_filename or "").strip()
upload_meta: Optional[Dict[str, Any]] = None
if upload is not None:
upload_meta = await _store_skill_market_zip_upload(
session,
upload,
exclude_filename=old_filename or None,
exclude_id=item.id,
)
next_key = _resolve_unique_skill_market_key(
session,
skill_key or item.skill_key or display_name or os.path.splitext(upload_meta["zip_filename"] if upload_meta else old_filename)[0],
exclude_id=item.id,
)
item.skill_key = next_key
item.display_name = str(display_name or item.display_name or next_key).strip() or next_key
item.description = str(description or (upload_meta["description"] if upload_meta else item.description) or "").strip()
item.updated_at = datetime.utcnow()
if upload_meta:
item.zip_filename = upload_meta["zip_filename"]
item.zip_size_bytes = int(upload_meta["zip_size_bytes"] or 0)
item.entry_names_json = json.dumps(upload_meta["entry_names"], ensure_ascii=False)
session.add(item)
session.commit()
session.refresh(item)
if upload_meta and old_filename and old_filename != upload_meta["zip_filename"]:
old_path = os.path.join(_skill_market_root(), old_filename)
if os.path.exists(old_path):
os.remove(old_path)
installs = session.exec(select(BotSkillInstall).where(BotSkillInstall.skill_market_item_id == skill_id)).all()
install_count = sum(1 for row in installs if row.status == "INSTALLED")
return _serialize_skill_market_item(item, install_count=install_count)
def delete_skill_market_item_record(session: Session, *, skill_id: int) -> Dict[str, Any]:
item = session.get(SkillMarketItem, skill_id)
if not item:
raise HTTPException(status_code=404, detail="Skill market item not found")
zip_filename = str(item.zip_filename or "").strip()
installs = session.exec(select(BotSkillInstall).where(BotSkillInstall.skill_market_item_id == skill_id)).all()
for row in installs:
session.delete(row)
session.delete(item)
session.commit()
if zip_filename:
zip_path = os.path.join(_skill_market_root(), zip_filename)
if os.path.exists(zip_path):
os.remove(zip_path)
return {"status": "deleted", "id": skill_id}
def list_bot_skill_market_items(session: Session, *, bot_id: str) -> List[Dict[str, Any]]:
items = session.exec(select(SkillMarketItem).order_by(SkillMarketItem.display_name, SkillMarketItem.id)).all()
install_rows = session.exec(select(BotSkillInstall).where(BotSkillInstall.bot_id == bot_id)).all()
install_lookup = {int(row.skill_market_item_id): row for row in install_rows}
install_count_by_skill = _build_install_count_by_skill(session)
return [
_serialize_skill_market_item(
item,
install_count=install_count_by_skill.get(int(item.id or 0), 0),
install_row=install_lookup.get(int(item.id or 0)),
workspace_installed=(
None
if install_lookup.get(int(item.id or 0)) is None
else (
install_lookup[int(item.id or 0)].status == "INSTALLED"
and all(
os.path.exists(os.path.join(get_bot_skills_root(bot_id), name))
for name in _parse_json_string_list(install_lookup[int(item.id or 0)].installed_entries_json)
)
)
),
installed_entries=(
None
if install_lookup.get(int(item.id or 0)) is None
else _parse_json_string_list(install_lookup[int(item.id or 0)].installed_entries_json)
),
)
for item in items
]
def install_skill_market_item_for_bot(
session: Session,
*,
bot_id: str,
skill_id: int,
) -> Dict[str, Any]:
item = session.get(SkillMarketItem, skill_id)
if not item:
raise HTTPException(status_code=404, detail="Skill market item not found")
zip_path = os.path.join(_skill_market_root(), str(item.zip_filename or ""))
if not os.path.isfile(zip_path):
raise HTTPException(status_code=404, detail="Skill zip package not found")
install_row = session.exec(
select(BotSkillInstall).where(
BotSkillInstall.bot_id == bot_id,
BotSkillInstall.skill_market_item_id == skill_id,
)
).first()
try:
install_result = install_skill_zip_into_workspace(bot_id, zip_path)
now = datetime.utcnow()
if not install_row:
install_row = BotSkillInstall(
bot_id=bot_id,
skill_market_item_id=skill_id,
)
install_row.installed_entries_json = json.dumps(install_result["installed"], ensure_ascii=False)
install_row.source_zip_filename = str(item.zip_filename or "")
install_row.status = "INSTALLED"
install_row.last_error = None
install_row.installed_at = now
install_row.updated_at = now
session.add(install_row)
session.commit()
session.refresh(install_row)
return {
"status": "installed",
"bot_id": bot_id,
"skill_market_item_id": skill_id,
"installed": install_result["installed"],
"skills": install_result["skills"],
"market_item": _serialize_skill_market_item(item, install_count=0, install_row=install_row),
}
except HTTPException as exc:
now = datetime.utcnow()
if not install_row:
install_row = BotSkillInstall(
bot_id=bot_id,
skill_market_item_id=skill_id,
installed_at=now,
)
install_row.source_zip_filename = str(item.zip_filename or "")
install_row.status = "FAILED"
install_row.last_error = str(exc.detail or "Install failed")
install_row.updated_at = now
session.add(install_row)
session.commit()
raise
except Exception as exc:
now = datetime.utcnow()
if not install_row:
install_row = BotSkillInstall(
bot_id=bot_id,
skill_market_item_id=skill_id,
installed_at=now,
)
install_row.source_zip_filename = str(item.zip_filename or "")
install_row.status = "FAILED"
install_row.last_error = str(exc or "Install failed")[:1000]
install_row.updated_at = now
session.add(install_row)
session.commit()
raise HTTPException(status_code=500, detail="Skill install failed unexpectedly") from exc

View File

@@ -0,0 +1,207 @@
import shutil
import zipfile
import tempfile
from datetime import datetime
import os
from typing import Any, Dict, List, Optional
from fastapi import HTTPException, UploadFile
from core.utils import (
_is_ignored_skill_zip_top_level,
_is_valid_top_level_skill_name,
)
from services.bot_storage_service import get_bot_workspace_root
from services.platform_settings_service import get_platform_settings_snapshot
def get_bot_skills_root(bot_id: str) -> str:
return _skills_root(bot_id)
def _skills_root(bot_id: str) -> str:
return os.path.join(get_bot_workspace_root(bot_id), "skills")
def _read_skill_description(entry_path: str) -> str:
candidates: List[str] = []
if os.path.isdir(entry_path):
candidates = [
os.path.join(entry_path, "SKILL.md"),
os.path.join(entry_path, "skill.md"),
os.path.join(entry_path, "README.md"),
os.path.join(entry_path, "readme.md"),
]
elif entry_path.lower().endswith(".md"):
candidates = [entry_path]
for candidate in candidates:
if not os.path.isfile(candidate):
continue
try:
with open(candidate, "r", encoding="utf-8") as f:
for line in f:
text = line.strip()
if text and not text.startswith("#"):
return text[:240]
except Exception:
continue
return ""
def _list_workspace_skills(bot_id: str) -> List[Dict[str, Any]]:
root = _skills_root(bot_id)
os.makedirs(root, exist_ok=True)
rows: List[Dict[str, Any]] = []
names = sorted(os.listdir(root), key=lambda n: (not os.path.isdir(os.path.join(root, n)), n.lower()))
for name in names:
if not name or name.startswith("."):
continue
if not _is_valid_top_level_skill_name(name):
continue
abs_path = os.path.join(root, name)
if not os.path.exists(abs_path):
continue
stat = os.stat(abs_path)
rows.append(
{
"id": name,
"name": name,
"type": "dir" if os.path.isdir(abs_path) else "file",
"path": f"skills/{name}",
"size": stat.st_size if os.path.isfile(abs_path) else None,
"mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z",
"description": _read_skill_description(abs_path),
}
)
return rows
def _install_skill_zip_into_workspace(bot_id: str, zip_path: str) -> Dict[str, Any]:
try:
archive = zipfile.ZipFile(zip_path)
except Exception as exc:
raise HTTPException(status_code=400, detail="Invalid zip file") from exc
skills_root = _skills_root(bot_id)
os.makedirs(skills_root, exist_ok=True)
installed: List[str] = []
with archive:
members = archive.infolist()
file_members = [m for m in members if not m.is_dir()]
if not file_members:
raise HTTPException(status_code=400, detail="Zip package has no files")
top_names: List[str] = []
for member in file_members:
raw_name = str(member.filename or "").replace("\\", "/").lstrip("/")
if not raw_name:
continue
first = raw_name.split("/", 1)[0].strip()
if _is_ignored_skill_zip_top_level(first):
continue
if not _is_valid_top_level_skill_name(first):
raise HTTPException(status_code=400, detail=f"Invalid skill entry name in zip: {first}")
if first not in top_names:
top_names.append(first)
if not top_names:
raise HTTPException(status_code=400, detail="Zip package has no valid skill entries")
conflicts = [name for name in top_names if os.path.exists(os.path.join(skills_root, name))]
if conflicts:
raise HTTPException(status_code=400, detail=f"Skill already exists: {', '.join(conflicts)}")
with tempfile.TemporaryDirectory(prefix=".skill_upload_", dir=skills_root) as tmp_dir:
tmp_root = os.path.abspath(tmp_dir)
for member in members:
raw_name = str(member.filename or "").replace("\\", "/").lstrip("/")
if not raw_name:
continue
target = os.path.abspath(os.path.join(tmp_root, raw_name))
if os.path.commonpath([tmp_root, target]) != tmp_root:
raise HTTPException(status_code=400, detail=f"Unsafe zip entry path: {raw_name}")
if member.is_dir():
os.makedirs(target, exist_ok=True)
continue
os.makedirs(os.path.dirname(target), exist_ok=True)
with archive.open(member, "r") as source, open(target, "wb") as dest:
shutil.copyfileobj(source, dest)
for name in top_names:
src = os.path.join(tmp_root, name)
dst = os.path.join(skills_root, name)
if not os.path.exists(src):
continue
shutil.move(src, dst)
installed.append(name)
if not installed:
raise HTTPException(status_code=400, detail="No skill entries installed from zip")
return {
"installed": installed,
"skills": _list_workspace_skills(bot_id),
}
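
The commonpath comparison above is the zip-slip guard; a standalone check of the rejected case, assuming POSIX paths:

import os

tmp_root = os.path.abspath("/tmp/skills_stage")            # hypothetical staging dir
evil = os.path.abspath(os.path.join(tmp_root, "../../etc/passwd"))
assert os.path.commonpath([tmp_root, evil]) != tmp_root    # traversal entry is rejected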
def install_skill_zip_into_workspace(bot_id: str, zip_path: str) -> Dict[str, Any]:
return _install_skill_zip_into_workspace(bot_id, zip_path)
def list_bot_skills(bot_id: str) -> List[Dict[str, Any]]:
return _list_workspace_skills(bot_id)
async def upload_bot_skill_zip_to_workspace(bot_id: str, *, upload: UploadFile) -> Dict[str, Any]:
tmp_zip_path: Optional[str] = None
try:
with tempfile.NamedTemporaryFile(prefix=".skill_upload_", suffix=".zip", delete=False) as tmp_zip:
tmp_zip_path = tmp_zip.name
filename = str(upload.filename or "").strip()
if not filename.lower().endswith(".zip"):
raise HTTPException(status_code=400, detail="Only .zip skill package is supported")
max_bytes = get_platform_settings_snapshot().upload_max_mb * 1024 * 1024
total_size = 0
while True:
chunk = await upload.read(1024 * 1024)
if not chunk:
break
total_size += len(chunk)
if total_size > max_bytes:
raise HTTPException(
status_code=413,
detail=f"Zip package too large (max {max_bytes // (1024 * 1024)}MB)",
)
tmp_zip.write(chunk)
if total_size == 0:
raise HTTPException(status_code=400, detail="Zip package is empty")
finally:
await upload.close()
try:
install_result = _install_skill_zip_into_workspace(bot_id, tmp_zip_path)
finally:
if tmp_zip_path and os.path.exists(tmp_zip_path):
os.remove(tmp_zip_path)
return {
"status": "installed",
"bot_id": bot_id,
"installed": install_result["installed"],
"skills": install_result["skills"],
}
def delete_workspace_skill_entry(bot_id: str, *, skill_name: str) -> Dict[str, Any]:
name = str(skill_name or "").strip()
if not _is_valid_top_level_skill_name(name):
raise HTTPException(status_code=400, detail="Invalid skill name")
root = _skills_root(bot_id)
target = os.path.abspath(os.path.join(root, name))
if os.path.commonpath([os.path.abspath(root), target]) != os.path.abspath(root):
raise HTTPException(status_code=400, detail="Invalid skill path")
if not os.path.exists(target):
raise HTTPException(status_code=404, detail="Skill not found in workspace")
if os.path.isdir(target):
shutil.rmtree(target, ignore_errors=False)
else:
os.remove(target)
return {"status": "deleted", "bot_id": bot_id, "skill": name}

View File

@@ -0,0 +1,99 @@
import asyncio
import os
import tempfile
from typing import Any, Dict, Optional
from fastapi import HTTPException, UploadFile
from sqlmodel import Session
from core.settings import DATA_ROOT
from core.speech_service import (
SpeechDisabledError,
SpeechDurationError,
SpeechServiceError,
WhisperSpeechService,
)
from models.bot import BotInstance
from services.platform_settings_service import get_speech_runtime_settings
async def transcribe_bot_speech_upload(
session: Session,
bot_id: str,
upload: UploadFile,
language: Optional[str],
speech_service: WhisperSpeechService,
logger: Any,
) -> Dict[str, Any]:
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
speech_settings = get_speech_runtime_settings()
if not speech_settings["enabled"]:
raise HTTPException(status_code=400, detail="Speech recognition is disabled")
if not upload:
raise HTTPException(status_code=400, detail="no audio file uploaded")
original_name = str(upload.filename or "audio.webm").strip() or "audio.webm"
safe_name = os.path.basename(original_name).replace("\\", "_").replace("/", "_")
ext = os.path.splitext(safe_name)[1].strip().lower() or ".webm"
if len(ext) > 12:
ext = ".webm"
tmp_path = ""
try:
with tempfile.NamedTemporaryFile(delete=False, suffix=ext, prefix=".speech_", dir=DATA_ROOT) as tmp:
tmp_path = tmp.name
while True:
chunk = await upload.read(1024 * 1024)
if not chunk:
break
tmp.write(chunk)
if not tmp_path or not os.path.exists(tmp_path) or os.path.getsize(tmp_path) <= 0:
raise HTTPException(status_code=400, detail="audio payload is empty")
resolved_language = str(language or "").strip() or speech_settings["default_language"]
result = await asyncio.to_thread(speech_service.transcribe_file, tmp_path, resolved_language)
text = str(result.get("text") or "").strip()
if not text:
raise HTTPException(status_code=400, detail="No speech detected")
return {
"bot_id": bot_id,
"text": text,
"duration_seconds": result.get("duration_seconds"),
"max_audio_seconds": speech_settings["max_audio_seconds"],
"model": speech_settings["model"],
"device": speech_settings["device"],
"language": result.get("language") or resolved_language,
}
except SpeechDisabledError as exc:
logger.warning("speech transcribe disabled bot_id=%s file=%s language=%s detail=%s", bot_id, safe_name, language, exc)
raise HTTPException(status_code=400, detail=str(exc)) from exc
except SpeechDurationError as exc:
logger.warning(
"speech transcribe too long bot_id=%s file=%s language=%s max_seconds=%s",
bot_id,
safe_name,
language,
speech_settings["max_audio_seconds"],
)
raise HTTPException(status_code=413, detail=f"Audio duration exceeds {speech_settings['max_audio_seconds']} seconds") from exc
except SpeechServiceError as exc:
logger.exception("speech transcribe failed bot_id=%s file=%s language=%s", bot_id, safe_name, language)
raise HTTPException(status_code=400, detail=str(exc)) from exc
except HTTPException:
raise
except Exception as exc:
logger.exception("speech transcribe unexpected error bot_id=%s file=%s language=%s", bot_id, safe_name, language)
raise HTTPException(status_code=500, detail=f"speech transcription failed: {exc}") from exc
finally:
try:
await upload.close()
except Exception:
pass
if tmp_path and os.path.exists(tmp_path):
try:
os.remove(tmp_path)
except Exception:
pass

View File

@@ -0,0 +1,98 @@
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, List
from core.settings import AGENT_MD_TEMPLATES_FILE, TOPIC_PRESETS_TEMPLATES_FILE
TEMPLATE_KEYS = ("agents_md", "soul_md", "user_md", "tools_md", "identity_md")
def _load_json_object(path: Path, *, label: str) -> Dict[str, Any]:
import json
target = Path(path).resolve()
if not target.is_file():
raise RuntimeError(
f"Missing required {label} file: {target}. "
"Please restore the tracked files under data/templates before starting the backend."
)
try:
with target.open("r", encoding="utf-8") as file:
data = json.load(file)
except Exception as exc:
raise RuntimeError(f"Invalid JSON in {label} file: {target}") from exc
if not isinstance(data, dict):
raise RuntimeError(f"{label} file must contain a JSON object: {target}")
return data
def _normalize_md_text(value: Any) -> str:
return str(value or "").replace("\r\n", "\n").strip()
def _write_json_atomic(path: str, payload: Dict[str, Any]) -> None:
import json
import os
os.makedirs(os.path.dirname(path), exist_ok=True)
tmp_path = f"{path}.tmp"
with open(tmp_path, "w", encoding="utf-8") as file:
json.dump(payload, file, ensure_ascii=False, indent=2)
os.replace(tmp_path, path)
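
os.replace is an atomic rename on the same filesystem, so a concurrent reader sees either the old JSON or the new one, never a torn write. A usage sketch with a hypothetical path:

_write_json_atomic("/tmp/topic_presets.json", {"presets": []})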
def get_agent_md_templates() -> Dict[str, str]:
raw = _load_json_object(AGENT_MD_TEMPLATES_FILE, label="agent templates")
missing_keys = [key for key in TEMPLATE_KEYS if key not in raw]
if missing_keys:
raise RuntimeError(
"Agent template file is missing required keys: "
f"{', '.join(missing_keys)}. File: {Path(AGENT_MD_TEMPLATES_FILE).resolve()}"
)
return {key: _normalize_md_text(raw.get(key)) for key in TEMPLATE_KEYS}
def get_topic_presets() -> Dict[str, Any]:
raw = _load_json_object(TOPIC_PRESETS_TEMPLATES_FILE, label="topic presets")
presets = raw.get("presets")
if not isinstance(presets, list):
raise RuntimeError(
f"Topic presets file must contain a presets array: {Path(TOPIC_PRESETS_TEMPLATES_FILE).resolve()}"
)
invalid_rows = [index for index, row in enumerate(presets) if not isinstance(row, dict)]
if invalid_rows:
raise RuntimeError(
"Topic presets file contains non-object entries at indexes: "
f"{', '.join(str(index) for index in invalid_rows)}. "
f"File: {Path(TOPIC_PRESETS_TEMPLATES_FILE).resolve()}"
)
return {"presets": [dict(row) for row in presets]}
def update_agent_md_templates(raw: Dict[str, Any]) -> Dict[str, str]:
payload = {key: _normalize_md_text(raw.get(key)) for key in TEMPLATE_KEYS}
_write_json_atomic(str(AGENT_MD_TEMPLATES_FILE), payload)
return payload
def update_topic_presets(raw: Dict[str, Any]) -> Dict[str, Any]:
presets = raw.get("presets") if isinstance(raw, dict) else None
if not isinstance(presets, list):
raise ValueError("topic_presets.presets must be an array")
invalid_rows = [index for index, row in enumerate(presets) if not isinstance(row, dict)]
if invalid_rows:
raise ValueError(
"topic_presets.presets must contain objects only; invalid indexes: "
+ ", ".join(str(index) for index in invalid_rows)
)
payload: Dict[str, List[Dict[str, Any]]] = {"presets": [dict(row) for row in presets]}
_write_json_atomic(str(TOPIC_PRESETS_TEMPLATES_FILE), payload)
return payload
def get_agent_template_value(key: str) -> str:
templates = get_agent_md_templates()
if key not in templates:
raise KeyError(f"Unknown agent template key: {key}")
return templates[key]

View File

@@ -3,7 +3,7 @@ from typing import Any, Dict, Optional
from sqlmodel import Session
-from services.topic_service import _topic_publish_internal
+from services.topic_service import publish_topic_item
from .publisher import build_topic_publish_payload
@@ -30,6 +30,6 @@ def publish_runtime_topic_packet(
try:
with Session(engine) as session:
-_topic_publish_internal(session, bot_id, topic_payload)
+publish_topic_item(session, bot_id, topic_payload)
except Exception:
logger.exception("topic auto publish failed for bot %s packet %s", bot_id, packet_type)

View File

@@ -3,13 +3,16 @@ import re
from datetime import datetime
from typing import Any, Dict, List, Optional
from fastapi import HTTPException
from sqlmodel import Session, select
from models.bot import BotInstance
from models.topic import TopicItem, TopicTopic
TOPIC_DEDUPE_WINDOW_SECONDS = 10 * 60
TOPIC_LEVEL_SET = {"info", "warn", "error", "success"}
_TOPIC_KEY_RE = re.compile(r"^[a-z0-9][a-z0-9_.-]{0,63}$")
TOPIC_KEY_RE = _TOPIC_KEY_RE
def _as_bool(value: Any) -> bool:
@@ -101,6 +104,13 @@ def _topic_get_row(session: Session, bot_id: str, topic_key: str) -> Optional[To
).first()
def _get_bot_or_404(session: Session, bot_id: str) -> BotInstance:
bot = session.get(BotInstance, bot_id)
if not bot:
raise HTTPException(status_code=404, detail="Bot not found")
return bot
def _normalize_topic_keywords(raw: Any) -> List[str]:
rows: List[str] = []
if isinstance(raw, list):
@@ -338,3 +348,217 @@ def _topic_publish_internal(session: Session, bot_id: str, payload: Dict[str, An
"item": _topic_item_to_dict(item),
"route": route_result,
}
def normalize_topic_key(raw: Any) -> str:
return _normalize_topic_key(raw)
def list_topics(session: Session, bot_id: str) -> List[Dict[str, Any]]:
_get_bot_or_404(session, bot_id)
return _list_topics(session, bot_id)
def create_topic(
session: Session,
*,
bot_id: str,
topic_key: str,
name: Optional[str] = None,
description: Optional[str] = None,
is_active: bool = True,
routing: Optional[Dict[str, Any]] = None,
view_schema: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
normalized_key = _normalize_topic_key(topic_key)
if not normalized_key:
raise HTTPException(status_code=400, detail="topic_key is required")
if not TOPIC_KEY_RE.fullmatch(normalized_key):
raise HTTPException(status_code=400, detail="invalid topic_key")
exists = _topic_get_row(session, bot_id, normalized_key)
if exists:
raise HTTPException(status_code=400, detail=f"Topic already exists: {normalized_key}")
now = datetime.utcnow()
row = TopicTopic(
bot_id=bot_id,
topic_key=normalized_key,
name=str(name or normalized_key).strip() or normalized_key,
description=str(description or "").strip(),
is_active=bool(is_active),
is_default_fallback=False,
routing_json=json.dumps(routing or {}, ensure_ascii=False),
view_schema_json=json.dumps(view_schema or {}, ensure_ascii=False),
created_at=now,
updated_at=now,
)
session.add(row)
session.commit()
session.refresh(row)
return _topic_to_dict(row)
def update_topic(
session: Session,
*,
bot_id: str,
topic_key: str,
updates: Dict[str, Any],
) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
normalized_key = _normalize_topic_key(topic_key)
if not normalized_key:
raise HTTPException(status_code=400, detail="topic_key is required")
row = _topic_get_row(session, bot_id, normalized_key)
if not row:
raise HTTPException(status_code=404, detail="Topic not found")
if "name" in updates:
row.name = str(updates.get("name") or "").strip() or row.topic_key
if "description" in updates:
row.description = str(updates.get("description") or "").strip()
if "is_active" in updates:
row.is_active = bool(updates.get("is_active"))
if "routing" in updates:
row.routing_json = json.dumps(updates.get("routing") or {}, ensure_ascii=False)
if "view_schema" in updates:
row.view_schema_json = json.dumps(updates.get("view_schema") or {}, ensure_ascii=False)
row.is_default_fallback = False
row.updated_at = datetime.utcnow()
session.add(row)
session.commit()
session.refresh(row)
return _topic_to_dict(row)
def delete_topic(session: Session, *, bot_id: str, topic_key: str) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
normalized_key = _normalize_topic_key(topic_key)
if not normalized_key:
raise HTTPException(status_code=400, detail="topic_key is required")
row = _topic_get_row(session, bot_id, normalized_key)
if not row:
raise HTTPException(status_code=404, detail="Topic not found")
items = session.exec(
select(TopicItem)
.where(TopicItem.bot_id == bot_id)
.where(TopicItem.topic_key == normalized_key)
).all()
for item in items:
session.delete(item)
session.delete(row)
session.commit()
return {"status": "deleted", "bot_id": bot_id, "topic_key": normalized_key}
def _count_topic_items(
session: Session,
bot_id: str,
topic_key: Optional[str] = None,
unread_only: bool = False,
) -> int:
stmt = select(TopicItem).where(TopicItem.bot_id == bot_id)
normalized_topic_key = _normalize_topic_key(topic_key or "")
if normalized_topic_key:
stmt = stmt.where(TopicItem.topic_key == normalized_topic_key)
rows = session.exec(stmt).all()
if unread_only:
return sum(1 for row in rows if not bool(row.is_read))
return len(rows)
def list_topic_items(
session: Session,
*,
bot_id: str,
topic_key: Optional[str] = None,
cursor: Optional[int] = None,
limit: int = 50,
) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
normalized_limit = max(1, min(int(limit or 50), 100))
stmt = select(TopicItem).where(TopicItem.bot_id == bot_id)
normalized_topic_key = _normalize_topic_key(topic_key or "")
if normalized_topic_key:
stmt = stmt.where(TopicItem.topic_key == normalized_topic_key)
if cursor is not None:
normalized_cursor = int(cursor)
if normalized_cursor > 0:
stmt = stmt.where(TopicItem.id < normalized_cursor)
rows = session.exec(stmt.order_by(TopicItem.id.desc()).limit(normalized_limit + 1)).all()
next_cursor: Optional[int] = None
if len(rows) > normalized_limit:
rows = rows[:normalized_limit]
# The cursor must be the id of the last *returned* row: the next page
# filters on TopicItem.id < cursor, so using the extra probe row's id
# here would silently skip one item between pages.
next_cursor = rows[-1].id
return {
"bot_id": bot_id,
"topic_key": normalized_topic_key or None,
"items": [_topic_item_to_dict(row) for row in rows],
"next_cursor": next_cursor,
"unread_count": _count_topic_items(session, bot_id, normalized_topic_key, unread_only=True),
"total_unread_count": _count_topic_items(session, bot_id, unread_only=True),
}
def get_topic_item_stats(session: Session, *, bot_id: str) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
latest_item = session.exec(
select(TopicItem)
.where(TopicItem.bot_id == bot_id)
.order_by(TopicItem.id.desc())
.limit(1)
).first()
return {
"bot_id": bot_id,
"total_count": _count_topic_items(session, bot_id),
"unread_count": _count_topic_items(session, bot_id, unread_only=True),
"latest_item_id": int(latest_item.id or 0) if latest_item and latest_item.id else None,
}
def mark_topic_item_read(session: Session, *, bot_id: str, item_id: int) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
row = session.exec(
select(TopicItem)
.where(TopicItem.bot_id == bot_id)
.where(TopicItem.id == item_id)
.limit(1)
).first()
if not row:
raise HTTPException(status_code=404, detail="Topic item not found")
if not bool(row.is_read):
row.is_read = True
session.add(row)
session.commit()
session.refresh(row)
return {
"status": "updated",
"bot_id": bot_id,
"item": _topic_item_to_dict(row),
}
def delete_topic_item(session: Session, *, bot_id: str, item_id: int) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
row = session.exec(
select(TopicItem)
.where(TopicItem.bot_id == bot_id)
.where(TopicItem.id == item_id)
.limit(1)
).first()
if not row:
raise HTTPException(status_code=404, detail="Topic item not found")
payload = _topic_item_to_dict(row)
session.delete(row)
session.commit()
return {
"status": "deleted",
"bot_id": bot_id,
"item": payload,
}
def publish_topic_item(session: Session, bot_id: str, payload: Dict[str, Any]) -> Dict[str, Any]:
_get_bot_or_404(session, bot_id)
return _topic_publish_internal(session, bot_id, payload)
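# A minimal, hypothetical pagination sketch against list_topic_items above
# (illustrative only; assumes an open SQLModel Session `session` and an
# existing bot "demo" with an "alerts" topic):
cursor = None
while True:
page = list_topic_items(session, bot_id="demo", topic_key="alerts", cursor=cursor, limit=50)
for item in page["items"]:
print(item)
cursor = page["next_cursor"]
if cursor is None:
break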

View File

@ -0,0 +1,95 @@
import base64
import hashlib
import hmac
import json
import time
from typing import Any, Dict, Optional
from core.settings import WORKSPACE_PREVIEW_SIGNING_SECRET, WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS
HTML_PREVIEW_EXTENSIONS = {".html", ".htm"}
def _b64url_encode(raw: bytes) -> str:
return base64.urlsafe_b64encode(raw).decode("ascii").rstrip("=")
def _b64url_decode(raw: str) -> bytes:
padding = "=" * (-len(raw) % 4)
return base64.urlsafe_b64decode(f"{raw}{padding}".encode("ascii"))
def normalize_workspace_preview_path(path: str) -> str:
return "/".join(part for part in str(path or "").strip().replace("\\", "/").split("/") if part)
def is_html_preview_path(path: str) -> bool:
normalized = normalize_workspace_preview_path(path).lower()
return any(normalized.endswith(ext) for ext in HTML_PREVIEW_EXTENSIONS)
def create_workspace_preview_token(bot_id: str, path: str, ttl_seconds: Optional[int] = None) -> Dict[str, Any]:
normalized_bot_id = str(bot_id or "").strip()
normalized_path = normalize_workspace_preview_path(path)
ttl = max(60, min(int(ttl_seconds or WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS), 86400))
expires_at = int(time.time()) + ttl
payload = {
"bot_id": normalized_bot_id,
"entry_path": normalized_path,
"exp": expires_at,
"kind": "workspace-preview-session",
}
payload_json = json.dumps(payload, ensure_ascii=False, separators=(",", ":"), sort_keys=True).encode("utf-8")
body = _b64url_encode(payload_json)
signature = hmac.new(
WORKSPACE_PREVIEW_SIGNING_SECRET.encode("utf-8"),
body.encode("ascii"),
hashlib.sha256,
).digest()
return {
"token": f"{body}.{_b64url_encode(signature)}",
"expires_at": expires_at,
"ttl_seconds": ttl,
}
def resolve_workspace_preview_token(token: str) -> Optional[Dict[str, Any]]:
raw_token = str(token or "").strip()
if not raw_token or "." not in raw_token:
return None
body, signature_raw = raw_token.split(".", 1)
expected_signature = hmac.new(
WORKSPACE_PREVIEW_SIGNING_SECRET.encode("utf-8"),
body.encode("ascii"),
hashlib.sha256,
).digest()
try:
provided_signature = _b64url_decode(signature_raw)
except Exception:
return None
if not hmac.compare_digest(expected_signature, provided_signature):
return None
try:
payload = json.loads(_b64url_decode(body).decode("utf-8"))
except Exception:
return None
if not isinstance(payload, dict):
return None
if str(payload.get("kind") or "") != "workspace-preview-session":
return None
bot_id = str(payload.get("bot_id") or "").strip()
entry_path = normalize_workspace_preview_path(str(payload.get("entry_path") or ""))
if not bot_id or not is_html_preview_path(entry_path):
return None
try:
expires_at = int(payload.get("exp") or 0)
except Exception:
return None
if expires_at < int(time.time()):
return None
return {
"bot_id": bot_id,
"entry_path": entry_path,
"expires_at": expires_at,
}
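# Hypothetical round-trip for the helpers above (illustrative only; assumes
# WORKSPACE_PREVIEW_SIGNING_SECRET is configured in core.settings):
token_data = create_workspace_preview_token("demo-bot", "reports/index.html", ttl_seconds=600)
resolved = resolve_workspace_preview_token(token_data["token"])
assert resolved and resolved["bot_id"] == "demo-bot"
# Any tampering with the token body breaks the HMAC check:
assert resolve_workspace_preview_token("x" + token_data["token"]) is None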

View File

@ -0,0 +1,504 @@
import mimetypes
import os
import re
from datetime import datetime
from typing import Any, Dict, Generator, List, Optional
from urllib.parse import quote
from fastapi import HTTPException, Request, UploadFile
from fastapi.responses import FileResponse, RedirectResponse, Response, StreamingResponse
from core.utils import _workspace_stat_ctime_iso
from services.bot_storage_service import get_bot_workspace_root
from services.platform_settings_service import get_platform_settings_snapshot
from services.workspace_preview_token_service import (
create_workspace_preview_token,
is_html_preview_path,
normalize_workspace_preview_path,
resolve_workspace_preview_token,
)
TEXT_PREVIEW_EXTENSIONS = {
"",
".md",
".txt",
".log",
".json",
".yaml",
".yml",
".cfg",
".ini",
".csv",
".tsv",
".toml",
".py",
".sh",
}
MARKDOWN_EXTENSIONS = {".md", ".markdown"}
def _is_hidden_workspace_name(name: str) -> bool:
return str(name or "").startswith(".")
def _resolve_workspace_path(bot_id: str, rel_path: Optional[str] = None) -> tuple[str, str]:
root = get_bot_workspace_root(bot_id)
rel = (rel_path or "").strip().replace("\\", "/")
target = os.path.abspath(os.path.join(root, rel))
if os.path.commonpath([root, target]) != root:
raise HTTPException(status_code=400, detail="invalid workspace path")
return root, target
def resolve_workspace_path(bot_id: str, rel_path: Optional[str] = None) -> tuple[str, str]:
return _resolve_workspace_path(bot_id, rel_path)
def _write_text_atomic(target: str, content: str) -> None:
os.makedirs(os.path.dirname(target), exist_ok=True)
tmp = f"{target}.tmp"
with open(tmp, "w", encoding="utf-8") as fh:
fh.write(content)
os.replace(tmp, target)
def _build_workspace_tree(path: str, root: str, depth: int) -> List[Dict[str, Any]]:
rows: List[Dict[str, Any]] = []
try:
names = sorted(os.listdir(path), key=lambda v: (not os.path.isdir(os.path.join(path, v)), v.lower()))
except FileNotFoundError:
return rows
for name in names:
if _is_hidden_workspace_name(name):
continue
abs_path = os.path.join(path, name)
rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
stat = os.stat(abs_path)
base: Dict[str, Any] = {
"name": name,
"path": rel_path,
"ctime": _workspace_stat_ctime_iso(stat),
"mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z",
}
if os.path.isdir(abs_path):
node = {**base, "type": "dir"}
if depth > 0:
node["children"] = _build_workspace_tree(abs_path, root, depth - 1)
rows.append(node)
continue
rows.append(
{
**base,
"type": "file",
"size": stat.st_size,
"ext": os.path.splitext(name)[1].lower(),
}
)
return rows
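# Note (added for clarity): depth counts the remaining expansion levels.
# depth=0 lists only this directory's entries; depth=1 also fills in each
# subdirectory's "children", and so on recursively.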
def _list_workspace_dir(path: str, root: str) -> List[Dict[str, Any]]:
rows: List[Dict[str, Any]] = []
names = sorted(os.listdir(path), key=lambda v: (not os.path.isdir(os.path.join(path, v)), v.lower()))
for name in names:
if _is_hidden_workspace_name(name):
continue
abs_path = os.path.join(path, name)
rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
stat = os.stat(abs_path)
rows.append(
{
"name": name,
"path": rel_path,
"type": "dir" if os.path.isdir(abs_path) else "file",
"size": stat.st_size if os.path.isfile(abs_path) else None,
"ext": os.path.splitext(name)[1].lower() if os.path.isfile(abs_path) else "",
"ctime": _workspace_stat_ctime_iso(stat),
"mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z",
}
)
return rows
def _list_workspace_dir_recursive(path: str, root: str) -> List[Dict[str, Any]]:
rows: List[Dict[str, Any]] = []
for walk_root, dirnames, filenames in os.walk(path):
dirnames[:] = [name for name in dirnames if not _is_hidden_workspace_name(name)]
filenames = [name for name in filenames if not _is_hidden_workspace_name(name)]
dirnames.sort(key=lambda v: v.lower())
filenames.sort(key=lambda v: v.lower())
for name in dirnames:
abs_path = os.path.join(walk_root, name)
rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
stat = os.stat(abs_path)
rows.append(
{
"name": name,
"path": rel_path,
"type": "dir",
"size": None,
"ext": "",
"ctime": _workspace_stat_ctime_iso(stat),
"mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z",
}
)
for name in filenames:
abs_path = os.path.join(walk_root, name)
rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
stat = os.stat(abs_path)
rows.append(
{
"name": name,
"path": rel_path,
"type": "file",
"size": stat.st_size,
"ext": os.path.splitext(name)[1].lower(),
"ctime": _workspace_stat_ctime_iso(stat),
"mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z",
}
)
rows.sort(key=lambda v: (v.get("type") != "dir", str(v.get("path", "")).lower()))
return rows
def _stream_file_range(target: str, start: int, end: int, chunk_size: int = 1024 * 1024) -> Generator[bytes, None, None]:
with open(target, "rb") as fh:
fh.seek(start)
remaining = end - start + 1
while remaining > 0:
chunk = fh.read(min(chunk_size, remaining))
if not chunk:
break
remaining -= len(chunk)
yield chunk
def _build_ranged_workspace_response(target: str, media_type: str, range_header: str) -> Response:
file_size = os.path.getsize(target)
range_match = re.match(r"bytes=(\d*)-(\d*)", range_header.strip())
if not range_match:
raise HTTPException(status_code=416, detail="Invalid range")
start_raw, end_raw = range_match.groups()
if start_raw == "" and end_raw == "":
raise HTTPException(status_code=416, detail="Invalid range")
if start_raw == "":
length = int(end_raw)
if length <= 0:
raise HTTPException(status_code=416, detail="Invalid range")
start = max(file_size - length, 0)
end = file_size - 1
else:
start = int(start_raw)
end = int(end_raw) if end_raw else file_size - 1
if start >= file_size or start < 0:
raise HTTPException(status_code=416, detail="Requested range not satisfiable")
end = min(end, file_size - 1)
if end < start:
raise HTTPException(status_code=416, detail="Requested range not satisfiable")
content_length = end - start + 1
headers = {
"Accept-Ranges": "bytes",
"Content-Range": f"bytes {start}-{end}/{file_size}",
"Content-Length": str(content_length),
}
return StreamingResponse(
_stream_file_range(target, start, end),
status_code=206,
media_type=media_type or "application/octet-stream",
headers=headers,
)
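# For reference (illustrative only), the Range forms accepted above behave
# like this for a 1000-byte file:
#   "bytes=0-499" -> start=0,   end=499 (first 500 bytes)
#   "bytes=500-"  -> start=500, end=999 (offset 500 through EOF)
#   "bytes=-200"  -> start=800, end=999 (suffix: final 200 bytes)
# Each yields a 206 response with Content-Range "bytes <start>-<end>/1000".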
def _build_workspace_raw_url(bot_id: str, path: str, public: bool) -> str:
normalized = "/".join(part for part in str(path or "").strip().split("/") if part)
if not normalized:
return ""
prefix = "/public" if public else "/api"
return f"{prefix}/bots/{quote(bot_id, safe='')}/workspace/raw/{quote(normalized, safe='/')}"
def create_workspace_html_preview_url(
bot_id: str,
*,
path: str,
ttl_seconds: Optional[int] = None,
) -> Dict[str, Any]:
normalized = normalize_workspace_preview_path(path)
if not normalized:
raise HTTPException(status_code=400, detail="workspace path is required")
if not is_html_preview_path(normalized):
raise HTTPException(status_code=400, detail="signed preview URLs are only supported for html files")
_root, target = _resolve_workspace_path(bot_id, normalized)
if not os.path.isfile(target):
raise HTTPException(status_code=404, detail="workspace file not found")
token_data = create_workspace_preview_token(bot_id, normalized, ttl_seconds=ttl_seconds)
return {
"url": f"/api/preview/workspace/{quote(token_data['token'], safe='')}/{quote(normalized, safe='/')}",
"expires_at": token_data["expires_at"],
"ttl_seconds": token_data["ttl_seconds"],
}
def _serve_workspace_file(
*,
bot_id: str,
path: str,
download: bool,
request: Request,
public: bool = False,
redirect_html_to_raw: bool = False,
) -> Response:
_root, target = _resolve_workspace_path(bot_id, path)
if not os.path.isfile(target):
raise HTTPException(status_code=404, detail="File not found")
media_type, _ = mimetypes.guess_type(target)
if redirect_html_to_raw and not download and str(media_type or "").startswith("text/html"):
raw_url = _build_workspace_raw_url(bot_id, path, public=public)
if raw_url:
return RedirectResponse(url=raw_url, status_code=307)
range_header = request.headers.get("range", "") if request else ""
if range_header and not download:
return _build_ranged_workspace_response(target, media_type or "application/octet-stream", range_header)
common_headers = {"Accept-Ranges": "bytes"}
if download:
return FileResponse(
target,
media_type=media_type or "application/octet-stream",
filename=os.path.basename(target),
headers=common_headers,
)
return FileResponse(target, media_type=media_type or "application/octet-stream", headers=common_headers)
def get_workspace_tree_data(
bot_id: str,
*,
path: Optional[str] = None,
recursive: bool = False,
) -> Dict[str, Any]:
root = get_bot_workspace_root(bot_id)
if not os.path.isdir(root):
return {"bot_id": bot_id, "root": root, "cwd": "", "parent": None, "entries": []}
_, target = _resolve_workspace_path(bot_id, path)
if not os.path.isdir(target):
raise HTTPException(status_code=400, detail="workspace path is not a directory")
cwd = os.path.relpath(target, root).replace("\\", "/")
if cwd == ".":
cwd = ""
parent = None
if cwd:
parent = os.path.dirname(cwd).replace("\\", "/")
if parent == ".":
parent = ""
return {
"bot_id": bot_id,
"root": root,
"cwd": cwd,
"parent": parent,
"entries": _list_workspace_dir_recursive(target, root) if recursive else _list_workspace_dir(target, root),
}
def read_workspace_text_file(
bot_id: str,
*,
path: str,
max_bytes: int = 200000,
) -> Dict[str, Any]:
root, target = _resolve_workspace_path(bot_id, path)
if not os.path.isfile(target):
raise HTTPException(status_code=404, detail="workspace file not found")
ext = os.path.splitext(target)[1].lower()
if ext not in TEXT_PREVIEW_EXTENSIONS:
raise HTTPException(status_code=400, detail=f"unsupported file type: {ext or '(none)'}")
safe_max = max(4096, min(int(max_bytes), 1000000))
with open(target, "rb") as file:
raw = file.read(safe_max + 1)
if b"\x00" in raw:
raise HTTPException(status_code=400, detail="binary file is not previewable")
truncated = len(raw) > safe_max
body = raw[:safe_max] if truncated else raw
rel_path = os.path.relpath(target, root).replace("\\", "/")
return {
"bot_id": bot_id,
"path": rel_path,
"size": os.path.getsize(target),
"is_markdown": ext in MARKDOWN_EXTENSIONS,
"truncated": truncated,
"content": body.decode("utf-8", errors="replace"),
}
def update_workspace_markdown_file(
bot_id: str,
*,
path: str,
content: str,
) -> Dict[str, Any]:
root, target = _resolve_workspace_path(bot_id, path)
if not os.path.isfile(target):
raise HTTPException(status_code=404, detail="workspace file not found")
ext = os.path.splitext(target)[1].lower()
if ext not in MARKDOWN_EXTENSIONS:
raise HTTPException(
status_code=400,
detail=f"editing is only supported for markdown files: {ext or '(none)'}",
)
normalized_content = str(content or "")
encoded = normalized_content.encode("utf-8")
if len(encoded) > 2_000_000:
raise HTTPException(status_code=413, detail="markdown file too large to save")
if "\x00" in normalized_content:
raise HTTPException(status_code=400, detail="markdown content contains invalid null bytes")
_write_text_atomic(target, normalized_content)
rel_path = os.path.relpath(target, root).replace("\\", "/")
return {
"bot_id": bot_id,
"path": rel_path,
"size": os.path.getsize(target),
"is_markdown": True,
"truncated": False,
"content": normalized_content,
}
def serve_workspace_file(
*,
bot_id: str,
path: str,
download: bool,
request: Request,
public: bool = False,
redirect_html_to_raw: bool = False,
) -> Response:
return _serve_workspace_file(
bot_id=bot_id,
path=path,
download=download,
request=request,
public=public,
redirect_html_to_raw=redirect_html_to_raw,
)
def serve_workspace_preview_file(
*,
preview_token: str,
path: str,
request: Request,
) -> Response:
token_data = resolve_workspace_preview_token(preview_token)
if not token_data:
raise HTTPException(status_code=401, detail="Invalid or expired preview token")
normalized = normalize_workspace_preview_path(path)
if not normalized:
raise HTTPException(status_code=400, detail="workspace path is required")
return _serve_workspace_file(
bot_id=str(token_data["bot_id"]),
path=normalized,
download=False,
request=request,
public=True,
redirect_html_to_raw=False,
)
def _sanitize_upload_filename(original_name: str) -> str:
name = os.path.basename(original_name).replace("\\", "_").replace("/", "_")
name = re.sub(r"[^\w.\-()+@ ]+", "_", name)
return name or "upload.bin"
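# Illustrative expectations for the sanitizer above:
#   _sanitize_upload_filename("../../etc/passwd") -> "passwd" (basename drops traversal)
#   _sanitize_upload_filename("my report?.pdf") -> "my report_.pdf"
#   _sanitize_upload_filename("") -> "upload.bin" (fallback name)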
async def upload_workspace_files_to_workspace(
bot_id: str,
*,
files: List[UploadFile],
path: Optional[str] = None,
) -> Dict[str, Any]:
if not files:
raise HTTPException(status_code=400, detail="no files uploaded")
platform_settings = get_platform_settings_snapshot()
max_bytes = platform_settings.upload_max_mb * 1024 * 1024
allowed_extensions = set(platform_settings.allowed_attachment_extensions)
root, upload_dir = _resolve_workspace_path(bot_id, path or "uploads")
os.makedirs(upload_dir, exist_ok=True)
safe_dir_real = os.path.abspath(upload_dir)
if os.path.commonpath([root, safe_dir_real]) != root:
raise HTTPException(status_code=400, detail="invalid upload target path")
rows: List[Dict[str, Any]] = []
for upload in files:
original = (upload.filename or "upload.bin").strip() or "upload.bin"
name = _sanitize_upload_filename(original)
ext = str(os.path.splitext(name)[1] or "").strip().lower()
if allowed_extensions and ext not in allowed_extensions:
raise HTTPException(
status_code=400,
detail=f"File '{name}' extension is not allowed. Allowed: {', '.join(sorted(allowed_extensions))}",
)
abs_path = os.path.join(safe_dir_real, name)
if os.path.exists(abs_path):
base, file_ext = os.path.splitext(name)
name = f"{base}-{int(datetime.utcnow().timestamp())}{file_ext}"
abs_path = os.path.join(safe_dir_real, name)
total_size = 0
try:
with open(abs_path, "wb") as file:
while True:
chunk = await upload.read(1024 * 1024)
if not chunk:
break
total_size += len(chunk)
if total_size > max_bytes:
raise HTTPException(
status_code=413,
detail=f"File '{name}' too large (max {max_bytes // (1024 * 1024)}MB)",
)
file.write(chunk)
except HTTPException:
if os.path.exists(abs_path):
os.remove(abs_path)
raise
except OSError as exc:
if os.path.exists(abs_path):
os.remove(abs_path)
raise HTTPException(
status_code=500,
detail=f"Failed to write file '{name}': {exc.strerror or str(exc)}",
)
except Exception:
if os.path.exists(abs_path):
os.remove(abs_path)
raise HTTPException(status_code=500, detail=f"Failed to upload file '{name}'")
finally:
await upload.close()
rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
rows.append({"name": name, "path": rel_path, "size": total_size})
return {"bot_id": bot_id, "files": rows}

View File

@ -0,0 +1,352 @@
import sys
import tempfile
import types
import unittest
from unittest.mock import MagicMock, patch
docker_stub = types.ModuleType("docker")
docker_stub.errors = types.SimpleNamespace(
ImageNotFound=type("ImageNotFound", (Exception,), {}),
NotFound=type("NotFound", (Exception,), {}),
)
sys.modules.setdefault("docker", docker_stub)
from core.docker_manager import BotDockerManager
class BotDockerManagerTests(unittest.TestCase):
def setUp(self) -> None:
self._tmpdir = tempfile.TemporaryDirectory()
def tearDown(self) -> None:
self._tmpdir.cleanup()
def _make_manager(self) -> BotDockerManager:
manager = BotDockerManager.__new__(BotDockerManager)
manager.client = MagicMock()
manager.host_data_root = self._tmpdir.name
manager.base_image = "nanobot-base"
manager.network_name = ""
manager.active_monitors = {}
manager._last_delivery_error = {}
manager._storage_limit_supported = True
manager._storage_limit_warning_emitted = False
return manager
@staticmethod
def _build_container(
*,
status: str,
image: str,
image_id: str | None = None,
nano_cpus: int,
memory_bytes: int,
storage_opt_size: str,
source_mount: str,
network_name: str,
bootstrap_label: str | None = "env-json-v1",
) -> MagicMock:
actual_image_id = image_id or image
container = MagicMock()
container.status = status
container.reload = MagicMock()
container.start = MagicMock()
container.stop = MagicMock()
container.remove = MagicMock()
container.image = types.SimpleNamespace(id=actual_image_id)
container.attrs = {
"Image": actual_image_id,
"Config": {
"Image": image,
"Labels": (
{"dashboard.runtime_bootstrap": bootstrap_label}
if bootstrap_label is not None
else {}
),
},
"HostConfig": {
"NanoCpus": nano_cpus,
"Memory": memory_bytes,
"StorageOpt": {"size": storage_opt_size},
},
"Mounts": [
{
"Source": source_mount,
"Destination": "/root/.nanobot",
"RW": True,
}
],
"NetworkSettings": {
"Networks": {network_name: {"IPAddress": "172.18.0.2"}},
},
}
return container
def test_stop_bot_keeps_container_by_default(self) -> None:
manager = self._make_manager()
container = MagicMock()
container.status = "running"
container.reload = MagicMock()
container.stop = MagicMock()
container.remove = MagicMock()
manager.client.containers.get.return_value = container
result = manager.stop_bot("demo")
self.assertTrue(result)
container.stop.assert_called_once_with(timeout=5)
container.remove.assert_not_called()
def test_stop_bot_remove_true_deletes_container(self) -> None:
manager = self._make_manager()
container = MagicMock()
container.status = "exited"
container.reload = MagicMock()
container.stop = MagicMock()
container.remove = MagicMock()
manager.client.containers.get.return_value = container
result = manager.stop_bot("demo", remove=True)
self.assertTrue(result)
container.stop.assert_not_called()
container.remove.assert_called_once_with()
def test_start_bot_reuses_compatible_stopped_container(self) -> None:
manager = self._make_manager()
image_tag = "nanobot-base:v1"
image_id = "sha256:img-v1"
workspace_mount = f"{self._tmpdir.name}/demo/.nanobot"
container = self._build_container(
status="exited",
image=image_tag,
image_id=image_id,
nano_cpus=1_000_000_000,
memory_bytes=1024 * 1024 * 1024,
storage_opt_size="10G",
source_mount=workspace_mount,
network_name="bridge",
)
manager.client.images.get.return_value = types.SimpleNamespace(id=image_id)
manager.client.containers.get.return_value = container
result = manager.start_bot(
"demo",
image_tag=image_tag,
env_vars={"TZ": "UTC", "API_KEY": "updated-secret"},
cpu_cores=1.0,
memory_mb=1024,
storage_gb=10,
)
self.assertTrue(result)
container.start.assert_called_once_with()
container.remove.assert_not_called()
manager.client.containers.run.assert_not_called()
def test_start_bot_recreates_incompatible_stopped_container(self) -> None:
manager = self._make_manager()
image_tag = "nanobot-base:v1"
desired_image_id = "sha256:img-v1"
workspace_mount = f"{self._tmpdir.name}/demo/.nanobot"
container = self._build_container(
status="exited",
image="nanobot-base:old",
image_id="sha256:img-old",
nano_cpus=1_000_000_000,
memory_bytes=1024 * 1024 * 1024,
storage_opt_size="10G",
source_mount=workspace_mount,
network_name="bridge",
)
manager.client.images.get.return_value = types.SimpleNamespace(id=desired_image_id)
manager.client.containers.get.return_value = container
manager._run_container_with_storage_fallback = MagicMock(return_value=MagicMock())
result = manager.start_bot(
"demo",
image_tag=image_tag,
env_vars={"TZ": "Asia/Shanghai"},
cpu_cores=1.0,
memory_mb=1024,
storage_gb=10,
)
self.assertTrue(result)
container.start.assert_not_called()
container.remove.assert_called_once_with(force=True)
manager._run_container_with_storage_fallback.assert_called_once()
def test_start_bot_recreates_stopped_container_when_image_id_changes_under_same_tag(self) -> None:
manager = self._make_manager()
image_tag = "nanobot-base:v1"
workspace_mount = f"{self._tmpdir.name}/demo/.nanobot"
container = self._build_container(
status="exited",
image=image_tag,
image_id="sha256:img-old",
nano_cpus=1_000_000_000,
memory_bytes=1024 * 1024 * 1024,
storage_opt_size="10G",
source_mount=workspace_mount,
network_name="bridge",
)
manager.client.images.get.return_value = types.SimpleNamespace(id="sha256:img-new")
manager.client.containers.get.return_value = container
manager._run_container_with_storage_fallback = MagicMock(return_value=MagicMock())
result = manager.start_bot(
"demo",
image_tag=image_tag,
env_vars={"TZ": "Asia/Shanghai"},
cpu_cores=1.0,
memory_mb=1024,
storage_gb=10,
)
self.assertTrue(result)
container.start.assert_not_called()
container.remove.assert_called_once_with(force=True)
manager._run_container_with_storage_fallback.assert_called_once()
def test_start_bot_recreates_container_without_new_entrypoint(self) -> None:
manager = self._make_manager()
image_tag = "nanobot-base:v1"
image_id = "sha256:img-v1"
workspace_mount = f"{self._tmpdir.name}/demo/.nanobot"
container = self._build_container(
status="exited",
image=image_tag,
image_id=image_id,
nano_cpus=1_000_000_000,
memory_bytes=1024 * 1024 * 1024,
storage_opt_size="10G",
source_mount=workspace_mount,
network_name="bridge",
bootstrap_label=None,
)
manager.client.images.get.return_value = types.SimpleNamespace(id=image_id)
manager.client.containers.get.return_value = container
manager._run_container_with_storage_fallback = MagicMock(return_value=MagicMock())
result = manager.start_bot(
"demo",
image_tag=image_tag,
env_vars={"TZ": "Asia/Shanghai"},
cpu_cores=1.0,
memory_mb=1024,
storage_gb=10,
)
self.assertTrue(result)
container.start.assert_not_called()
container.remove.assert_called_once_with(force=True)
manager._run_container_with_storage_fallback.assert_called_once()
def test_start_bot_recreates_running_container_when_image_id_changes_under_same_tag(self) -> None:
manager = self._make_manager()
image_tag = "nanobot-base:v1"
workspace_mount = f"{self._tmpdir.name}/demo/.nanobot"
container = self._build_container(
status="running",
image=image_tag,
image_id="sha256:img-old",
nano_cpus=1_000_000_000,
memory_bytes=1024 * 1024 * 1024,
storage_opt_size="10G",
source_mount=workspace_mount,
network_name="bridge",
)
manager.client.images.get.return_value = types.SimpleNamespace(id="sha256:img-new")
manager.client.containers.get.return_value = container
manager._run_container_with_storage_fallback = MagicMock(return_value=MagicMock())
result = manager.start_bot(
"demo",
image_tag=image_tag,
env_vars={"TZ": "Asia/Shanghai"},
cpu_cores=1.0,
memory_mb=1024,
storage_gb=10,
)
self.assertTrue(result)
container.remove.assert_called_once_with(force=True)
manager._run_container_with_storage_fallback.assert_called_once()
def test_send_command_waits_for_dashboard_ready(self) -> None:
manager = self._make_manager()
manager._wait_for_dashboard_ready = MagicMock(return_value=True)
manager._send_command_via_exec = MagicMock(return_value=True)
result = manager.send_command("demo", "hello")
self.assertTrue(result)
manager._wait_for_dashboard_ready.assert_called_once_with("demo")
manager._send_command_via_exec.assert_called_once_with("demo", "hello", [])
def test_send_command_returns_false_when_dashboard_never_becomes_ready(self) -> None:
manager = self._make_manager()
def _wait_timeout(bot_id: str) -> bool:
manager._last_delivery_error[bot_id] = "Dashboard channel was not ready within 15s"
return False
manager._wait_for_dashboard_ready = MagicMock(side_effect=_wait_timeout)
manager._send_command_via_exec = MagicMock()
manager._send_command_via_host_http = MagicMock()
result = manager.send_command("demo", "hello")
self.assertFalse(result)
manager._send_command_via_exec.assert_not_called()
manager._send_command_via_host_http.assert_not_called()
self.assertEqual(
manager.get_last_delivery_error("demo"),
"Dashboard channel was not ready within 15s",
)
def test_wait_for_dashboard_ready_returns_true_after_start_log(self) -> None:
manager = self._make_manager()
manager.get_bot_status = MagicMock(return_value="RUNNING")
manager.get_recent_logs = MagicMock(
side_effect=[
["Agent loop started"],
["2026-04-25 | INFO | nanobot.channels.dashboard:start:66 - ready"],
]
)
with patch("core.docker_manager.time.sleep", return_value=None):
result = manager._wait_for_dashboard_ready(
"demo",
timeout_seconds=2.0,
poll_interval_seconds=0.1,
)
self.assertTrue(result)
def test_wait_for_dashboard_ready_sets_timeout_error(self) -> None:
manager = self._make_manager()
manager.get_bot_status = MagicMock(return_value="RUNNING")
manager.get_recent_logs = MagicMock(return_value=["Agent loop started"])
time_values = iter([0.0, 0.2, 0.4, 1.2])
with (
patch("core.docker_manager.time.monotonic", side_effect=lambda: next(time_values)),
patch("core.docker_manager.time.sleep", return_value=None),
):
result = manager._wait_for_dashboard_ready(
"demo",
timeout_seconds=1.0,
poll_interval_seconds=0.1,
)
self.assertFalse(result)
self.assertEqual(
manager.get_last_delivery_error("demo"),
"Dashboard channel was not ready within 1s",
)
if __name__ == "__main__":
unittest.main()
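# These tests stub out the `docker` module at import time, so they run without
# a Docker daemon, e.g. `python -m unittest backend/tests/test_docker_manager.py`
# (path hypothetical; adjust to wherever this file lives in the repo).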

View File

@ -1,30 +1,86 @@
FROM python:3.12-slim
FROM python:3.12-slim AS builder
ENV PYTHONUNBUFFERED=1
ENV LANG=C.UTF-8
ENV LC_ALL=C.UTF-8
ENV PYTHONIOENCODING=utf-8
ENV PATH=/opt/venv/bin:$PATH
# 1. Switch Debian apt sources to a domestic (Aliyun) mirror
RUN sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources && \
sed -i 's/security.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources
# 2. Install base dependencies
# 2. Install compile-time dependencies in the builder stage only
RUN apt-get update && apt-get install -y --no-install-recommends \
curl \
gcc \
libpq-dev \
&& rm -rf /var/lib/apt/lists/*
# 3. Install aiohttp and base Python tooling
RUN python -m pip install --no-cache-dir -i https://mirrors.aliyun.com/pypi/simple/ --upgrade \
pip setuptools wheel aiohttp
RUN python -m venv /opt/venv
WORKDIR /app
# This step also copies in your modified nanobot/channels/dashboard.py
COPY . /app
COPY pyproject.toml README.md LICENSE THIRD_PARTY_NOTICES.md ./
# 4. Install nanobot
RUN pip install --no-cache-dir -i https://mirrors.aliyun.com/pypi/simple/ .
# 3. Install third-party dependencies first. This layer depends only on pyproject.toml, so source-code changes do not trigger a full dependency reinstall.
RUN --mount=type=cache,target=/root/.cache/pip \
python -m pip install -i https://mirrors.aliyun.com/pypi/simple/ --upgrade \
--no-compile pip setuptools wheel aiohttp hatchling && \
python -c 'import tomllib; data=tomllib.load(open("pyproject.toml","rb")); deps=list(data["project"].get("dependencies", [])); deps.extend(data["project"].get("optional-dependencies", {}).get("wecom", [])); print("\n".join(deps))' > /tmp/requirements.txt && \
pip install --no-compile -i https://mirrors.aliyun.com/pypi/simple/ -r /tmp/requirements.txt && \
rm -f /tmp/requirements.txt
COPY nanobot/ nanobot/
COPY bridge/ bridge/
# 4. The source layer installs only nanobot itself and does not re-resolve or re-download third-party dependencies.
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --no-compile --no-deps --no-build-isolation . && \
find /opt/venv -type d -name __pycache__ -prune -exec rm -rf {} + && \
find /opt/venv -name '*.pyc' -delete
FROM python:3.12-slim
ENV PYTHONUNBUFFERED=1
ENV LANG=C.UTF-8
ENV LC_ALL=C.UTF-8
ENV PYTHONIOENCODING=utf-8
ENV PATH=/opt/venv/bin:$PATH
ARG INSTALL_EXTRA_CLI=false
# 1. Switch Debian apt sources to a domestic (Aliyun) mirror
RUN sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources && \
sed -i 's/security.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources
# 2. Install base runtime dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
bubblewrap \
ca-certificates \
curl \
git \
openssh-client \
tmux \
&& git config --global --add url."https://github.com/".insteadOf ssh://git@github.com/ \
&& git config --global --add url."https://github.com/".insteadOf git@github.com: \
&& rm -rf /var/lib/apt/lists/*
# 3. Node.js and the GitHub CLI are installed only when those tools are needed; skipped by default to avoid hitting external apt sources.
RUN if [ "$INSTALL_EXTRA_CLI" = "true" ]; then \
apt-get update && apt-get install -y --no-install-recommends gnupg \
&& mkdir -p /etc/apt/keyrings /etc/apt/sources.list.d \
&& curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \
&& echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_22.x nodistro main" > /etc/apt/sources.list.d/nodesource.list \
&& curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg > /etc/apt/keyrings/githubcli-archive-keyring.gpg \
&& chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" > /etc/apt/sources.list.d/github-cli.list \
&& apt-get update && apt-get install -y --no-install-recommends \
gh \
nodejs \
&& apt-get purge -y --auto-remove gnupg \
&& rm -rf /var/lib/apt/lists/*; \
else \
echo "Skipping optional Node.js and GitHub CLI"; \
fi
# 4. Copy only the prepared runtime environment, keeping the source tree out of the final image
COPY --from=builder /opt/venv /opt/venv
WORKDIR /root
# Official gateway mode; it now auto-loads your DashboardChannel

View File

@ -0,0 +1,97 @@
#!/bin/bash
set -e
BASE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Core function that copies the files and builds the image
build_image() {
local dir_name=$1
local version=$2
local image_name="nanobot-base:${version}"
echo "=================================================="
echo "准备构建镜像: ${image_name}"
echo "=================================================="
# 1. Copy Dashboard.Dockerfile
echo ">> [1/3] Copying Dashboard.Dockerfile to the repo root ..."
cp "${BASE_DIR}/Dashboard.Dockerfile" "${BASE_DIR}/${dir_name}/"
# 2. Copy dashboard.py
echo ">> [2/3] Copying dashboard.py into the channels directory ..."
if [ -d "${BASE_DIR}/${dir_name}/nanobot/channels" ]; then
cp "${BASE_DIR}/dashboard.py" "${BASE_DIR}/${dir_name}/nanobot/channels/"
else
# Fallback: create nanobot/channels/ if it does not exist
mkdir -p "${BASE_DIR}/${dir_name}/nanobot/channels/"
cp "${BASE_DIR}/dashboard.py" "${BASE_DIR}/${dir_name}/nanobot/channels/"
fi
# 3. Run docker build
echo ">> [3/3] Building Docker image: ${image_name} ..."
cd "${BASE_DIR}/${dir_name}"
DOCKER_BUILDKIT=1 docker build \
--build-arg INSTALL_EXTRA_CLI="${INSTALL_EXTRA_CLI:-false}" \
-f Dashboard.Dockerfile \
-t "${image_name}" \
.
echo "=================================================="
echo "✅ 构建完成: ${image_name}"
echo "=================================================="
}
echo "请选择操作模式:"
echo "1) 从 Git 拉取最新代码并打包 (会覆盖已有同名目录)"
echo "2) 扫描本地已有的目录并打包"
read -p "输入选项 [1/2]: " mode
if [ "$mode" = "1" ]; then
echo "正在从 https://github.com/HKUDS/nanobot.git 获取最新版本号..."
LATEST_TAG=$(git ls-remote --tags https://github.com/HKUDS/nanobot.git | awk -F/ '{print $3}' | grep -v '\^{}$' | sort -V | tail -n1)
if [ -z "$LATEST_TAG" ]; then
echo "获取远程版本号失败,请检查网络或仓库地址。"
exit 1
fi
VERSION="${LATEST_TAG}"
DIR_NAME="nanobot-base-${VERSION}"
if [ -d "${BASE_DIR}/${DIR_NAME}" ]; then
echo ">> 清理已有的目录 ${DIR_NAME} ..."
rm -rf "${BASE_DIR}/${DIR_NAME}"
fi
echo ">> 正在克隆 nanobot (版本: ${VERSION}) ..."
git clone -b "${VERSION}" https://github.com/HKUDS/nanobot.git "${BASE_DIR}/${DIR_NAME}"
build_image "${DIR_NAME}" "${VERSION}"
elif [ "$mode" = "2" ]; then
echo "正在扫描本地目录..."
# 查找 nanobot-base-* 格式的目录
dirs=($(find "${BASE_DIR}" -maxdepth 1 -type d -name "nanobot-base-*" | awk -F/ '{print $NF}'))
if [ ${#dirs[@]} -eq 0 ]; then
echo "未找到任何本地克隆的目录 (格式: nanobot-base-*)。"
exit 1
fi
echo "找到以下本地目录,请选择要打包的目录:"
select DIR_NAME in "${dirs[@]}"; do
if [ -n "$DIR_NAME" ]; then
echo "您选择了: $DIR_NAME"
# 提取版本号,例如从 nanobot-base-v0.1.5 提取 v0.1.5
VERSION=${DIR_NAME#nanobot-base-}
build_image "${DIR_NAME}" "${VERSION}"
break
else
echo "无效的选项,请重新选择。"
fi
done
else
echo "无效的选项,退出。"
exit 1
fi

View File

@ -1,3 +1,4 @@
import asyncio
import json
from types import SimpleNamespace
from typing import Any
@ -51,6 +52,7 @@ class DashboardChannel(BaseChannel):
self.host = host if host is not None else getattr(config_obj, "host", "0.0.0.0")
self.port = port if port is not None else getattr(config_obj, "port", 9000)
self.runner: web.AppRunner | None = None
self._chat_tasks: set[asyncio.Task[Any]] = set()
async def start(self) -> None:
"""启动 Dashboard HTTP 服务"""
@ -70,6 +72,9 @@ class DashboardChannel(BaseChannel):
if self.runner:
await self.runner.cleanup()
self.runner = None
for task in list(self._chat_tasks):
task.cancel()
self._chat_tasks.clear()
self._running = False
logger.info("Dashboard Channel 已下线")
@ -110,12 +115,24 @@ class DashboardChannel(BaseChannel):
# Emit JSON so the panel backend can parse payloads precisely, with no regex needed
print(f"\n__DASHBOARD_DATA_START__{json.dumps(payload, ensure_ascii=False)}__DASHBOARD_DATA_END__\n", flush=True)
async def _dispatch_chat_message(self, user_message: str, media: list[str]) -> None:
try:
await self._handle_message(
sender_id="user",
chat_id="direct",
content=user_message,
media=media,
)
except Exception as e:
logger.error(f"❌ Dashboard Channel 后台处理指令失败: {e}")
async def _handle_chat(self, request: web.Request) -> web.Response:
"""处理来自面板的指令入站"""
try:
data = await request.json()
user_message = data.get("message", "").strip()
media = [str(v).strip().replace("\\", "/") for v in (data.get("media") or []) if str(v).strip()]
user_message = str(data.get("message") or "").strip()
raw_media = data.get("media") or []
media = [str(v).strip().replace("\\", "/") for v in raw_media if str(v).strip()] if isinstance(raw_media, list) else []
if not user_message and not media:
return web.json_response({"status": "error", "reason": "empty message and media"}, status=400)
@ -125,13 +142,10 @@ class DashboardChannel(BaseChannel):
# Debug log: print the raw message length and the first 20 characters to confirm Chinese text is not garbled
logger.info(f"📥 [Dashboard Channel] Received command (len={len(user_message)}): {user_message[:20]}...")
# Route through the base-class entry point for compatibility with session and permission logic across cores.
await self._handle_message(
sender_id="user",
chat_id="direct",
content=user_message,
media=media,
)
# Acknowledge receipt first so the panel's delivery request is not dragged into a timeout by the downstream LLM/tool pipeline.
task = asyncio.create_task(self._dispatch_chat_message(user_message, media))
self._chat_tasks.add(task)
task.add_done_callback(self._chat_tasks.discard)
return web.json_response({"status": "ok"})
except Exception as e:

View File

@ -1,355 +0,0 @@
"""LiteLLM provider implementation for multi-provider support."""
import hashlib
import os
import secrets
import string
from typing import Any
import json_repair
import litellm
from litellm import acompletion
from loguru import logger
from nanobot.providers.base import LLMProvider, LLMResponse, ToolCallRequest
from nanobot.providers.registry import find_by_model, find_gateway
# Standard chat-completion message keys.
_ALLOWED_MSG_KEYS = frozenset({"role", "content", "tool_calls", "tool_call_id", "name", "reasoning_content"})
_ANTHROPIC_EXTRA_KEYS = frozenset({"thinking_blocks"})
_ALNUM = string.ascii_letters + string.digits
def _short_tool_id() -> str:
"""Generate a 9-char alphanumeric ID compatible with all providers (incl. Mistral)."""
return "".join(secrets.choice(_ALNUM) for _ in range(9))
class LiteLLMProvider(LLMProvider):
"""
LLM provider using LiteLLM for multi-provider support.
Supports OpenRouter, Anthropic, OpenAI, Gemini, MiniMax, and many other providers through
a unified interface. Provider-specific logic is driven by the registry
(see providers/registry.py); no if-elif chains are needed here.
"""
def __init__(
self,
api_key: str | None = None,
api_base: str | None = None,
default_model: str = "anthropic/claude-opus-4-5",
extra_headers: dict[str, str] | None = None,
provider_name: str | None = None,
):
super().__init__(api_key, api_base)
self.default_model = default_model
self.extra_headers = extra_headers or {}
# Detect gateway / local deployment.
# provider_name (from config key) is the primary signal;
# api_key / api_base are fallback for auto-detection.
self._gateway = find_gateway(provider_name, api_key, api_base)
# Configure environment variables
if api_key:
self._setup_env(api_key, api_base, default_model)
if api_base:
litellm.api_base = api_base
# Disable LiteLLM logging noise
litellm.suppress_debug_info = True
# Drop unsupported parameters for providers (e.g., gpt-5 rejects some params)
litellm.drop_params = True
self._langsmith_enabled = bool(os.getenv("LANGSMITH_API_KEY"))
def _setup_env(self, api_key: str, api_base: str | None, model: str) -> None:
"""Set environment variables based on detected provider."""
spec = self._gateway or find_by_model(model)
if not spec:
return
if not spec.env_key:
# OAuth/provider-only specs (for example: openai_codex)
return
# Gateway/local overrides existing env; standard provider doesn't
if self._gateway:
os.environ[spec.env_key] = api_key
else:
os.environ.setdefault(spec.env_key, api_key)
# Resolve env_extras placeholders:
# {api_key} → user's API key
# {api_base} → user's api_base, falling back to spec.default_api_base
effective_base = api_base or spec.default_api_base
for env_name, env_val in spec.env_extras:
resolved = env_val.replace("{api_key}", api_key)
resolved = resolved.replace("{api_base}", effective_base)
os.environ.setdefault(env_name, resolved)
def _resolve_model(self, model: str) -> str:
"""Resolve model name by applying provider/gateway prefixes."""
if self._gateway:
prefix = self._gateway.litellm_prefix
if self._gateway.strip_model_prefix:
model = model.split("/")[-1]
if prefix:
model = f"{prefix}/{model}"
return model
# Standard mode: auto-prefix for known providers
spec = find_by_model(model)
if spec and spec.litellm_prefix:
model = self._canonicalize_explicit_prefix(model, spec.name, spec.litellm_prefix)
if not any(model.startswith(s) for s in spec.skip_prefixes):
model = f"{spec.litellm_prefix}/{model}"
return model
@staticmethod
def _canonicalize_explicit_prefix(model: str, spec_name: str, canonical_prefix: str) -> str:
"""Normalize explicit provider prefixes like `github-copilot/...`."""
if "/" not in model:
return model
prefix, remainder = model.split("/", 1)
if prefix.lower().replace("-", "_") != spec_name:
return model
return f"{canonical_prefix}/{remainder}"
def _supports_cache_control(self, model: str) -> bool:
"""Return True when the provider supports cache_control on content blocks."""
if self._gateway is not None:
return self._gateway.supports_prompt_caching
spec = find_by_model(model)
return spec is not None and spec.supports_prompt_caching
def _apply_cache_control(
self,
messages: list[dict[str, Any]],
tools: list[dict[str, Any]] | None,
) -> tuple[list[dict[str, Any]], list[dict[str, Any]] | None]:
"""Return copies of messages and tools with cache_control injected."""
new_messages = []
for msg in messages:
if msg.get("role") == "system":
content = msg["content"]
if isinstance(content, str):
new_content = [{"type": "text", "text": content, "cache_control": {"type": "ephemeral"}}]
else:
new_content = list(content)
new_content[-1] = {**new_content[-1], "cache_control": {"type": "ephemeral"}}
new_messages.append({**msg, "content": new_content})
else:
new_messages.append(msg)
new_tools = tools
if tools:
new_tools = list(tools)
new_tools[-1] = {**new_tools[-1], "cache_control": {"type": "ephemeral"}}
return new_messages, new_tools
def _apply_model_overrides(self, model: str, kwargs: dict[str, Any]) -> None:
"""Apply model-specific parameter overrides from the registry."""
model_lower = model.lower()
spec = find_by_model(model)
if spec:
for pattern, overrides in spec.model_overrides:
if pattern in model_lower:
kwargs.update(overrides)
return
@staticmethod
def _extra_msg_keys(original_model: str, resolved_model: str) -> frozenset[str]:
"""Return provider-specific extra keys to preserve in request messages."""
spec = find_by_model(original_model) or find_by_model(resolved_model)
if (spec and spec.name == "anthropic") or "claude" in original_model.lower() or resolved_model.startswith("anthropic/"):
return _ANTHROPIC_EXTRA_KEYS
return frozenset()
@staticmethod
def _normalize_tool_call_id(tool_call_id: Any) -> Any:
"""Normalize tool_call_id to a provider-safe 9-char alphanumeric form."""
if not isinstance(tool_call_id, str):
return tool_call_id
if len(tool_call_id) == 9 and tool_call_id.isalnum():
return tool_call_id
return hashlib.sha1(tool_call_id.encode()).hexdigest()[:9]
@staticmethod
def _sanitize_messages(messages: list[dict[str, Any]], extra_keys: frozenset[str] = frozenset()) -> list[dict[str, Any]]:
"""Strip non-standard keys and ensure assistant messages have a content key."""
allowed = _ALLOWED_MSG_KEYS | extra_keys
sanitized = LLMProvider._sanitize_request_messages(messages, allowed)
id_map: dict[str, str] = {}
def map_id(value: Any) -> Any:
if not isinstance(value, str):
return value
return id_map.setdefault(value, LiteLLMProvider._normalize_tool_call_id(value))
for clean in sanitized:
# Keep assistant tool_calls[].id and tool tool_call_id in sync after
# shortening, otherwise strict providers reject the broken linkage.
if isinstance(clean.get("tool_calls"), list):
normalized_tool_calls = []
for tc in clean["tool_calls"]:
if not isinstance(tc, dict):
normalized_tool_calls.append(tc)
continue
tc_clean = dict(tc)
tc_clean["id"] = map_id(tc_clean.get("id"))
normalized_tool_calls.append(tc_clean)
clean["tool_calls"] = normalized_tool_calls
if "tool_call_id" in clean and clean["tool_call_id"]:
clean["tool_call_id"] = map_id(clean["tool_call_id"])
return sanitized
async def chat(
self,
messages: list[dict[str, Any]],
tools: list[dict[str, Any]] | None = None,
model: str | None = None,
max_tokens: int = 4096,
temperature: float = 0.7,
reasoning_effort: str | None = None,
tool_choice: str | dict[str, Any] | None = None,
) -> LLMResponse:
"""
Send a chat completion request via LiteLLM.
Args:
messages: List of message dicts with 'role' and 'content'.
tools: Optional list of tool definitions in OpenAI format.
model: Model identifier (e.g., 'anthropic/claude-sonnet-4-5').
max_tokens: Maximum tokens in response.
temperature: Sampling temperature.
Returns:
LLMResponse with content and/or tool calls.
"""
original_model = model or self.default_model
model = self._resolve_model(original_model)
extra_msg_keys = self._extra_msg_keys(original_model, model)
if self._supports_cache_control(original_model):
messages, tools = self._apply_cache_control(messages, tools)
# Clamp max_tokens to at least 1 — negative or zero values cause
# LiteLLM to reject the request with "max_tokens must be at least 1".
max_tokens = max(1, max_tokens)
kwargs: dict[str, Any] = {
"model": model,
"messages": self._sanitize_messages(self._sanitize_empty_content(messages), extra_keys=extra_msg_keys),
"max_tokens": max_tokens,
"temperature": temperature,
}
if self._gateway:
kwargs.update(self._gateway.litellm_kwargs)
# Apply model-specific overrides (e.g. kimi-k2.5 temperature)
self._apply_model_overrides(model, kwargs)
if self._langsmith_enabled:
kwargs.setdefault("callbacks", []).append("langsmith")
# Pass api_key directly — more reliable than env vars alone
if self.api_key:
kwargs["api_key"] = self.api_key
# Pass api_base for custom endpoints
if self.api_base:
kwargs["api_base"] = self.api_base
# Pass extra headers (e.g. APP-Code for AiHubMix)
if self.extra_headers:
kwargs["extra_headers"] = self.extra_headers
if reasoning_effort:
kwargs["reasoning_effort"] = reasoning_effort
kwargs["drop_params"] = True
if tools:
kwargs["tools"] = tools
kwargs["tool_choice"] = tool_choice or "auto"
try:
response = await acompletion(**kwargs)
return self._parse_response(response)
except Exception as e:
# Return error as content for graceful handling
return LLMResponse(
content=f"Error calling LLM: {str(e)}",
finish_reason="error",
)
def _parse_response(self, response: Any) -> LLMResponse:
"""Parse LiteLLM response into our standard format."""
choice = response.choices[0]
message = choice.message
content = message.content
finish_reason = choice.finish_reason
# Some providers (e.g. GitHub Copilot) split content and tool_calls
# across multiple choices. Merge them so tool_calls are not lost.
raw_tool_calls = []
for ch in response.choices:
msg = ch.message
if hasattr(msg, "tool_calls") and msg.tool_calls:
raw_tool_calls.extend(msg.tool_calls)
if ch.finish_reason in ("tool_calls", "stop"):
finish_reason = ch.finish_reason
if not content and msg.content:
content = msg.content
if len(response.choices) > 1:
logger.debug("LiteLLM response has {} choices, merged {} tool_calls",
len(response.choices), len(raw_tool_calls))
tool_calls = []
for tc in raw_tool_calls:
# Parse arguments from JSON string if needed
args = tc.function.arguments
if isinstance(args, str):
args = json_repair.loads(args)
provider_specific_fields = getattr(tc, "provider_specific_fields", None) or None
function_provider_specific_fields = (
getattr(tc.function, "provider_specific_fields", None) or None
)
tool_calls.append(ToolCallRequest(
id=_short_tool_id(),
name=tc.function.name,
arguments=args,
provider_specific_fields=provider_specific_fields,
function_provider_specific_fields=function_provider_specific_fields,
))
usage = {}
if hasattr(response, "usage") and response.usage:
usage = {
"prompt_tokens": response.usage.prompt_tokens,
"completion_tokens": response.usage.completion_tokens,
"total_tokens": response.usage.total_tokens,
}
reasoning_content = getattr(message, "reasoning_content", None) or None
thinking_blocks = getattr(message, "thinking_blocks", None) or None
return LLMResponse(
content=content,
tool_calls=tool_calls,
finish_reason=finish_reason or "stop",
usage=usage,
reasoning_content=reasoning_content,
thinking_blocks=thinking_blocks,
)
def get_default_model(self) -> str:
"""Get the default model."""
return self.default_model

552
bot-images/wecom.py 100644
View File

@ -0,0 +1,552 @@
"""WeCom (Enterprise WeChat) channel implementation using wecom_aibot_sdk."""
import asyncio
import base64
import hashlib
import importlib.util
import os
import re
from collections import OrderedDict
from pathlib import Path
from typing import Any
from loguru import logger
from nanobot.bus.events import OutboundMessage
from nanobot.bus.queue import MessageBus
from nanobot.channels.base import BaseChannel
from nanobot.config.paths import get_media_dir, get_workspace_path
from nanobot.config.schema import Base
from pydantic import Field
WECOM_AVAILABLE = importlib.util.find_spec("wecom_aibot_sdk") is not None
# Upload safety limits (matching QQ channel defaults)
WECOM_UPLOAD_MAX_BYTES = 1024 * 1024 * 200 # 200MB
# Replace unsafe characters with "_", keep Chinese and common safe punctuation.
_SAFE_NAME_RE = re.compile(r"[^\w.\-()\[\]()【】\u4e00-\u9fff]+", re.UNICODE)
def _sanitize_filename(name: str) -> str:
"""Sanitize filename to avoid traversal and problematic chars."""
name = (name or "").strip()
name = Path(name).name
name = _SAFE_NAME_RE.sub("_", name).strip("._ ")
return name
_IMAGE_EXTS = {".jpg", ".jpeg", ".png", ".gif", ".webp", ".bmp"}
_VIDEO_EXTS = {".mp4", ".avi", ".mov"}
_AUDIO_EXTS = {".amr", ".mp3", ".wav", ".ogg"}
def _guess_wecom_media_type(filename: str) -> str:
"""Classify file extension as WeCom media_type string."""
ext = Path(filename).suffix.lower()
if ext in _IMAGE_EXTS:
return "image"
if ext in _VIDEO_EXTS:
return "video"
if ext in _AUDIO_EXTS:
return "voice"
return "file"
class WecomConfig(Base):
"""WeCom (Enterprise WeChat) AI Bot channel configuration."""
enabled: bool = False
bot_id: str = ""
secret: str = ""
allow_from: list[str] = Field(default_factory=list)
welcome_message: str = ""
# Message type display mapping
MSG_TYPE_MAP = {
"image": "[image]",
"voice": "[voice]",
"file": "[file]",
"mixed": "[mixed content]",
}
class WecomChannel(BaseChannel):
"""
WeCom (Enterprise WeChat) channel using WebSocket long connection.
Uses WebSocket to receive events - no public IP or webhook required.
Requires:
- Bot ID and Secret from WeCom AI Bot platform
"""
name = "wecom"
display_name = "WeCom"
@classmethod
def default_config(cls) -> dict[str, Any]:
return WecomConfig().model_dump(by_alias=True)
def __init__(self, config: Any, bus: MessageBus):
if isinstance(config, dict):
config = WecomConfig.model_validate(config)
super().__init__(config, bus)
self.config: WecomConfig = config
self._client: Any = None
self._processed_message_ids: OrderedDict[str, None] = OrderedDict()
self._loop: asyncio.AbstractEventLoop | None = None
self._generate_req_id = None
# Store frame headers for each chat to enable replies
self._chat_frames: dict[str, Any] = {}
async def start(self) -> None:
"""Start the WeCom bot with WebSocket long connection."""
if not WECOM_AVAILABLE:
logger.error("WeCom SDK not installed. Run: pip install nanobot-ai[wecom]")
return
if not self.config.bot_id or not self.config.secret:
logger.error("WeCom bot_id and secret not configured")
return
from wecom_aibot_sdk import WSClient, generate_req_id
self._running = True
self._loop = asyncio.get_running_loop()
self._generate_req_id = generate_req_id
# Create WebSocket client
self._client = WSClient({
"bot_id": self.config.bot_id,
"secret": self.config.secret,
"reconnect_interval": 1000,
"max_reconnect_attempts": -1, # Infinite reconnect
"heartbeat_interval": 30000,
})
# Register event handlers
self._client.on("connected", self._on_connected)
self._client.on("authenticated", self._on_authenticated)
self._client.on("disconnected", self._on_disconnected)
self._client.on("error", self._on_error)
self._client.on("message.text", self._on_text_message)
self._client.on("message.image", self._on_image_message)
self._client.on("message.voice", self._on_voice_message)
self._client.on("message.file", self._on_file_message)
self._client.on("message.mixed", self._on_mixed_message)
self._client.on("event.enter_chat", self._on_enter_chat)
logger.info("WeCom bot starting with WebSocket long connection")
logger.info("No public IP required - using WebSocket to receive events")
# Connect
await self._client.connect_async()
# Keep running until stopped
while self._running:
await asyncio.sleep(1)
async def stop(self) -> None:
"""Stop the WeCom bot."""
self._running = False
if self._client:
await self._client.disconnect()
logger.info("WeCom bot stopped")
async def _on_connected(self, frame: Any) -> None:
"""Handle WebSocket connected event."""
logger.info("WeCom WebSocket connected")
async def _on_authenticated(self, frame: Any) -> None:
"""Handle authentication success event."""
logger.info("WeCom authenticated successfully")
async def _on_disconnected(self, frame: Any) -> None:
"""Handle WebSocket disconnected event."""
reason = frame.body if hasattr(frame, 'body') else str(frame)
logger.warning("WeCom WebSocket disconnected: {}", reason)
async def _on_error(self, frame: Any) -> None:
"""Handle error event."""
logger.error("WeCom error: {}", frame)
async def _on_text_message(self, frame: Any) -> None:
"""Handle text message."""
await self._process_message(frame, "text")
async def _on_image_message(self, frame: Any) -> None:
"""Handle image message."""
await self._process_message(frame, "image")
async def _on_voice_message(self, frame: Any) -> None:
"""Handle voice message."""
await self._process_message(frame, "voice")
async def _on_file_message(self, frame: Any) -> None:
"""Handle file message."""
await self._process_message(frame, "file")
async def _on_mixed_message(self, frame: Any) -> None:
"""Handle mixed content message."""
await self._process_message(frame, "mixed")
async def _on_enter_chat(self, frame: Any) -> None:
"""Handle enter_chat event (user opens chat with bot)."""
try:
# Extract body from WsFrame dataclass or dict
if hasattr(frame, 'body'):
body = frame.body or {}
elif isinstance(frame, dict):
body = frame.get("body", frame)
else:
body = {}
chat_id = body.get("chatid", "") if isinstance(body, dict) else ""
if chat_id and self.config.welcome_message:
await self._client.reply_welcome(frame, {
"msgtype": "text",
"text": {"content": self.config.welcome_message},
})
except Exception as e:
logger.error("Error handling enter_chat: {}", e)
async def _process_message(self, frame: Any, msg_type: str) -> None:
"""Process incoming message and forward to bus."""
try:
# Extract body from WsFrame dataclass or dict
if hasattr(frame, 'body'):
body = frame.body or {}
elif isinstance(frame, dict):
body = frame.get("body", frame)
else:
body = {}
# Ensure body is a dict
if not isinstance(body, dict):
logger.warning("Invalid body type: {}", type(body))
return
# Extract message info
msg_id = body.get("msgid", "")
if not msg_id:
msg_id = f"{body.get('chatid', '')}_{body.get('sendertime', '')}"
# Deduplication check
if msg_id in self._processed_message_ids:
return
self._processed_message_ids[msg_id] = None
# Trim cache
while len(self._processed_message_ids) > 1000:
self._processed_message_ids.popitem(last=False)
# Extract sender info from "from" field (SDK format)
from_info = body.get("from", {})
sender_id = from_info.get("userid", "unknown") if isinstance(from_info, dict) else "unknown"
# For single chat, chatid is the sender's userid
# For group chat, chatid is provided in body
chat_type = body.get("chattype", "single")
chat_id = body.get("chatid", sender_id)
content_parts = []
media_paths: list[str] = []
if msg_type == "text":
text = body.get("text", {}).get("content", "")
if text:
content_parts.append(text)
elif msg_type == "image":
image_info = body.get("image", {})
file_url = image_info.get("url", "")
aes_key = image_info.get("aeskey", "")
if file_url and aes_key:
file_path = await self._download_and_save_media(file_url, aes_key, "image")
if file_path:
filename = os.path.basename(file_path)
content_parts.append(f"[image: {filename}]")
media_paths.append(file_path)
else:
content_parts.append("[image: download failed]")
else:
content_parts.append("[image: download failed]")
elif msg_type == "voice":
voice_info = body.get("voice", {})
# Voice message already contains transcribed content from WeCom
voice_content = voice_info.get("content", "")
if voice_content:
content_parts.append(f"[voice] {voice_content}")
else:
content_parts.append("[voice]")
elif msg_type == "file":
file_info = body.get("file", {})
file_url = file_info.get("url", "")
aes_key = file_info.get("aeskey", "")
file_name = file_info.get("name", "unknown")
if file_url and aes_key:
file_path = await self._download_and_save_media(file_url, aes_key, "file", file_name)
if file_path:
content_parts.append(f"[file: {file_name}]")
media_paths.append(file_path)
else:
content_parts.append(f"[file: {file_name}: download failed]")
else:
content_parts.append(f"[file: {file_name}: download failed]")
elif msg_type == "mixed":
# Mixed content contains multiple message items
msg_items = body.get("mixed", {}).get("msg_item", [])
for item in msg_items:
item_type = item.get("msgtype", "")
if item_type == "text":
text = item.get("text", {}).get("content", "")
if text:
content_parts.append(text)
elif item_type == "image":
file_url = item.get("image", {}).get("url", "")
aes_key = item.get("image", {}).get("aeskey", "")
if file_url and aes_key:
file_path = await self._download_and_save_media(file_url, aes_key, "image")
if file_path:
filename = os.path.basename(file_path)
content_parts.append(f"[image: {filename}]")
media_paths.append(file_path)
else:
content_parts.append(MSG_TYPE_MAP.get(item_type, f"[{item_type}]"))
else:
content_parts.append(MSG_TYPE_MAP.get(msg_type, f"[{msg_type}]"))
content = "\n".join(content_parts) if content_parts else ""
if not content:
return
# Store frame for this chat to enable replies
self._chat_frames[chat_id] = frame
# Forward to message bus
await self._handle_message(
sender_id=sender_id,
chat_id=chat_id,
content=content,
media=media_paths or None,
metadata={
"message_id": msg_id,
"msg_type": msg_type,
"chat_type": chat_type,
}
)
except Exception as e:
logger.error("Error processing WeCom message: {}", e)
async def _download_and_save_media(
self,
file_url: str,
aes_key: str,
media_type: str,
filename: str | None = None,
) -> str | None:
"""
Download and decrypt media from WeCom.
Returns:
file_path or None if download failed
"""
try:
data, fname = await self._client.download_file(file_url, aes_key)
if not data:
logger.warning("Failed to download media from WeCom")
return None
if len(data) > WECOM_UPLOAD_MAX_BYTES:
logger.warning(
"WeCom inbound media too large: {} bytes (max {})",
len(data),
WECOM_UPLOAD_MAX_BYTES,
)
return None
media_dir = get_media_dir("wecom")
if not filename:
filename = fname or f"{media_type}_{hash(file_url) % 100000}"
filename = _sanitize_filename(filename)
file_path = media_dir / filename
await asyncio.to_thread(file_path.write_bytes, data)
logger.debug("Downloaded {} to {}", media_type, file_path)
return str(file_path)
except Exception as e:
logger.error("Error downloading media: {}", e)
return None
async def _upload_media_ws(
self, client: Any, file_path: str,
) -> "tuple[str, str] | tuple[None, None]":
"""Upload a local file to WeCom via WebSocket 3-step protocol (base64).
Uses the WeCom WebSocket upload commands directly via
``client._ws_manager.send_reply()``:
``aibot_upload_media_init``   → upload_id
``aibot_upload_media_chunk``  × N (512 KB raw per chunk, base64)
``aibot_upload_media_finish`` → media_id
Returns (media_id, media_type) on success, (None, None) on failure.
"""
from wecom_aibot_sdk.utils import generate_req_id as _gen_req_id
try:
fname = os.path.basename(file_path)
media_type = _guess_wecom_media_type(fname)
# Read file size and data in a thread to avoid blocking the event loop
def _read_file():
file_size = os.path.getsize(file_path)
if file_size > WECOM_UPLOAD_MAX_BYTES:
raise ValueError(
f"File too large: {file_size} bytes (max {WECOM_UPLOAD_MAX_BYTES})"
)
with open(file_path, "rb") as f:
return file_size, f.read()
file_size, data = await asyncio.to_thread(_read_file)
# MD5 is used for file integrity only, not cryptographic security
md5_hash = hashlib.md5(data).hexdigest()
CHUNK_SIZE = 512 * 1024 # 512 KB raw (before base64)
mv = memoryview(data)
chunk_list = [bytes(mv[i : i + CHUNK_SIZE]) for i in range(0, file_size, CHUNK_SIZE)]
n_chunks = len(chunk_list)
del mv, data
# Step 1: init
req_id = _gen_req_id("upload_init")
resp = await client._ws_manager.send_reply(req_id, {
"type": media_type,
"filename": fname,
"total_size": file_size,
"total_chunks": n_chunks,
"md5": md5_hash,
}, "aibot_upload_media_init")
if resp.errcode != 0:
logger.warning("WeCom upload init failed ({}): {}", resp.errcode, resp.errmsg)
return None, None
upload_id = resp.body.get("upload_id") if resp.body else None
if not upload_id:
logger.warning("WeCom upload init: no upload_id in response")
return None, None
# Step 2: send chunks
for i, chunk in enumerate(chunk_list):
req_id = _gen_req_id("upload_chunk")
resp = await client._ws_manager.send_reply(req_id, {
"upload_id": upload_id,
"chunk_index": i,
"base64_data": base64.b64encode(chunk).decode(),
}, "aibot_upload_media_chunk")
if resp.errcode != 0:
logger.warning("WeCom upload chunk {} failed ({}): {}", i, resp.errcode, resp.errmsg)
return None, None
# Step 3: finish
req_id = _gen_req_id("upload_finish")
resp = await client._ws_manager.send_reply(req_id, {
"upload_id": upload_id,
}, "aibot_upload_media_finish")
if resp.errcode != 0:
logger.warning("WeCom upload finish failed ({}): {}", resp.errcode, resp.errmsg)
return None, None
media_id = resp.body.get("media_id") if resp.body else None
if not media_id:
logger.warning("WeCom upload finish: no media_id in response body={}", resp.body)
return None, None
suffix = "..." if len(media_id) > 16 else ""
logger.debug("WeCom uploaded {} ({}) → media_id={}", fname, media_type, media_id[:16] + suffix)
return media_id, media_type
except ValueError as e:
logger.warning("WeCom upload skipped for {}: {}", file_path, e)
return None, None
except Exception as e:
logger.error("WeCom _upload_media_ws error for {}: {}", file_path, e)
return None, None
async def send(self, msg: OutboundMessage) -> None:
"""Send a message through WeCom."""
if not self._client:
logger.warning("WeCom client not initialized")
return
try:
content = (msg.content or "").strip()
is_progress = bool(msg.metadata.get("_progress"))
# Get the stored frame for this chat
frame = self._chat_frames.get(msg.chat_id)
# Send media files via WebSocket upload
for file_path in msg.media or []:
upload_path = file_path
if not os.path.isfile(upload_path) and not os.path.isabs(file_path):
upload_path = str(get_workspace_path() / file_path)
if not os.path.isfile(upload_path):
logger.warning("WeCom media file not found: {}", file_path)
continue
media_id, media_type = await self._upload_media_ws(self._client, upload_path)
if media_id:
if frame:
await self._client.reply(frame, {
"msgtype": media_type,
media_type: {"media_id": media_id},
})
else:
await self._client.send_message(msg.chat_id, {
"msgtype": media_type,
media_type: {"media_id": media_id},
})
logger.debug("WeCom sent {}{}", media_type, msg.chat_id)
else:
content += f"\n[file upload failed: {os.path.basename(file_path)}]"
if not content:
return
if frame:
# Both progress and final messages must use reply_stream (cmd="aibot_respond_msg").
# The plain reply() uses cmd="reply" which does not support "text" msgtype
# and causes errcode=40008 from WeCom API.
stream_id = self._generate_req_id("stream")
await self._client.reply_stream(
frame,
stream_id,
content,
finish=not is_progress,
)
logger.debug(
"WeCom {} sent to {}",
"progress" if is_progress else "message",
msg.chat_id,
)
else:
# No frame (e.g. cron push): proactive send only supports markdown
await self._client.send_message(msg.chat_id, {
"msgtype": "markdown",
"markdown": {"content": content},
})
logger.info("WeCom proactive send to {}", msg.chat_id)
except Exception:
logger.exception("Error sending WeCom message to chat_id={}", msg.chat_id)

View File

@ -0,0 +1,9 @@
# Speech Model Directory
This directory is reserved for local Whisper `.bin` model files and is intentionally not committed to source control.
Put the file configured by `STT_MODEL` here, for example:
- `ggml-small-q8_0.bin`
If the model file is missing, the backend will still start, but it will log a startup warning, and speech transcription requests will fail until the file is added.
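For illustration, a minimal sketch of the kind of startup check this behavior implies; `check_stt_model` is a hypothetical name, while `STT_MODEL` and `STT_MODEL_DIR` match the deployment compose files:

```python
# Hypothetical sketch only; the real check lives in the backend.
import logging
import os
from pathlib import Path

logger = logging.getLogger(__name__)

def check_stt_model() -> Path | None:
    """Return the model path if present, else warn and return None."""
    model_dir = Path(os.environ.get("STT_MODEL_DIR", "data/model"))
    model_name = os.environ.get("STT_MODEL", "ggml-small-q8_0.bin")
    model_path = model_dir / model_name
    if not model_path.is_file():
        # Startup continues; transcription will fail until the file exists.
        logger.warning("STT model not found: %s", model_path)
        return None
    return model_path
```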

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -4,4 +4,4 @@
"user_md": "- 语言: 中文\n- 风格: 专业\n- 偏好: 简明且有步骤",
"tools_md": "- 谨慎使用 shell\n- 修改文件后复核\n- 失败时说明原因并重试策略",
"identity_md": "- 角色: 企业数字员工\n- 领域: 运维与任务执行"
}
}

View File

@ -0,0 +1,325 @@
# Dashboard Nanobot Code Structure Standards (Mandatory)
This document defines the structural boundaries and splitting rules for subsequent frontend, backend, and `dashboard-edge` development.
The goal is not to "split into as many files as possible", but to:
- keep the assembly layer thin enough
- keep business boundaries clear
- prevent single files from accumulating multiple responsibilities
- keep future iterations on a low-risk, small-step verification path
From the moment it lands, this document is enforced as a **mandatory standard for all subsequent development**.
---
## 1. General Principles
### 1.1 Layer first, split files second
- First separate "page assembly / business orchestration / infrastructure / pure view"; only then decide whether to split files further.
- Never split tightly coupled logic into a pile of fragments just to "look modular".
- Medium-sized "single-topic controller" files are allowed; piling multiple topics into one file is not.
### 1.2 Domain cohesion over mechanical splitting
- The first criterion for splitting code is "does this still belong to the same business domain", not "can it be split any smaller".
- Reads, writes, validation, light orchestration, and a small amount of derived logic within one business domain may stay in the same module.
- If a split would only create extra hops, hide real dependencies, or hurt readability, do not split further.
- Splitting is genuinely needed when code crosses domains, layers, or boundaries, not merely because a file runs long.
### 1.3 Low-risk refactoring first
- Structural refactoring should prefer "moving and consolidating" code without changing business behavior along the way.
- Within a single round of changes, by default do **not** combine:
  - large-scale structural changes
  - new features
  - behavior fixes
- If a behavior fix is truly necessary, only fix problems directly introduced by the split.
### 1.4 The assembly layer must stay thin
- Page, routing, and application-startup layers are responsible for assembly only.
- The assembly layer may do dependency injection, state wiring, and event forwarding.
- The assembly layer must not carry complex business decisions, persistence details, or long workflow orchestration.
### 1.5 New files must be named by topic
- A file name must state its responsibility directly.
- Vague names are forbidden, for example:
  - `helpers2.py`
  - `misc.ts`
  - `commonPage.tsx`
  - `temp_service.py`
---
## 2. Frontend Structure Standards
### 2.1 Directory layering
The frontend is organized uniformly into the following layers:
- `frontend/src/app`
  - application shell, global route views, global initialization
- `frontend/src/modules/<domain>`
  - domain module entry
- `frontend/src/modules/<domain>/components`
  - pure view components, overlays, section components
- `frontend/src/modules/<domain>/hooks`
  - in-domain controller hooks and state-orchestration hooks
- `frontend/src/modules/<domain>/api`
  - API request wrappers used only by this domain
- `frontend/src/modules/<domain>/shared`
  - pure functions, constants, and type bridges shared within the domain
- `frontend/src/components`
  - cross-module shared UI components
- `frontend/src/utils`
  - genuinely cross-domain utilities
The goal of directory layering is stable boundaries, not splitting every piece of logic into its own file:
- Strongly related views, state, and interaction logic within one page domain may live close together in the same module
- Only when a piece of logic is stably reused by multiple pages or sub-flows should it be promoted to a higher layer
- Never split a continuously readable page flow into many small back-and-forth files just to keep files "shorter"
### 2.2 Page file responsibilities
Page files such as:
- `frontend/src/modules/platform/PlatformDashboardPage.tsx`
- `frontend/src/modules/platform/NodeWorkspacePage.tsx`
- `frontend/src/modules/platform/NodeHomePage.tsx`
must follow these rules:
- Do page assembly only
- Only compose existing sections, overlays, and controller hooks
- Do not directly carry long API calls, side effects, or data-cleaning logic
- If a page is itself a single business domain with continuously readable logic, it may keep a moderate amount of in-page state and event handling
- Do not force tightly coupled page logic into multiple hooks / sections / shared files just to reduce line counts
Target size for page files:
- Line count is only a warning signal, never a hard splitting criterion
- First judge whether the page still belongs to a single business domain, reads sequentially, and has clear dependencies
- Only when a page simultaneously carries multiple subdomains, multiple overlay flows, or orchestrates multiple data sources should a page controller hook or section assembly component be extracted first
### 2.3 Controller hook standards
Controller hooks carry:
- page state
- side effects
- API call orchestration
- event handling
- derived data
Typical names:
- `useNodeHomePage`
- `useNodeWorkspacePage`
- `usePlatformDashboardPage`
Rules:
- One hook serves exactly one page or one well-defined sub-flow
- A hook does not directly produce large amounts of JSX
- A hook may compose smaller sub-hooks, but never split just for the sake of splitting
- If page logic is simple, a "page master hook" is not required
- Extract a controller hook only when side-effect orchestration, state coupling, or API interaction starts to hurt page readability
Target size for controller hooks:
- Line count is only a risk signal
- Prioritize a continuous flow, clear naming, and well-scoped state
- If further splitting would only deepen the call chain and make context harder to trace, do not split
- Only when a hook clearly carries multiple sub-flows should it be split into sub-hooks by topic, or have its stably reused logic promoted to `shared`/`api`
### 2.4 View component standards
Components fall into two categories:
- Section components: e.g. list areas, detail areas, summary card areas
- Overlay components: e.g. Drawer, Modal, Sheet
Rules:
- View components do not request APIs directly by default
- View components only receive already-prepared props
- Pure view components keep no business caches tightly coupled to a page
- Not every small fragment needs to become a component; extract one only for clear reuse, a distinct visual section, or a significant reduction in page noise
### 2.5 Frontend reuse principles
- Extract "stably reused patterns", not "code that happens to repeat once"
- Three or more repetitions is the threshold for considering extraction
- Same-domain reuse goes in `modules/<domain>/shared` first
- Cross-domain reuse goes in `src/components` or `src/utils` first
- If the extracted interface is harder to understand than the inline implementation, do not extract
- Never create over-wrapped "pseudo-reuse layers" that only a single page uses
### 2.6 Frontend prohibitions
- Never turn a page into "one file managing state, APIs, overlays, lists, details, search, and pagination"
- Never re-couple styles, business logic, and view structure back into a single file
- Never create super-generic components with no clear responsibility
- Never build unreadable over-abstractions just to cut line counts
- Never shred a single page domain into piles of tiny hooks, sections, and shared files just to satisfy structure metrics
- Never add components or hooks that are pure forwarding, pure wrapping, and carry no independent semantic value
---
## 3. Backend Structure Standards
### 3.1 Directory layering
The backend is organized uniformly along the following boundaries:
- `backend/main.py`
  - startup entry only
- `backend/app_factory.py`
  - application instance creation
- `backend/bootstrap`
  - dependency assembly, application initialization, lifecycle wiring
- `backend/api`
  - FastAPI routing layer
- `backend/services`
  - business use cases and domain services
- `backend/core`
  - database, cache, configuration, infrastructure adapters
- `backend/models`
  - ORM models
- `backend/schemas`
  - request/response DTOs
- `backend/providers`
  - runtime/workspace/provision adapter layer
### 3.2 Startup and assembly layer standards
The following files must remain pure assembly layers:
- `backend/main.py`
- `backend/app_factory.py`
- `backend/bootstrap/app_runtime.py`
Rules (a minimal sketch follows the list):
- Only create dependencies, inject them, register routes, and bind lifecycles
- No business SQL
- No domain rule decisions
- No long workflow orchestration
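As a reference point, a minimal sketch of an assembly-only app factory, assuming FastAPI as stated in section 3.1; `create_app`, the lifespan hook, and the commented-out router registration are illustrative names, not the project's actual code:

```python
# Illustrative sketch: the assembly layer wires things up and nothing more.
from contextlib import asynccontextmanager

from fastapi import FastAPI


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Lifecycle binding only: acquire shared resources here, release below.
    app.state.ready = True
    yield
    app.state.ready = False


def create_app() -> FastAPI:
    app = FastAPI(lifespan=lifespan)
    # Route registration only; no SQL, domain rules, or orchestration here.
    # app.include_router(bots.router, prefix="/api")  # hypothetical router
    return app
```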
### 3.3 Router standards
`backend/api/*.py` may only handle:
- receiving HTTP parameters
- schema validation
- calling a service
- converting domain exceptions into HTTP exceptions
Routers must not handle (see the sketch after this list):
- multi-step business orchestration
- heavy data aggregation
- cross-table database assembly
- local filesystem read/write details
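A minimal sketch of a router that stays inside these limits, assuming FastAPI and Pydantic; the DTO, service function, and domain exception are inline stand-ins for what would live in `backend/schemas` and `backend/services`:

```python
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel


class BotDetail(BaseModel):            # stand-in DTO (backend/schemas)
    id: str
    name: str


class BotNotFoundError(Exception):     # stand-in domain error (backend/services)
    pass


async def get_bot_detail(bot_id: str) -> BotDetail:   # stand-in service call
    raise BotNotFoundError(bot_id)


router = APIRouter(prefix="/bots")


@router.get("/{bot_id}", response_model=BotDetail)
async def get_bot(bot_id: str) -> BotDetail:
    try:
        # The router only forwards to the service...
        return await get_bot_detail(bot_id)
    except BotNotFoundError as exc:
        # ...and translates the domain error into an HTTP error.
        raise HTTPException(status_code=404, detail=f"bot not found: {exc}") from exc
```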
### 3.4 Service standards
Services must be organized cohesively by business domain, not mechanically shredded to compress line counts.
Allowed service types:
- `*_settings_service.py`
- `*_usage_service.py`
- `*_activity_service.py`
- `*_analytics_service.py`
- `*_overview_service.py`
- `*_query_service.py`
- `*_command_service.py`
- `*_lifecycle_service.py`
Service file rules:
- One file covers exactly one business domain or one stable sub-topic
- A single file may contain that domain's queries, writes, validation, and a small amount of derived logic
- Private helpers are allowed within a file, but a helper may only serve the current domain
- Further splitting is mandatory only when a file clearly crosses domains, or drags router/core/provider responsibilities into itself
- Never create pure-forwarding, pure re-export, or alias-only service layers just to "look more modular"
Service size rules (a sketch follows the list):
- Line count is only a warning signal, never a mechanical splitting criterion
- First judge whether the file still holds a single business domain, reads sequentially, and has a clear dependency direction
- A file that is fairly large but has a stable domain boundary, low jump cost, and continuous context may stay as it is
- A file that crosses domains or layers, or mixes in unrelated responsibilities, must be split even if it is small
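For illustration, a sketch of a single-domain service file in this spirit, a hypothetical `bot_usage_service.py`; the in-memory list stands in for the real persistence layer, and the field names loosely mirror the `bot_request_usage` table:

```python
from dataclasses import dataclass
from datetime import datetime, timezone


@dataclass
class UsageRecord:
    bot_id: str
    input_tokens: int
    output_tokens: int
    created_at: datetime


_RECORDS: list[UsageRecord] = []   # stand-in for the real persistence layer


def _validate_tokens(input_tokens: int, output_tokens: int) -> None:
    # Private helper that serves only this domain, as the rules allow.
    if input_tokens < 0 or output_tokens < 0:
        raise ValueError("token counts must be non-negative")


def record_usage(bot_id: str, input_tokens: int, output_tokens: int) -> UsageRecord:
    """Write path: validate, then persist one usage row."""
    _validate_tokens(input_tokens, output_tokens)
    record = UsageRecord(bot_id, input_tokens, output_tokens,
                         datetime.now(timezone.utc))
    _RECORDS.append(record)
    return record


def total_tokens(bot_id: str) -> int:
    """Query path: a derived aggregate for the same domain."""
    return sum(r.input_tokens + r.output_tokens
               for r in _RECORDS if r.bot_id == bot_id)
```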
### 3.5 Schema standards
- `schemas` only defines DTOs
- A schema must never read the database, touch files, or make network requests directly
- Schema field evolution must keep the frontend/backend contract traceable
### 3.6 Core standards
`core` may only contain:
- database and session management
- cache
- configuration
- infrastructure adapters
Never stuff domain business back into `core` to "dodge a growing service".
### 3.7 Provider standards
`providers` handles only runtime/workspace/deployment-target differences.
Never push platform business logic into a provider.
---
## 4. Execution Rules for Subsequent Development
### 4.1 Default order for each round of changes
1. Audit responsibility boundaries first
2. Thin out the assembly layer
3. Then extract stably reused blocks
4. Only then consider further fine-grained splitting
### 4.2 Verification rules
- After frontend structural changes, run the `frontend` build check by default
- After backend structural changes, run at least `python3 -m py_compile` by default
- If a change touches runtime behavior or boundary protocols, consider adding higher-level verification on top
### 4.3 Documentation sync rules
The design documents must be synchronized whenever:
- a new layer of directory boundary is added
- a standard splitting pattern for a new domain is added
- the responsibility split of a page or service changes
- a compatibility layer is formally downgraded to an assembly/export layer
### 4.4 Prohibitions
- Never return to the "pile features into big files" style of development
- Never add new logic back into a compatibility layer out of convenience
- Never over-abstract without a clear reuse payoff
- Never forcibly shred a single business domain to satisfy line-count metrics
- Never rewrite the UI, the data flow, and the API protocol in one change
---
## 5. Current Execution Baseline (2026-03)
The current structure-governance goals come in two tiers:
- Tier 1: main entries, page entries, and route entries must become thin
- Tier 2: in-domain services / hooks / overlays / sections must be consolidated by topic
All subsequent features and refactors are executed according to this document.

View File

@ -1,36 +1,42 @@
# Dashboard Nanobot Database Design Document (current implementation)
# Dashboard Nanobot Database Design Document
The database defaults to SQLite (`data/nanobot_dashboard.db`).
The database defaults to PostgreSQL (the psycopg3 driver is recommended).
## 1. ERD
```mermaid
erDiagram
BOTINSTANCE ||--o{ BOTMESSAGE : "messages"
NANOBOTIMAGE ||--o{ BOTINSTANCE : "referenced by"
bot_instance ||--o{ bot_message : "messages"
bot_instance ||--o{ bot_request_usage : "usage"
bot_instance ||--o{ bot_activity_event : "events"
bot_image ||--o{ bot_instance : "referenced by"
BOTINSTANCE {
bot_instance {
string id PK
string name
boolean enabled
string access_password
string workspace_dir UK
string docker_status
string image_tag
string current_state
text last_action
string last_action
string image_tag
datetime created_at
datetime updated_at
}
BOTMESSAGE {
bot_message {
int id PK
string bot_id FK
string role
text text
text media_json
string feedback
datetime feedback_at
datetime created_at
}
NANOBOTIMAGE {
bot_image {
string tag PK
string image_id
string version
@ -38,48 +44,81 @@ erDiagram
string source_dir
datetime created_at
}
bot_request_usage {
int id PK
string bot_id FK
string request_id
string channel
string status
string provider
string model
int input_tokens
int output_tokens
int total_tokens
datetime started_at
datetime completed_at
datetime created_at
}
bot_activity_event {
int id PK
string bot_id FK
string request_id
string event_type
string channel
string detail
text metadata_json
datetime created_at
}
sys_setting {
string key PK
string name
string category
string description
string value_type
text value_json
boolean is_public
int sort_order
datetime created_at
datetime updated_at
}
```
## 2. Design Principles
- The database keeps only runtime indexes and message history.
- Bot parameters (model, channels, resource quotas, the 5 MD files) are persisted uniformly in:
- The database keeps runtime indexes, message history, usage statistics, and operations events.
- Core bot configuration (channels, resource quotas, the 5 MD files) is persisted uniformly on the filesystem:
- `.nanobot/config.json`
- `.nanobot/workspace/*.md`
- `.nanobot/env.json`
- `channelroute` is deprecated; channels are no longer stored in the database.
## 3. Table Descriptions
### 3.1 `botinstance`
### 3.1 `bot_instance`
Stores the basic bot index and runtime state.
Stores only the basic index and runtime state:
### 3.2 `bot_message`
Dashboard-channel conversation history (for session replay and feedback).
- Identity and display: `id`, `name`
- Container and image: `docker_status`, `image_tag`
- Runtime state: `current_state`, `last_action`
- Paths and timestamps: `workspace_dir`, `created_at`, `updated_at`
### 3.3 `bot_image`
Base image registry table.
### 3.2 `botmessage`
### 3.4 `bot_request_usage`
Detailed records of model-call usage.
Dashboard-channel conversation history (for session replay):
### 3.5 `bot_activity_event`
Operations event log (container start/stop, command submission, system alerts, and similar).
- `role`: `user | assistant`
- `text`: text content
- `media_json`: JSON of attachment relative paths
### 3.6 `sys_setting`
Platform-wide settings.
### 3.3 `nanobotimage`
## 4. Initialization and Migration Strategy
Base image registry table (manually registered):
Database initialization is now performed explicitly, offline:
- `tag`: e.g. `nanobot-base:v0.1.4`
- `status`: `READY | UNKNOWN | ERROR`
- `source_dir`: source identifier (usually `manual`)
## 4. Migration Strategy
At service startup:
1. `SQLModel.metadata.create_all(engine)`
2. Clean up deprecated tables: `DROP TABLE IF EXISTS channelroute`
3. Align columns on `botinstance`, dropping legacy configuration columns (keeping the current minimal field set)
1. `scripts/sql/create-tables.sql` creates the business tables and indexes.
2. `scripts/sql/init-data.sql` seeds `sys_setting` and the default `skill_market_item` data.
3. In full-deployment scenarios, `scripts/init-full-db.sh` runs the PostgreSQL bootstrap SQL, the table-creation SQL, and the data-initialization SQL in order.
4. At backend startup (`backend/core/database.py`), only the required tables and core `sys_setting` entries are checked; if any are missing, startup aborts immediately, with no runtime migration or schema repair. A sketch of this check follows.
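A minimal sketch of that fail-fast check, assuming SQLAlchemy; the table names come from this document, while the required `sys_setting` keys and the engine wiring are placeholders:

```python
from sqlalchemy import create_engine, inspect, text

REQUIRED_TABLES = {
    "bot_instance", "bot_message", "bot_image",
    "bot_request_usage", "bot_activity_event", "sys_setting",
}
REQUIRED_SETTING_KEYS = {"example.setting.key"}  # placeholder keys


def verify_schema(database_url: str) -> None:
    """Raise (aborting startup) if required tables or settings are missing."""
    engine = create_engine(database_url)
    missing_tables = REQUIRED_TABLES - set(inspect(engine).get_table_names())
    if missing_tables:
        raise RuntimeError(
            f"missing tables (run scripts/init-full-db.sh): {missing_tables}")
    with engine.connect() as conn:
        keys = set(conn.execute(text("SELECT key FROM sys_setting")).scalars())
    missing_keys = REQUIRED_SETTING_KEYS - keys
    if missing_keys:
        raise RuntimeError(f"missing sys_setting keys: {missing_keys}")
```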

View File

@ -0,0 +1,156 @@
services:
postgres:
image: ${POSTGRES_IMAGE:-postgres:16-alpine}
container_name: dashboard-nanobot-postgres
restart: unless-stopped
environment:
TZ: ${TZ:-Asia/Shanghai}
POSTGRES_USER: ${POSTGRES_SUPERUSER:-postgres}
POSTGRES_PASSWORD: ${POSTGRES_SUPERPASSWORD:?POSTGRES_SUPERPASSWORD is required}
POSTGRES_DB: ${POSTGRES_BOOTSTRAP_DB:-postgres}
volumes:
- ./data/postgres:/var/lib/postgresql/data
expose:
- "5432"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U \"$${POSTGRES_USER}\" -d \"$${POSTGRES_DB}\""]
interval: 10s
timeout: 5s
retries: 10
start_period: 20s
logging:
driver: json-file
options:
max-size: "20m"
max-file: "3"
redis:
image: ${REDIS_IMAGE:-redis:7-alpine}
container_name: dashboard-nanobot-redis
restart: unless-stopped
environment:
TZ: ${TZ:-Asia/Shanghai}
command: ["redis-server", "--appendonly", "yes", "--save", "60", "1000"]
volumes:
- ./data/redis:/data
expose:
- "6379"
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 10
start_period: 10s
logging:
driver: json-file
options:
max-size: "20m"
max-file: "3"
backend:
build:
context: .
dockerfile: backend/Dockerfile
args:
PYTHON_BASE_IMAGE: ${PYTHON_BASE_IMAGE:-python:3.12-slim}
PIP_INDEX_URL: ${PIP_INDEX_URL:-https://pypi.org/simple}
PIP_TRUSTED_HOST: ${PIP_TRUSTED_HOST:-}
image: dashboard-nanobot/backend:${BACKEND_IMAGE_TAG:-latest}
container_name: dashboard-nanobot-backend
restart: unless-stopped
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
environment:
TZ: ${TZ:-Asia/Shanghai}
APP_HOST: 0.0.0.0
APP_PORT: 8000
APP_RELOAD: "false"
DATABASE_ECHO: "false"
DATABASE_POOL_SIZE: ${DATABASE_POOL_SIZE:-20}
DATABASE_MAX_OVERFLOW: ${DATABASE_MAX_OVERFLOW:-40}
DATABASE_POOL_TIMEOUT: ${DATABASE_POOL_TIMEOUT:-30}
DATABASE_POOL_RECYCLE: ${DATABASE_POOL_RECYCLE:-1800}
DATA_ROOT: /app/data
BOTS_WORKSPACE_ROOT: ${HOST_BOTS_WORKSPACE_ROOT}
DOCKER_NETWORK_NAME: ${DOCKER_NETWORK_NAME:-dashboard-nanobot-network}
DATABASE_URL: postgresql+psycopg://${POSTGRES_APP_USER}:${POSTGRES_APP_PASSWORD}@postgres:5432/${POSTGRES_APP_DB}
REDIS_ENABLED: ${REDIS_ENABLED:-true}
REDIS_URL: redis://redis:6379/${REDIS_DB:-8}
REDIS_PREFIX: ${REDIS_PREFIX:-dashboard_nanobot}
REDIS_DEFAULT_TTL: ${REDIS_DEFAULT_TTL:-60}
DEFAULT_BOT_SYSTEM_TIMEZONE: ${DEFAULT_BOT_SYSTEM_TIMEZONE:-Asia/Shanghai}
PANEL_ACCESS_PASSWORD: ${PANEL_ACCESS_PASSWORD:-}
WORKSPACE_PREVIEW_SIGNING_SECRET: ${WORKSPACE_PREVIEW_SIGNING_SECRET:-}
WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS: ${WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS:-3600}
CORS_ALLOWED_ORIGINS: ${CORS_ALLOWED_ORIGINS:-}
STT_ENABLED: ${STT_ENABLED:-true}
STT_MODEL: ${STT_MODEL:-ggml-small-q8_0.bin}
STT_MODEL_DIR: ${STT_MODEL_DIR:-/app/data/model}
STT_DEVICE: ${STT_DEVICE:-cpu}
STT_MAX_AUDIO_SECONDS: ${STT_MAX_AUDIO_SECONDS:-20}
STT_DEFAULT_LANGUAGE: ${STT_DEFAULT_LANGUAGE:-zh}
STT_FORCE_SIMPLIFIED: ${STT_FORCE_SIMPLIFIED:-true}
STT_AUDIO_PREPROCESS: ${STT_AUDIO_PREPROCESS:-true}
STT_AUDIO_FILTER: ${STT_AUDIO_FILTER:-highpass=f=120,lowpass=f=7600,afftdn=nf=-20}
STT_INITIAL_PROMPT: ${STT_INITIAL_PROMPT:-以下内容可能包含简体中文和英文术语。请优先输出简体中文,英文单词、缩写、品牌名和数字保持原文,不要翻译。}
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./data:/app/data
- ${HOST_BOTS_WORKSPACE_ROOT}:${HOST_BOTS_WORKSPACE_ROOT}
expose:
- "8000"
healthcheck:
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/api/health', timeout=3).read()"]
interval: 15s
timeout: 5s
retries: 5
start_period: 20s
logging:
driver: json-file
options:
max-size: "20m"
max-file: "3"
nginx:
build:
context: ./frontend
dockerfile: Dockerfile
args:
NODE_BASE_IMAGE: ${NODE_BASE_IMAGE:-node:22-alpine}
NGINX_BASE_IMAGE: ${NGINX_BASE_IMAGE:-nginx:alpine}
NPM_REGISTRY: ${NPM_REGISTRY:-https://registry.npmjs.org/}
VITE_API_BASE: /api
VITE_WS_BASE: /ws/monitor
image: dashboard-nanobot/nginx:${FRONTEND_IMAGE_TAG:-latest}
container_name: dashboard-nanobot-nginx
restart: unless-stopped
environment:
TZ: ${TZ:-Asia/Shanghai}
UPLOAD_MAX_MB: ${UPLOAD_MAX_MB:-100}
depends_on:
backend:
condition: service_healthy
ports:
- "${NGINX_PORT}:80"
healthcheck:
test: ["CMD", "wget", "-q", "-O", "/dev/null", "http://127.0.0.1/"]
interval: 15s
timeout: 5s
retries: 5
start_period: 10s
logging:
driver: json-file
options:
max-size: "20m"
max-file: "3"
networks:
default:
name: ${DOCKER_NETWORK_NAME:-dashboard-nanobot-network}
driver: bridge
ipam:
config:
- subnet: ${DOCKER_NETWORK_SUBNET:-172.20.0.0/16}

View File

@ -12,28 +12,29 @@ services:
restart: unless-stopped
environment:
APP_HOST: 0.0.0.0
APP_PORT: 8000
APP_PORT: 8002
APP_RELOAD: "false"
DATABASE_ECHO: "false"
DATABASE_POOL_SIZE: ${DATABASE_POOL_SIZE:-20}
DATABASE_MAX_OVERFLOW: ${DATABASE_MAX_OVERFLOW:-40}
DATABASE_POOL_TIMEOUT: ${DATABASE_POOL_TIMEOUT:-30}
DATABASE_POOL_RECYCLE: ${DATABASE_POOL_RECYCLE:-1800}
UPLOAD_MAX_MB: ${UPLOAD_MAX_MB:-100}
DATA_ROOT: ${HOST_DATA_ROOT}
DATA_ROOT: /app/data
BOTS_WORKSPACE_ROOT: ${HOST_BOTS_WORKSPACE_ROOT}
DOCKER_NETWORK_NAME: ${DOCKER_NETWORK_NAME:-dashboard-nanobot-network}
DATABASE_URL: ${DATABASE_URL:-}
REDIS_ENABLED: ${REDIS_ENABLED:-false}
REDIS_URL: ${REDIS_URL:-}
REDIS_PREFIX: ${REDIS_PREFIX:-dashboard_nanobot}
REDIS_DEFAULT_TTL: ${REDIS_DEFAULT_TTL:-60}
CHAT_PULL_PAGE_SIZE: ${CHAT_PULL_PAGE_SIZE:-60}
COMMAND_AUTO_UNLOCK_SECONDS: ${COMMAND_AUTO_UNLOCK_SECONDS:-10}
DEFAULT_BOT_SYSTEM_TIMEZONE: ${DEFAULT_BOT_SYSTEM_TIMEZONE:-Asia/Shanghai}
PANEL_ACCESS_PASSWORD: ${PANEL_ACCESS_PASSWORD:-}
WORKSPACE_PREVIEW_SIGNING_SECRET: ${WORKSPACE_PREVIEW_SIGNING_SECRET:-}
WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS: ${WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS:-3600}
CORS_ALLOWED_ORIGINS: ${CORS_ALLOWED_ORIGINS:-}
STT_ENABLED: ${STT_ENABLED:-true}
STT_MODEL: ${STT_MODEL:-ggml-small-q8_0.bin}
STT_MODEL_DIR: ${STT_MODEL_DIR:-${HOST_DATA_ROOT}/model}
STT_MODEL_DIR: ${STT_MODEL_DIR:-/app/data/model}
STT_DEVICE: ${STT_DEVICE:-cpu}
STT_MAX_AUDIO_SECONDS: ${STT_MAX_AUDIO_SECONDS:-20}
STT_DEFAULT_LANGUAGE: ${STT_DEFAULT_LANGUAGE:-zh}
@ -43,12 +44,12 @@ services:
STT_INITIAL_PROMPT: ${STT_INITIAL_PROMPT:-以下内容可能包含简体中文和英文术语。请优先输出简体中文,英文单词、缩写、品牌名和数字保持原文,不要翻译。}
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ${HOST_DATA_ROOT}:${HOST_DATA_ROOT}
- ./data:/app/data
- ${HOST_BOTS_WORKSPACE_ROOT}:${HOST_BOTS_WORKSPACE_ROOT}
expose:
- "8000"
- "8002"
healthcheck:
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/api/health', timeout=3).read()"]
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8002/api/health', timeout=3).read()"]
interval: 15s
timeout: 5s
retries: 5
@ -93,4 +94,8 @@ services:
networks:
default:
name: dashboard-nanobot-network
name: ${DOCKER_NETWORK_NAME:-dashboard-nanobot-network}
driver: bridge
ipam:
config:
- subnet: ${DOCKER_NETWORK_SUBNET:-172.20.0.0/16}

View File

@ -1,5 +1,5 @@
# Backend API entry
VITE_API_BASE=http://localhost:8000/api
VITE_API_BASE=/api
# Backend WebSocket entry
VITE_WS_BASE=ws://localhost:8000/ws/monitor
VITE_WS_BASE=/ws/monitor

View File

@ -1,5 +1,5 @@
upstream nanobot_backend {
server backend:8000;
server backend:8002;
}
server {

frontend/package-lock.json generated 100644

File diff suppressed because it is too large

Binary file not shown.

Before: removed image, 148 KiB

Some files were not shown because too many files have changed in this diff.