# NOTE: stray VCS timestamp residue (2026-03-26 06:55:12 +00:00) — not code
|
|
|
import json
|
|
|
|
|
from typing import Any
|
|
|
|
|
|
|
|
|
|
from fastapi import APIRouter, Depends, Query
|
|
|
|
|
from pydantic import BaseModel
|
|
|
|
|
|
|
|
|
|
from app.core.auth import get_current_admin_user
|
|
|
|
|
from app.core.database import get_db_connection
|
|
|
|
|
from app.core.response import create_api_response
|
|
|
|
|
from app.services.async_transcription_service import AsyncTranscriptionService
|
|
|
|
|
from app.services.llm_service import LLMService
|
# NOTE: stray VCS timestamp residue (2026-04-01 08:36:52 +00:00) — not code
|
|
|
from app.services.system_config_service import SystemConfigService
|
# NOTE: stray VCS timestamp residue (2026-03-26 06:55:12 +00:00) — not code
|
|
|
|
|
|
|
|
router = APIRouter()
|
|
|
|
|
llm_service = LLMService()
|
|
|
|
|
transcription_service = AsyncTranscriptionService()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _parse_json_object(value: Any) -> dict[str, Any]:
|
|
|
|
|
if value is None:
|
|
|
|
|
return {}
|
|
|
|
|
if isinstance(value, dict):
|
|
|
|
|
return dict(value)
|
|
|
|
|
if isinstance(value, str):
|
|
|
|
|
value = value.strip()
|
|
|
|
|
if not value:
|
|
|
|
|
return {}
|
|
|
|
|
try:
|
|
|
|
|
parsed = json.loads(value)
|
|
|
|
|
return parsed if isinstance(parsed, dict) else {}
|
|
|
|
|
except json.JSONDecodeError:
|
|
|
|
|
return {}
|
|
|
|
|
return {}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _normalize_string_list(value: Any) -> list[str] | None:
|
|
|
|
|
if value is None:
|
|
|
|
|
return None
|
|
|
|
|
if isinstance(value, list):
|
|
|
|
|
values = [str(item).strip() for item in value if str(item).strip()]
|
|
|
|
|
return values or None
|
|
|
|
|
if isinstance(value, str):
|
|
|
|
|
values = [item.strip() for item in value.split(",") if item.strip()]
|
|
|
|
|
return values or None
|
|
|
|
|
return None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _normalize_int_list(value: Any) -> list[int] | None:
|
|
|
|
|
if value is None:
|
|
|
|
|
return None
|
|
|
|
|
if isinstance(value, list):
|
|
|
|
|
items = value
|
|
|
|
|
elif isinstance(value, str):
|
|
|
|
|
items = [item.strip() for item in value.split(",") if item.strip()]
|
|
|
|
|
else:
|
|
|
|
|
return None
|
|
|
|
|
|
|
|
|
|
normalized = []
|
|
|
|
|
for item in items:
|
|
|
|
|
try:
|
|
|
|
|
normalized.append(int(item))
|
|
|
|
|
except (TypeError, ValueError):
|
|
|
|
|
continue
|
|
|
|
|
return normalized or None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _clean_extra_config(config: dict[str, Any]) -> dict[str, Any]:
|
|
|
|
|
cleaned: dict[str, Any] = {}
|
|
|
|
|
for key, value in (config or {}).items():
|
|
|
|
|
if value is None:
|
|
|
|
|
continue
|
|
|
|
|
if isinstance(value, str):
|
|
|
|
|
stripped = value.strip()
|
|
|
|
|
if stripped:
|
|
|
|
|
cleaned[key] = stripped
|
|
|
|
|
continue
|
|
|
|
|
if isinstance(value, list):
|
|
|
|
|
normalized_list = []
|
|
|
|
|
for item in value:
|
|
|
|
|
if item is None:
|
|
|
|
|
continue
|
|
|
|
|
if isinstance(item, str):
|
|
|
|
|
stripped = item.strip()
|
|
|
|
|
if stripped:
|
|
|
|
|
normalized_list.append(stripped)
|
|
|
|
|
else:
|
|
|
|
|
normalized_list.append(item)
|
|
|
|
|
if normalized_list:
|
|
|
|
|
cleaned[key] = normalized_list
|
|
|
|
|
continue
|
|
|
|
|
cleaned[key] = value
|
|
|
|
|
return cleaned
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _merge_audio_extra_config(
|
|
|
|
|
request: "AudioModelUpsertRequest",
|
|
|
|
|
vocabulary_id: str | None = None,
|
|
|
|
|
) -> dict[str, Any]:
|
|
|
|
|
extra_config = _parse_json_object(request.extra_config)
|
|
|
|
|
|
|
|
|
|
if request.audio_scene == "asr":
|
|
|
|
|
legacy_config = {
|
|
|
|
|
"model": request.asr_model_name,
|
|
|
|
|
"speaker_count": request.asr_speaker_count,
|
|
|
|
|
"language_hints": request.asr_language_hints,
|
|
|
|
|
"disfluency_removal_enabled": request.asr_disfluency_removal_enabled,
|
|
|
|
|
"diarization_enabled": request.asr_diarization_enabled,
|
|
|
|
|
}
|
|
|
|
|
else:
|
|
|
|
|
legacy_config = {
|
|
|
|
|
"model": request.model_name,
|
|
|
|
|
"template_text": request.vp_template_text,
|
|
|
|
|
"duration_seconds": request.vp_duration_seconds,
|
|
|
|
|
"sample_rate": request.vp_sample_rate,
|
|
|
|
|
"channels": request.vp_channels,
|
|
|
|
|
"max_size_bytes": request.vp_max_size_bytes,
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
merged = {**legacy_config, **extra_config}
|
|
|
|
|
|
|
|
|
|
language_hints = _normalize_string_list(merged.get("language_hints"))
|
|
|
|
|
if language_hints is not None:
|
|
|
|
|
merged["language_hints"] = language_hints
|
|
|
|
|
|
|
|
|
|
channel_id = _normalize_int_list(merged.get("channel_id"))
|
|
|
|
|
if channel_id is not None:
|
|
|
|
|
merged["channel_id"] = channel_id
|
|
|
|
|
|
|
|
|
|
resolved_vocabulary_id = vocabulary_id or merged.get("vocabulary_id") or request.asr_vocabulary_id
|
|
|
|
|
if request.audio_scene == "asr" and resolved_vocabulary_id:
|
|
|
|
|
merged["vocabulary_id"] = resolved_vocabulary_id
|
|
|
|
|
|
|
|
|
|
return _clean_extra_config(merged)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _extract_legacy_audio_columns(audio_scene: str, extra_config: dict[str, Any]) -> dict[str, Any]:
|
|
|
|
|
extra_config = _parse_json_object(extra_config)
|
|
|
|
|
columns = {
|
|
|
|
|
"asr_model_name": None,
|
|
|
|
|
"asr_vocabulary_id": None,
|
|
|
|
|
"asr_speaker_count": None,
|
|
|
|
|
"asr_language_hints": None,
|
|
|
|
|
"asr_disfluency_removal_enabled": None,
|
|
|
|
|
"asr_diarization_enabled": None,
|
|
|
|
|
"vp_template_text": None,
|
|
|
|
|
"vp_duration_seconds": None,
|
|
|
|
|
"vp_sample_rate": None,
|
|
|
|
|
"vp_channels": None,
|
|
|
|
|
"vp_max_size_bytes": None,
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if audio_scene == "asr":
|
|
|
|
|
language_hints = extra_config.get("language_hints")
|
|
|
|
|
if isinstance(language_hints, list):
|
|
|
|
|
language_hints = ",".join(str(item).strip() for item in language_hints if str(item).strip())
|
|
|
|
|
columns.update(
|
|
|
|
|
{
|
|
|
|
|
"asr_model_name": extra_config.get("model"),
|
|
|
|
|
"asr_vocabulary_id": extra_config.get("vocabulary_id"),
|
|
|
|
|
"asr_speaker_count": extra_config.get("speaker_count"),
|
|
|
|
|
"asr_language_hints": language_hints,
|
|
|
|
|
"asr_disfluency_removal_enabled": 1 if extra_config.get("disfluency_removal_enabled") is True else 0 if extra_config.get("disfluency_removal_enabled") is False else None,
|
|
|
|
|
"asr_diarization_enabled": 1 if extra_config.get("diarization_enabled") is True else 0 if extra_config.get("diarization_enabled") is False else None,
|
|
|
|
|
}
|
|
|
|
|
)
|
|
|
|
|
else:
|
|
|
|
|
columns.update(
|
|
|
|
|
{
|
|
|
|
|
"vp_template_text": extra_config.get("template_text"),
|
|
|
|
|
"vp_duration_seconds": extra_config.get("duration_seconds"),
|
|
|
|
|
"vp_sample_rate": extra_config.get("sample_rate"),
|
|
|
|
|
"vp_channels": extra_config.get("channels"),
|
|
|
|
|
"vp_max_size_bytes": extra_config.get("max_size_bytes"),
|
|
|
|
|
}
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
return columns
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _normalize_audio_row(row: dict[str, Any]) -> dict[str, Any]:
|
|
|
|
|
extra_config = _parse_json_object(row.get("extra_config"))
|
|
|
|
|
|
|
|
|
|
if row.get("audio_scene") == "asr":
|
|
|
|
|
if extra_config.get("model") is None and row.get("asr_model_name") is not None:
|
|
|
|
|
extra_config["model"] = row["asr_model_name"]
|
|
|
|
|
if extra_config.get("vocabulary_id") is None and row.get("asr_vocabulary_id") is not None:
|
|
|
|
|
extra_config["vocabulary_id"] = row["asr_vocabulary_id"]
|
|
|
|
|
if extra_config.get("speaker_count") is None and row.get("asr_speaker_count") is not None:
|
|
|
|
|
extra_config["speaker_count"] = row["asr_speaker_count"]
|
|
|
|
|
if extra_config.get("language_hints") is None and row.get("asr_language_hints"):
|
|
|
|
|
extra_config["language_hints"] = _normalize_string_list(row["asr_language_hints"])
|
|
|
|
|
if extra_config.get("disfluency_removal_enabled") is None and row.get("asr_disfluency_removal_enabled") is not None:
|
|
|
|
|
extra_config["disfluency_removal_enabled"] = bool(row["asr_disfluency_removal_enabled"])
|
|
|
|
|
if extra_config.get("diarization_enabled") is None and row.get("asr_diarization_enabled") is not None:
|
|
|
|
|
extra_config["diarization_enabled"] = bool(row["asr_diarization_enabled"])
|
|
|
|
|
else:
|
|
|
|
|
if extra_config.get("model") is None and row.get("model_name"):
|
|
|
|
|
extra_config["model"] = row["model_name"]
|
|
|
|
|
if extra_config.get("template_text") is None and row.get("vp_template_text") is not None:
|
|
|
|
|
extra_config["template_text"] = row["vp_template_text"]
|
|
|
|
|
if extra_config.get("duration_seconds") is None and row.get("vp_duration_seconds") is not None:
|
|
|
|
|
extra_config["duration_seconds"] = row["vp_duration_seconds"]
|
|
|
|
|
if extra_config.get("sample_rate") is None and row.get("vp_sample_rate") is not None:
|
|
|
|
|
extra_config["sample_rate"] = row["vp_sample_rate"]
|
|
|
|
|
if extra_config.get("channels") is None and row.get("vp_channels") is not None:
|
|
|
|
|
extra_config["channels"] = row["vp_channels"]
|
|
|
|
|
if extra_config.get("max_size_bytes") is None and row.get("vp_max_size_bytes") is not None:
|
|
|
|
|
extra_config["max_size_bytes"] = row["vp_max_size_bytes"]
|
|
|
|
|
|
|
|
|
|
row["extra_config"] = extra_config
|
|
|
|
|
row["service_model_name"] = extra_config.get("model")
|
|
|
|
|
return row
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class ParameterUpsertRequest(BaseModel):
|
|
|
|
|
param_key: str
|
|
|
|
|
param_name: str
|
|
|
|
|
param_value: str
|
|
|
|
|
value_type: str = "string"
|
|
|
|
|
category: str = "system"
|
|
|
|
|
description: str | None = None
|
|
|
|
|
is_active: bool = True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class LLMModelUpsertRequest(BaseModel):
|
|
|
|
|
model_code: str
|
|
|
|
|
model_name: str
|
|
|
|
|
provider: str | None = None
|
|
|
|
|
endpoint_url: str | None = None
|
|
|
|
|
api_key: str | None = None
|
|
|
|
|
llm_model_name: str
|
|
|
|
|
llm_timeout: int = 120
|
|
|
|
|
llm_temperature: float = 0.7
|
|
|
|
|
llm_top_p: float = 0.9
|
|
|
|
|
llm_max_tokens: int = 2048
|
|
|
|
|
llm_system_prompt: str | None = None
|
|
|
|
|
description: str | None = None
|
|
|
|
|
is_active: bool = True
|
|
|
|
|
is_default: bool = False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class AudioModelUpsertRequest(BaseModel):
|
|
|
|
|
model_code: str
|
|
|
|
|
model_name: str
|
|
|
|
|
audio_scene: str # asr / voiceprint
|
|
|
|
|
provider: str | None = None
|
|
|
|
|
endpoint_url: str | None = None
|
|
|
|
|
api_key: str | None = None
|
|
|
|
|
extra_config: dict[str, Any] | None = None
|
|
|
|
|
asr_model_name: str | None = None
|
|
|
|
|
asr_vocabulary_id: str | None = None
|
|
|
|
|
hot_word_group_id: int | None = None
|
|
|
|
|
asr_speaker_count: int | None = None
|
|
|
|
|
asr_language_hints: str | None = None
|
|
|
|
|
asr_disfluency_removal_enabled: bool | None = None
|
|
|
|
|
asr_diarization_enabled: bool | None = None
|
|
|
|
|
vp_template_text: str | None = None
|
|
|
|
|
vp_duration_seconds: int | None = None
|
|
|
|
|
vp_sample_rate: int | None = None
|
|
|
|
|
vp_channels: int | None = None
|
|
|
|
|
vp_max_size_bytes: int | None = None
|
|
|
|
|
description: str | None = None
|
|
|
|
|
is_active: bool = True
|
|
|
|
|
is_default: bool = False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class LLMModelTestRequest(LLMModelUpsertRequest):
|
|
|
|
|
test_prompt: str | None = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class AudioModelTestRequest(AudioModelUpsertRequest):
|
|
|
|
|
test_file_url: str | None = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.get("/admin/parameters")
|
|
|
|
|
async def list_parameters(
|
|
|
|
|
category: str | None = Query(None),
|
|
|
|
|
keyword: str | None = Query(None),
|
|
|
|
|
current_user=Depends(get_current_admin_user),
|
|
|
|
|
):
|
|
|
|
|
try:
|
|
|
|
|
with get_db_connection() as conn:
|
|
|
|
|
cursor = conn.cursor(dictionary=True)
|
|
|
|
|
query = """
|
|
|
|
|
SELECT param_id, param_key, param_name, param_value, value_type, category,
|
|
|
|
|
description, is_active, created_at, updated_at
|
|
|
|
|
FROM sys_system_parameters
|
|
|
|
|
WHERE 1=1
|
|
|
|
|
"""
|
|
|
|
|
params = []
|
|
|
|
|
if category:
|
|
|
|
|
query += " AND category = %s"
|
|
|
|
|
params.append(category)
|
|
|
|
|
if keyword:
|
|
|
|
|
like_pattern = f"%{keyword}%"
|
|
|
|
|
query += " AND (param_key LIKE %s OR param_name LIKE %s)"
|
|
|
|
|
params.extend([like_pattern, like_pattern])
|
|
|
|
|
|
|
|
|
|
query += " ORDER BY category ASC, param_key ASC"
|
|
|
|
|
cursor.execute(query, tuple(params))
|
|
|
|
|
rows = cursor.fetchall()
|
|
|
|
|
return create_api_response(
|
|
|
|
|
code="200",
|
|
|
|
|
message="获取参数列表成功",
|
|
|
|
|
data={"items": rows, "total": len(rows)},
|
|
|
|
|
)
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return create_api_response(code="500", message=f"获取参数列表失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.get("/admin/parameters/{param_key}")
|
|
|
|
|
async def get_parameter(param_key: str, current_user=Depends(get_current_admin_user)):
|
|
|
|
|
try:
|
|
|
|
|
with get_db_connection() as conn:
|
|
|
|
|
cursor = conn.cursor(dictionary=True)
|
|
|
|
|
cursor.execute(
|
|
|
|
|
"""
|
|
|
|
|
SELECT param_id, param_key, param_name, param_value, value_type, category,
|
|
|
|
|
description, is_active, created_at, updated_at
|
|
|
|
|
FROM sys_system_parameters
|
|
|
|
|
WHERE param_key = %s
|
|
|
|
|
LIMIT 1
|
|
|
|
|
""",
|
|
|
|
|
(param_key,),
|
|
|
|
|
)
|
|
|
|
|
row = cursor.fetchone()
|
|
|
|
|
if not row:
|
|
|
|
|
return create_api_response(code="404", message="参数不存在")
|
|
|
|
|
return create_api_response(code="200", message="获取参数成功", data=row)
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return create_api_response(code="500", message=f"获取参数失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.post("/admin/parameters")
|
|
|
|
|
async def create_parameter(request: ParameterUpsertRequest, current_user=Depends(get_current_admin_user)):
|
|
|
|
|
try:
|
|
|
|
|
with get_db_connection() as conn:
|
|
|
|
|
cursor = conn.cursor(dictionary=True)
|
|
|
|
|
cursor.execute("SELECT param_id FROM sys_system_parameters WHERE param_key = %s", (request.param_key,))
|
|
|
|
|
if cursor.fetchone():
|
|
|
|
|
return create_api_response(code="400", message="参数键已存在")
|
|
|
|
|
|
|
|
|
|
cursor.execute(
|
|
|
|
|
"""
|
|
|
|
|
INSERT INTO sys_system_parameters
|
|
|
|
|
(param_key, param_name, param_value, value_type, category, description, is_active)
|
|
|
|
|
VALUES (%s, %s, %s, %s, %s, %s, %s)
|
|
|
|
|
""",
|
|
|
|
|
(
|
|
|
|
|
request.param_key,
|
|
|
|
|
request.param_name,
|
|
|
|
|
request.param_value,
|
|
|
|
|
request.value_type,
|
|
|
|
|
request.category,
|
|
|
|
|
request.description,
|
|
|
|
|
1 if request.is_active else 0,
|
|
|
|
|
),
|
|
|
|
|
)
|
|
|
|
|
conn.commit()
|
|
|
|
|
return create_api_response(code="200", message="创建参数成功")
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return create_api_response(code="500", message=f"创建参数失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.put("/admin/parameters/{param_key}")
|
|
|
|
|
async def update_parameter(
|
|
|
|
|
param_key: str,
|
|
|
|
|
request: ParameterUpsertRequest,
|
|
|
|
|
current_user=Depends(get_current_admin_user),
|
|
|
|
|
):
|
|
|
|
|
try:
|
|
|
|
|
with get_db_connection() as conn:
|
|
|
|
|
cursor = conn.cursor(dictionary=True)
|
|
|
|
|
cursor.execute("SELECT param_id FROM sys_system_parameters WHERE param_key = %s", (param_key,))
|
|
|
|
|
existed = cursor.fetchone()
|
|
|
|
|
if not existed:
|
|
|
|
|
return create_api_response(code="404", message="参数不存在")
|
|
|
|
|
|
|
|
|
|
new_key = request.param_key or param_key
|
|
|
|
|
if new_key != param_key:
|
|
|
|
|
cursor.execute("SELECT param_id FROM sys_system_parameters WHERE param_key = %s", (new_key,))
|
|
|
|
|
if cursor.fetchone():
|
|
|
|
|
return create_api_response(code="400", message="新的参数键已存在")
|
|
|
|
|
|
|
|
|
|
cursor.execute(
|
|
|
|
|
"""
|
|
|
|
|
UPDATE sys_system_parameters
|
|
|
|
|
SET param_key = %s, param_name = %s, param_value = %s, value_type = %s,
|
|
|
|
|
category = %s, description = %s, is_active = %s
|
|
|
|
|
WHERE param_key = %s
|
|
|
|
|
""",
|
|
|
|
|
(
|
|
|
|
|
new_key,
|
|
|
|
|
request.param_name,
|
|
|
|
|
request.param_value,
|
|
|
|
|
request.value_type,
|
|
|
|
|
request.category,
|
|
|
|
|
request.description,
|
|
|
|
|
1 if request.is_active else 0,
|
|
|
|
|
param_key,
|
|
|
|
|
),
|
|
|
|
|
)
|
|
|
|
|
conn.commit()
|
|
|
|
|
return create_api_response(code="200", message="更新参数成功")
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return create_api_response(code="500", message=f"更新参数失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.delete("/admin/parameters/{param_key}")
|
|
|
|
|
async def delete_parameter(param_key: str, current_user=Depends(get_current_admin_user)):
|
|
|
|
|
try:
|
|
|
|
|
with get_db_connection() as conn:
|
|
|
|
|
cursor = conn.cursor(dictionary=True)
|
|
|
|
|
cursor.execute("SELECT param_id FROM sys_system_parameters WHERE param_key = %s", (param_key,))
|
|
|
|
|
existed = cursor.fetchone()
|
|
|
|
|
if not existed:
|
|
|
|
|
return create_api_response(code="404", message="参数不存在")
|
|
|
|
|
|
|
|
|
|
cursor.execute("DELETE FROM sys_system_parameters WHERE param_key = %s", (param_key,))
|
|
|
|
|
conn.commit()
|
|
|
|
|
return create_api_response(code="200", message="删除参数成功")
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return create_api_response(code="500", message=f"删除参数失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.get("/admin/model-configs/llm")
|
|
|
|
|
async def list_llm_model_configs(current_user=Depends(get_current_admin_user)):
|
|
|
|
|
try:
|
|
|
|
|
with get_db_connection() as conn:
|
|
|
|
|
cursor = conn.cursor(dictionary=True)
|
|
|
|
|
cursor.execute(
|
|
|
|
|
"""
|
|
|
|
|
SELECT config_id, model_code, model_name, provider, endpoint_url, api_key,
|
|
|
|
|
llm_model_name, llm_timeout, llm_temperature, llm_top_p, llm_max_tokens,
|
|
|
|
|
llm_system_prompt, description, is_active, is_default, created_at, updated_at
|
|
|
|
|
FROM llm_model_config
|
|
|
|
|
ORDER BY model_code ASC
|
|
|
|
|
"""
|
|
|
|
|
)
|
|
|
|
|
rows = cursor.fetchall()
|
|
|
|
|
return create_api_response(
|
|
|
|
|
code="200",
|
|
|
|
|
message="获取LLM模型配置成功",
|
|
|
|
|
data={"items": rows, "total": len(rows)},
|
|
|
|
|
)
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return create_api_response(code="500", message=f"获取LLM模型配置失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.post("/admin/model-configs/llm")
|
|
|
|
|
async def create_llm_model_config(request: LLMModelUpsertRequest, current_user=Depends(get_current_admin_user)):
|
|
|
|
|
try:
|
|
|
|
|
with get_db_connection() as conn:
|
|
|
|
|
cursor = conn.cursor(dictionary=True)
|
|
|
|
|
cursor.execute("SELECT config_id FROM llm_model_config WHERE model_code = %s", (request.model_code,))
|
|
|
|
|
if cursor.fetchone():
|
|
|
|
|
return create_api_response(code="400", message="模型编码已存在")
|
|
|
|
|
|
|
|
|
|
cursor.execute("SELECT COUNT(*) AS total FROM llm_model_config")
|
|
|
|
|
total_row = cursor.fetchone() or {"total": 0}
|
|
|
|
|
is_default = bool(request.is_default) or total_row["total"] == 0
|
|
|
|
|
if is_default:
|
|
|
|
|
cursor.execute("UPDATE llm_model_config SET is_default = 0 WHERE is_default = 1")
|
|
|
|
|
|
|
|
|
|
cursor.execute(
|
|
|
|
|
"""
|
|
|
|
|
INSERT INTO llm_model_config
|
|
|
|
|
(model_code, model_name, provider, endpoint_url, api_key, llm_model_name,
|
|
|
|
|
llm_timeout, llm_temperature, llm_top_p, llm_max_tokens, llm_system_prompt,
|
|
|
|
|
description, is_active, is_default)
|
|
|
|
|
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
|
|
|
|
|
""",
|
|
|
|
|
(
|
|
|
|
|
request.model_code,
|
|
|
|
|
request.model_name,
|
|
|
|
|
request.provider,
|
|
|
|
|
request.endpoint_url,
|
|
|
|
|
request.api_key,
|
|
|
|
|
request.llm_model_name,
|
|
|
|
|
request.llm_timeout,
|
|
|
|
|
request.llm_temperature,
|
|
|
|
|
request.llm_top_p,
|
|
|
|
|
request.llm_max_tokens,
|
|
|
|
|
request.llm_system_prompt,
|
|
|
|
|
request.description,
|
|
|
|
|
1 if request.is_active else 0,
|
|
|
|
|
1 if is_default else 0,
|
|
|
|
|
),
|
|
|
|
|
)
|
|
|
|
|
conn.commit()
|
|
|
|
|
return create_api_response(code="200", message="创建LLM模型配置成功")
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return create_api_response(code="500", message=f"创建LLM模型配置失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.put("/admin/model-configs/llm/{model_code}")
|
|
|
|
|
async def update_llm_model_config(
|
|
|
|
|
model_code: str,
|
|
|
|
|
request: LLMModelUpsertRequest,
|
|
|
|
|
current_user=Depends(get_current_admin_user),
|
|
|
|
|
):
|
|
|
|
|
try:
|
|
|
|
|
with get_db_connection() as conn:
|
|
|
|
|
cursor = conn.cursor(dictionary=True)
|
|
|
|
|
cursor.execute("SELECT config_id FROM llm_model_config WHERE model_code = %s", (model_code,))
|
|
|
|
|
existed = cursor.fetchone()
|
|
|
|
|
if not existed:
|
|
|
|
|
return create_api_response(code="404", message="模型配置不存在")
|
|
|
|
|
|
|
|
|
|
new_model_code = request.model_code or model_code
|
|
|
|
|
if new_model_code != model_code:
|
|
|
|
|
cursor.execute("SELECT config_id FROM llm_model_config WHERE model_code = %s", (new_model_code,))
|
|
|
|
|
duplicate_row = cursor.fetchone()
|
|
|
|
|
if duplicate_row and duplicate_row["config_id"] != existed["config_id"]:
|
|
|
|
|
return create_api_response(code="400", message="新的模型编码已存在")
|
|
|
|
|
|
|
|
|
|
if request.is_default:
|
|
|
|
|
cursor.execute("UPDATE llm_model_config SET is_default = 0 WHERE model_code <> %s AND is_default = 1", (model_code,))
|
|
|
|
|
|
|
|
|
|
cursor.execute(
|
|
|
|
|
"""
|
|
|
|
|
UPDATE llm_model_config
|
|
|
|
|
SET model_code = %s, model_name = %s, provider = %s, endpoint_url = %s, api_key = %s,
|
|
|
|
|
llm_model_name = %s, llm_timeout = %s, llm_temperature = %s, llm_top_p = %s,
|
|
|
|
|
llm_max_tokens = %s, llm_system_prompt = %s, description = %s, is_active = %s, is_default = %s
|
|
|
|
|
WHERE model_code = %s
|
|
|
|
|
""",
|
|
|
|
|
(
|
|
|
|
|
new_model_code,
|
|
|
|
|
request.model_name,
|
|
|
|
|
request.provider,
|
|
|
|
|
request.endpoint_url,
|
|
|
|
|
request.api_key,
|
|
|
|
|
request.llm_model_name,
|
|
|
|
|
request.llm_timeout,
|
|
|
|
|
request.llm_temperature,
|
|
|
|
|
request.llm_top_p,
|
|
|
|
|
request.llm_max_tokens,
|
|
|
|
|
request.llm_system_prompt,
|
|
|
|
|
request.description,
|
|
|
|
|
1 if request.is_active else 0,
|
|
|
|
|
1 if request.is_default else 0,
|
|
|
|
|
model_code,
|
|
|
|
|
),
|
|
|
|
|
)
|
|
|
|
|
conn.commit()
|
|
|
|
|
return create_api_response(code="200", message="更新LLM模型配置成功")
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return create_api_response(code="500", message=f"更新LLM模型配置失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.get("/admin/model-configs/audio")
|
|
|
|
|
async def list_audio_model_configs(
|
|
|
|
|
scene: str = Query("all"),
|
|
|
|
|
current_user=Depends(get_current_admin_user),
|
|
|
|
|
):
|
|
|
|
|
try:
|
|
|
|
|
with get_db_connection() as conn:
|
|
|
|
|
cursor = conn.cursor(dictionary=True)
|
|
|
|
|
sql = """
|
|
|
|
|
SELECT a.config_id, a.model_code, a.model_name, a.audio_scene, a.provider, a.endpoint_url, a.api_key,
|
|
|
|
|
a.asr_model_name, a.asr_vocabulary_id, a.hot_word_group_id, a.asr_speaker_count, a.asr_language_hints,
|
|
|
|
|
a.asr_disfluency_removal_enabled, a.asr_diarization_enabled,
|
|
|
|
|
a.vp_template_text, a.vp_duration_seconds, a.vp_sample_rate, a.vp_channels, a.vp_max_size_bytes,
|
|
|
|
|
a.extra_config, a.description, a.is_active, a.is_default, a.created_at, a.updated_at,
|
|
|
|
|
g.name AS hot_word_group_name, g.vocabulary_id AS hot_word_group_vocab_id
|
|
|
|
|
FROM audio_model_config a
|
|
|
|
|
LEFT JOIN hot_word_group g ON g.id = a.hot_word_group_id
|
|
|
|
|
"""
|
|
|
|
|
params = []
|
|
|
|
|
if scene in ("asr", "voiceprint"):
|
|
|
|
|
sql += " WHERE a.audio_scene = %s"
|
|
|
|
|
params.append(scene)
|
|
|
|
|
sql += " ORDER BY a.audio_scene ASC, a.model_code ASC"
|
|
|
|
|
cursor.execute(sql, tuple(params))
|
|
|
|
|
rows = [_normalize_audio_row(row) for row in cursor.fetchall()]
|
|
|
|
|
return create_api_response(code="200", message="获取音频模型配置成功", data={"items": rows, "total": len(rows)})
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return create_api_response(code="500", message=f"获取音频模型配置失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.post("/admin/model-configs/audio")
|
|
|
|
|
async def create_audio_model_config(request: AudioModelUpsertRequest, current_user=Depends(get_current_admin_user)):
|
|
|
|
|
try:
|
|
|
|
|
if request.audio_scene not in ("asr", "voiceprint"):
|
|
|
|
|
return create_api_response(code="400", message="audio_scene 仅支持 asr 或 voiceprint")
|
|
|
|
|
with get_db_connection() as conn:
|
|
|
|
|
cursor = conn.cursor(dictionary=True)
|
|
|
|
|
cursor.execute("SELECT config_id FROM audio_model_config WHERE model_code = %s", (request.model_code,))
|
|
|
|
|
if cursor.fetchone():
|
|
|
|
|
return create_api_response(code="400", message="模型编码已存在")
|
|
|
|
|
|
|
|
|
|
cursor.execute("SELECT COUNT(*) AS total FROM audio_model_config WHERE audio_scene = %s", (request.audio_scene,))
|
|
|
|
|
total_row = cursor.fetchone() or {"total": 0}
|
|
|
|
|
is_default = bool(request.is_default) or total_row["total"] == 0
|
|
|
|
|
if is_default:
|
|
|
|
|
cursor.execute("UPDATE audio_model_config SET is_default = 0 WHERE audio_scene = %s AND is_default = 1", (request.audio_scene,))
|
|
|
|
|
|
|
|
|
|
# 如果指定了热词组,从组中获取 vocabulary_id
|
|
|
|
|
asr_vocabulary_id = request.asr_vocabulary_id
|
|
|
|
|
if request.hot_word_group_id:
|
|
|
|
|
cursor.execute("SELECT vocabulary_id FROM hot_word_group WHERE id = %s", (request.hot_word_group_id,))
|
|
|
|
|
group_row = cursor.fetchone()
|
|
|
|
|
if group_row and group_row.get("vocabulary_id"):
|
|
|
|
|
asr_vocabulary_id = group_row["vocabulary_id"]
|
|
|
|
|
extra_config = _merge_audio_extra_config(request, vocabulary_id=asr_vocabulary_id)
|
|
|
|
|
legacy_columns = _extract_legacy_audio_columns(request.audio_scene, extra_config)
|
|
|
|
|
|
|
|
|
|
cursor.execute(
|
|
|
|
|
"""
|
|
|
|
|
INSERT INTO audio_model_config
|
|
|
|
|
(model_code, model_name, audio_scene, provider, endpoint_url, api_key,
|
|
|
|
|
asr_model_name, asr_vocabulary_id, hot_word_group_id, asr_speaker_count, asr_language_hints,
|
|
|
|
|
asr_disfluency_removal_enabled, asr_diarization_enabled,
|
|
|
|
|
vp_template_text, vp_duration_seconds, vp_sample_rate, vp_channels, vp_max_size_bytes,
|
|
|
|
|
extra_config, description, is_active, is_default)
|
|
|
|
|
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
|
|
|
|
|
""",
|
|
|
|
|
(
|
|
|
|
|
request.model_code,
|
|
|
|
|
request.model_name,
|
|
|
|
|
request.audio_scene,
|
|
|
|
|
request.provider,
|
|
|
|
|
request.endpoint_url,
|
|
|
|
|
request.api_key,
|
|
|
|
|
legacy_columns["asr_model_name"],
|
|
|
|
|
legacy_columns["asr_vocabulary_id"],
|
|
|
|
|
request.hot_word_group_id,
|
|
|
|
|
legacy_columns["asr_speaker_count"],
|
|
|
|
|
legacy_columns["asr_language_hints"],
|
|
|
|
|
legacy_columns["asr_disfluency_removal_enabled"],
|
|
|
|
|
legacy_columns["asr_diarization_enabled"],
|
|
|
|
|
legacy_columns["vp_template_text"],
|
|
|
|
|
legacy_columns["vp_duration_seconds"],
|
|
|
|
|
legacy_columns["vp_sample_rate"],
|
|
|
|
|
legacy_columns["vp_channels"],
|
|
|
|
|
legacy_columns["vp_max_size_bytes"],
|
|
|
|
|
json.dumps(extra_config, ensure_ascii=False),
|
|
|
|
|
request.description,
|
|
|
|
|
1 if request.is_active else 0,
|
|
|
|
|
1 if is_default else 0,
|
|
|
|
|
),
|
|
|
|
|
)
|
|
|
|
|
conn.commit()
|
|
|
|
|
return create_api_response(code="200", message="创建音频模型配置成功")
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return create_api_response(code="500", message=f"创建音频模型配置失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.put("/admin/model-configs/audio/{model_code}")
|
|
|
|
|
async def update_audio_model_config(
|
|
|
|
|
model_code: str,
|
|
|
|
|
request: AudioModelUpsertRequest,
|
|
|
|
|
current_user=Depends(get_current_admin_user),
|
|
|
|
|
):
|
|
|
|
|
try:
|
|
|
|
|
if request.audio_scene not in ("asr", "voiceprint"):
|
|
|
|
|
return create_api_response(code="400", message="audio_scene 仅支持 asr 或 voiceprint")
|
|
|
|
|
with get_db_connection() as conn:
|
|
|
|
|
cursor = conn.cursor(dictionary=True)
|
|
|
|
|
cursor.execute("SELECT config_id FROM audio_model_config WHERE model_code = %s", (model_code,))
|
|
|
|
|
existed = cursor.fetchone()
|
|
|
|
|
if not existed:
|
|
|
|
|
return create_api_response(code="404", message="模型配置不存在")
|
|
|
|
|
|
|
|
|
|
new_model_code = request.model_code or model_code
|
|
|
|
|
if new_model_code != model_code:
|
|
|
|
|
cursor.execute("SELECT config_id FROM audio_model_config WHERE model_code = %s", (new_model_code,))
|
|
|
|
|
duplicate_row = cursor.fetchone()
|
|
|
|
|
if duplicate_row and duplicate_row["config_id"] != existed["config_id"]:
|
|
|
|
|
return create_api_response(code="400", message="新的模型编码已存在")
|
|
|
|
|
|
|
|
|
|
if request.is_default:
|
|
|
|
|
cursor.execute(
|
|
|
|
|
"UPDATE audio_model_config SET is_default = 0 WHERE audio_scene = %s AND model_code <> %s AND is_default = 1",
|
|
|
|
|
(request.audio_scene, model_code),
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
# 如果指定了热词组,从组中获取 vocabulary_id
|
|
|
|
|
asr_vocabulary_id = request.asr_vocabulary_id
|
|
|
|
|
if request.hot_word_group_id:
|
|
|
|
|
cursor.execute("SELECT vocabulary_id FROM hot_word_group WHERE id = %s", (request.hot_word_group_id,))
|
|
|
|
|
group_row = cursor.fetchone()
|
|
|
|
|
if group_row and group_row.get("vocabulary_id"):
|
|
|
|
|
asr_vocabulary_id = group_row["vocabulary_id"]
|
|
|
|
|
extra_config = _merge_audio_extra_config(request, vocabulary_id=asr_vocabulary_id)
|
|
|
|
|
legacy_columns = _extract_legacy_audio_columns(request.audio_scene, extra_config)
|
|
|
|
|
|
|
|
|
|
cursor.execute(
|
|
|
|
|
"""
|
|
|
|
|
UPDATE audio_model_config
|
|
|
|
|
SET model_code = %s, model_name = %s, audio_scene = %s, provider = %s, endpoint_url = %s, api_key = %s,
|
|
|
|
|
asr_model_name = %s, asr_vocabulary_id = %s, hot_word_group_id = %s, asr_speaker_count = %s, asr_language_hints = %s,
|
|
|
|
|
asr_disfluency_removal_enabled = %s, asr_diarization_enabled = %s,
|
|
|
|
|
vp_template_text = %s, vp_duration_seconds = %s, vp_sample_rate = %s, vp_channels = %s, vp_max_size_bytes = %s,
|
|
|
|
|
extra_config = %s, description = %s, is_active = %s, is_default = %s
|
|
|
|
|
WHERE model_code = %s
|
|
|
|
|
""",
|
|
|
|
|
(
|
|
|
|
|
new_model_code,
|
|
|
|
|
request.model_name,
|
|
|
|
|
request.audio_scene,
|
|
|
|
|
request.provider,
|
|
|
|
|
request.endpoint_url,
|
|
|
|
|
request.api_key,
|
|
|
|
|
legacy_columns["asr_model_name"],
|
|
|
|
|
legacy_columns["asr_vocabulary_id"],
|
|
|
|
|
request.hot_word_group_id,
|
|
|
|
|
legacy_columns["asr_speaker_count"],
|
|
|
|
|
legacy_columns["asr_language_hints"],
|
|
|
|
|
legacy_columns["asr_disfluency_removal_enabled"],
|
|
|
|
|
legacy_columns["asr_diarization_enabled"],
|
|
|
|
|
legacy_columns["vp_template_text"],
|
|
|
|
|
legacy_columns["vp_duration_seconds"],
|
|
|
|
|
legacy_columns["vp_sample_rate"],
|
|
|
|
|
legacy_columns["vp_channels"],
|
|
|
|
|
legacy_columns["vp_max_size_bytes"],
|
|
|
|
|
json.dumps(extra_config, ensure_ascii=False),
|
|
|
|
|
request.description,
|
|
|
|
|
1 if request.is_active else 0,
|
|
|
|
|
1 if request.is_default else 0,
|
|
|
|
|
model_code,
|
|
|
|
|
),
|
|
|
|
|
)
|
|
|
|
|
conn.commit()
|
|
|
|
|
return create_api_response(code="200", message="更新音频模型配置成功")
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return create_api_response(code="500", message=f"更新音频模型配置失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.delete("/admin/model-configs/llm/{model_code}")
async def delete_llm_model_config(model_code: str, current_user=Depends(get_current_admin_user)):
    """Delete the LLM model configuration identified by ``model_code``.

    Returns 404 when no matching row exists, 200 on successful deletion,
    and 500 with the error text on any unexpected failure.
    """
    try:
        with get_db_connection() as conn:
            cursor = conn.cursor(dictionary=True)
            # Probe for existence first so a missing code yields 404 rather
            # than a silent no-op delete.
            cursor.execute("SELECT config_id FROM llm_model_config WHERE model_code = %s", (model_code,))
            existing = cursor.fetchone()
            if existing is None:
                return create_api_response(code="404", message="模型配置不存在")

            cursor.execute("DELETE FROM llm_model_config WHERE model_code = %s", (model_code,))
            conn.commit()
            return create_api_response(code="200", message="删除LLM模型配置成功")
    except Exception as e:
        return create_api_response(code="500", message=f"删除LLM模型配置失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.delete("/admin/model-configs/audio/{model_code}")
async def delete_audio_model_config(model_code: str, current_user=Depends(get_current_admin_user)):
    """Delete the audio model configuration identified by ``model_code``.

    Returns 404 when no matching row exists, 200 on successful deletion,
    and 500 with the error text on any unexpected failure.
    """
    try:
        with get_db_connection() as conn:
            cursor = conn.cursor(dictionary=True)
            # Existence probe: distinguish "not found" (404) from a delete
            # that simply matched zero rows.
            cursor.execute("SELECT config_id FROM audio_model_config WHERE model_code = %s", (model_code,))
            existing = cursor.fetchone()
            if existing is None:
                return create_api_response(code="404", message="模型配置不存在")

            cursor.execute("DELETE FROM audio_model_config WHERE model_code = %s", (model_code,))
            conn.commit()
            return create_api_response(code="200", message="删除音频模型配置成功")
    except Exception as e:
        return create_api_response(code="500", message=f"删除音频模型配置失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.post("/admin/model-configs/llm/test")
async def test_llm_model_config(request: LLMModelTestRequest, current_user=Depends(get_current_admin_user)):
    """Run a connectivity/inference test against the submitted LLM settings.

    The request model is serialized and handed to the shared ``llm_service``;
    the service's result is returned verbatim in the response payload.
    """
    try:
        # Pydantic v2 exposes model_dump(); fall back to v1's dict().
        dump = getattr(request, "model_dump", None)
        payload = dump() if callable(dump) else request.dict()
        result = llm_service.test_model(payload, prompt=request.test_prompt)
        return create_api_response(code="200", message="LLM模型测试成功", data=result)
    except Exception as e:
        return create_api_response(code="500", message=f"LLM模型测试失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.post("/admin/model-configs/audio/test")
async def test_audio_model_config(request: AudioModelTestRequest, current_user=Depends(get_current_admin_user)):
    """Submit an asynchronous test task for an audio model configuration.

    Only the ASR scene is currently testable; other scenes are rejected
    with a 400 response.
    """
    try:
        if request.audio_scene != "asr":
            return create_api_response(code="400", message="当前仅支持音频识别(ASR)测试")

        # A hot-word group bound to the request may carry its own vocabulary
        # id, which takes precedence over the explicitly supplied one.
        vocabulary_id = request.asr_vocabulary_id
        if request.hot_word_group_id:
            with get_db_connection() as conn:
                cursor = conn.cursor(dictionary=True)
                cursor.execute(
                    "SELECT vocabulary_id FROM hot_word_group WHERE id = %s",
                    (request.hot_word_group_id,),
                )
                group_row = cursor.fetchone()
                cursor.close()
            resolved = group_row.get("vocabulary_id") if group_row else None
            if resolved:
                vocabulary_id = resolved

        extra_config = _merge_audio_extra_config(request, vocabulary_id=vocabulary_id)
        runtime_config = dict(
            provider=request.provider,
            endpoint_url=request.endpoint_url,
            api_key=request.api_key,
            audio_scene=request.audio_scene,
            hot_word_group_id=request.hot_word_group_id,
        )
        # extra_config wins on key collisions, mirroring the merge order of
        # the stored configuration.
        runtime_config.update(extra_config)
        result = transcription_service.test_asr_model(runtime_config, test_file_url=request.test_file_url)
        return create_api_response(code="200", message="音频模型测试任务已提交", data=result)
    except Exception as e:
        return create_api_response(code="500", message=f"音频模型测试失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.get("/system-config/public")
async def get_public_system_config():
    """Expose branding configuration without requiring authentication."""
    try:
        branding = SystemConfigService.get_branding_config()
        return create_api_response(code="200", message="获取公开配置成功", data=branding)
    except Exception as e:
        return create_api_response(code="500", message=f"获取公开配置失败: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.get("/admin/system-config")
async def get_system_config_compat(current_user=Depends(get_current_admin_user)):
    """Legacy-compatible system-config endpoint backed by sys_system_parameters.

    Returns every active parameter row as a flat key/value dict, plus the
    derived byte-size fields ``MAX_FILE_SIZE`` / ``MAX_IMAGE_SIZE`` that the
    old frontend expects.  (Original docstring: 兼容旧前端的系统配置接口,
    数据来源为 sys_system_parameters。)
    """
    try:
        with get_db_connection() as conn:
            cursor = conn.cursor(dictionary=True)
            cursor.execute(
                """
                SELECT param_key, param_value
                FROM sys_system_parameters
                WHERE is_active = 1
                """
            )
            rows = cursor.fetchall()
            data = {row["param_key"]: row["param_value"] for row in rows}

            # Legacy fields: sizes are stored in MB, old clients expect bytes.
            # Fix: the original code only set MAX_FILE_SIZE when
            # max_audio_size was present, while MAX_IMAGE_SIZE always got a
            # default — both now fall back uniformly (100 MB / 10 MB).
            data["MAX_FILE_SIZE"] = _size_param_to_bytes(data.get("max_audio_size"), 100)
            data["MAX_IMAGE_SIZE"] = _size_param_to_bytes(data.get("max_image_size"), 10)

            return create_api_response(code="200", message="获取系统配置成功", data=data)
    except Exception as e:
        return create_api_response(code="500", message=f"获取系统配置失败: {str(e)}")


def _size_param_to_bytes(value, default_mb: int) -> int:
    """Convert a megabyte count (often stored as a string) to bytes.

    Falls back to ``default_mb`` when the value is missing or not an
    integer-like string.
    """
    try:
        return int(value) * 1024 * 1024
    except (TypeError, ValueError):
        return default_mb * 1024 * 1024
|