codex/dev
mula.liu 2026-04-13 09:03:23 +08:00
parent 2c505514a5
commit e71bd889b1
16 changed files with 1774 additions and 9337 deletions

View File

@ -24,7 +24,3 @@ BASE_URL=https://imeeting.unisspace.com
# 前端API地址通过Nginx代理访问后端
VITE_API_BASE_URL=/api
# ==================== LLM配置 ====================
# 通义千问API密钥请替换为实际密钥
QWEN_API_KEY=sk-c2bf06ea56b4491ea3d1e37fdb472b8f

View File

@ -15,7 +15,7 @@ REDIS_PASSWORD=Unis@123
# ==================== API配置 ====================
API_HOST=0.0.0.0
API_PORT=8001
API_PORT=8000
# ==================== 应用配置 ====================
# 应用访问地址(用于生成外部链接、二维码等)
@ -23,10 +23,6 @@ API_PORT=8001
# 生产环境: https://your-domain.com
BASE_URL=http://imeeting.unisspace.com
# ==================== LLM配置 ====================
# 通义千问API密钥请替换为实际密钥
QWEN_API_KEY=sk-c2bf06ea56b4491ea3d1e37fdb472b8f
# ==================== 转录轮询配置 ====================
TRANSCRIPTION_POLL_INTERVAL=10

View File

@ -24,6 +24,7 @@ from app.api.endpoints import (
)
from app.core.config import UPLOAD_DIR
from app.core.middleware import TerminalCheckMiddleware
from app.services.system_config_service import SystemConfigService
def create_app() -> FastAPI:
@ -87,4 +88,6 @@ def create_app() -> FastAPI:
"version": "1.1.0",
}
SystemConfigService.ensure_builtin_parameters()
return app

View File

@ -426,11 +426,15 @@ def _retry_summary_task(task_id: str):
prompt_id = _parse_optional_int(task_data.get("prompt_id"))
user_prompt = "" if task_data.get("user_prompt") in (None, "None") else str(task_data.get("user_prompt"))
model_code = "" if task_data.get("model_code") in (None, "None") else str(task_data.get("model_code"))
if not model_code:
redis_task_data = async_meeting_service.redis_client.hgetall(f"llm_task:{task_id}") or {}
model_code = redis_task_data.get("model_code") or ""
new_task_id, _ = async_meeting_service.enqueue_summary_generation(
meeting_id,
user_prompt=user_prompt,
prompt_id=prompt_id,
model_code=None,
model_code=model_code or None,
)
return create_api_response(
code="200",

View File

@ -12,6 +12,28 @@ llm_service = LLMService()
transcription_service = AsyncTranscriptionService()
def _validate_parameter_request(request):
    """Validate a system-parameter create/update request.

    Returns an error message string on failure, or None when the request
    is acceptable.  Only the ``token_expire_days`` parameter currently
    carries extra constraints; every other key passes through.
    """
    key = str(request.param_key or "").strip()
    if not key:
        return "参数键不能为空"
    if key != SystemConfigService.TOKEN_EXPIRE_DAYS:
        return None
    # token_expire_days must be a system-category, numeric, positive
    # integer capped at one year.
    if request.category != "system":
        return "token_expire_days 必须归类为 system"
    if request.value_type != "number":
        return "token_expire_days 的值类型必须为 number"
    try:
        days = int(str(request.param_value).strip())
    except (TypeError, ValueError):
        return "token_expire_days 必须为正整数"
    if days <= 0:
        return "token_expire_days 必须大于 0"
    if days > 365:
        return "token_expire_days 不能超过 365 天"
    return None
def _parse_json_object(value: Any) -> dict[str, Any]:
if value is None:
return {}
@ -131,6 +153,7 @@ def _resolve_hot_word_vocabulary_id(cursor, request) -> str | None:
def list_parameters(category: str | None = None, keyword: str | None = None):
try:
SystemConfigService.ensure_builtin_parameters()
with get_db_connection() as conn:
cursor = conn.cursor(dictionary=True)
query = """
@ -162,6 +185,7 @@ def list_parameters(category: str | None = None, keyword: str | None = None):
def get_parameter(param_key: str):
try:
SystemConfigService.ensure_builtin_parameters()
with get_db_connection() as conn:
cursor = conn.cursor(dictionary=True)
cursor.execute(
@ -184,6 +208,10 @@ def get_parameter(param_key: str):
def create_parameter(request):
try:
validation_error = _validate_parameter_request(request)
if validation_error:
return create_api_response(code="400", message=validation_error)
with get_db_connection() as conn:
cursor = conn.cursor(dictionary=True)
cursor.execute("SELECT param_id FROM sys_system_parameters WHERE param_key = %s", (request.param_key,))
@ -215,6 +243,10 @@ def create_parameter(request):
def update_parameter(param_key: str, request):
try:
validation_error = _validate_parameter_request(request)
if validation_error:
return create_api_response(code="400", message=validation_error)
with get_db_connection() as conn:
cursor = conn.cursor(dictionary=True)
cursor.execute("SELECT param_id FROM sys_system_parameters WHERE param_key = %s", (param_key,))

View File

@ -8,7 +8,7 @@ from typing import Optional, Dict, Any, List
import redis
from app.core.database import get_db_connection
from app.services.llm_service import LLMService
from app.services.llm_service import LLMService, LLMServiceError
class AsyncKnowledgeBaseService:
"""异步知识库服务类 - 处理知识库相关的异步任务"""
@ -96,9 +96,7 @@ class AsyncKnowledgeBaseService:
# 4. 调用LLM API
self._update_task_status_in_redis(task_id, 'processing', 50, message="AI正在生成知识库...")
generated_content = self.llm_service._call_llm_api(full_prompt)
if not generated_content:
raise Exception("LLM API调用失败或返回空内容")
generated_content = self.llm_service.call_llm_api_or_raise(full_prompt)
# 5. 保存结果到数据库
self._update_task_status_in_redis(task_id, 'processing', 95, message="保存结果...")
@ -110,6 +108,11 @@ class AsyncKnowledgeBaseService:
print(f"Task {task_id} completed successfully")
except LLMServiceError as e:
error_msg = e.message or str(e)
print(f"Task {task_id} failed with LLM error: {error_msg}")
self._update_task_in_db(task_id, 'failed', 0, error_message=error_msg)
self._update_task_status_in_redis(task_id, 'failed', 0, error_message=error_msg)
except Exception as e:
error_msg = str(e)
print(f"Task {task_id} failed: {error_msg}")

View File

@ -4,7 +4,6 @@
"""
import uuid
import time
import os
import re
from datetime import datetime
from typing import Optional, Dict, Any, List
@ -15,7 +14,7 @@ from app.core.config import REDIS_CONFIG, TRANSCRIPTION_POLL_CONFIG, BACKGROUND_
from app.core.database import get_db_connection
from app.services.async_transcription_service import AsyncTranscriptionService
from app.services.background_task_runner import KeyedBackgroundTaskRunner
from app.services.llm_service import LLMService
from app.services.llm_service import LLMService, LLMServiceError
summary_task_runner = KeyedBackgroundTaskRunner(
@ -88,7 +87,7 @@ class AsyncMeetingService:
task_id = str(uuid.uuid4())
# 在数据库中创建任务记录
self._save_task_to_db(task_id, meeting_id, user_prompt, prompt_id)
self._save_task_to_db(task_id, meeting_id, user_prompt, prompt_id, model_code)
# 将任务详情存入Redis用于快速查询状态
current_time = datetime.now().isoformat()
@ -110,7 +109,7 @@ class AsyncMeetingService:
except Exception as e:
print(f"Error starting summary generation: {e}")
raise e
raise
def _process_task(self, task_id: str):
"""
@ -145,18 +144,13 @@ class AsyncMeetingService:
if not transcript_text:
raise Exception("无法获取会议转录内容")
# 3. 构建提示词
# 3. 构建消息
self._update_task_status_in_redis(task_id, 'processing', 40, message="准备AI提示词...")
full_prompt = self._build_prompt(meeting_id, transcript_text, user_prompt, prompt_id)
messages = self._build_messages(meeting_id, transcript_text, user_prompt, prompt_id)
# 4. 调用LLM API支持指定模型
self._update_task_status_in_redis(task_id, 'processing', 50, message="AI正在分析会议内容...")
if model_code:
summary_content = self._call_llm_with_model(full_prompt, model_code)
else:
summary_content = self.llm_service._call_llm_api(full_prompt)
if not summary_content:
raise Exception("LLM API调用失败或返回空内容")
summary_content = self.llm_service.call_llm_api_messages_or_raise(messages, model_code=model_code)
# 5. 保存结果到主表
self._update_task_status_in_redis(task_id, 'processing', 90, message="保存总结结果...")
@ -165,18 +159,22 @@ class AsyncMeetingService:
# 6. 导出MD文件到音频同目录
self._update_task_status_in_redis(task_id, 'processing', 95, message="导出Markdown文件...")
md_path = self._export_summary_md(meeting_id, summary_content, task_id=task_id)
if not md_path:
raise RuntimeError("导出Markdown文件失败未生成文件路径")
# 7. 任务完成result保存MD文件路径
self._update_task_in_db(task_id, 'completed', 100, result=md_path)
self._update_task_status_in_redis(task_id, 'completed', 100, result=md_path)
self._update_task_status_in_redis(task_id, 'completed', 100, message="任务已完成", result=md_path)
print(f"Task {task_id} completed successfully")
except LLMServiceError as e:
error_msg = e.message or str(e)
print(f"Task {task_id} failed with LLM error: {error_msg}")
self._mark_task_failed(task_id, error_msg)
except Exception as e:
error_msg = str(e)
print(f"Task {task_id} failed: {error_msg}")
# 更新失败状态
self._update_task_in_db(task_id, 'failed', 0, error_message=error_msg)
self._update_task_status_in_redis(task_id, 'failed', 0, error_message=error_msg)
self._mark_task_failed(task_id, error_msg)
finally:
self._release_lock(lock_key, lock_token)
@ -294,50 +292,7 @@ class AsyncMeetingService:
# --- 会议相关方法 ---
def _call_llm_with_model(self, prompt: str, model_code: str) -> Optional[str]:
"""使用指定模型编码调用LLM API"""
import requests
try:
with get_db_connection() as connection:
cursor = connection.cursor(dictionary=True)
cursor.execute(
"SELECT endpoint_url, api_key, llm_model_name, llm_timeout, llm_temperature, llm_top_p, llm_max_tokens FROM llm_model_config WHERE model_code = %s AND is_active = 1",
(model_code,)
)
config = cursor.fetchone()
if not config:
print(f"模型 {model_code} 未找到或未激活,回退到默认模型")
return self.llm_service._call_llm_api(prompt)
endpoint_url = (config['endpoint_url'] or '').rstrip('/')
if not endpoint_url.endswith('/chat/completions'):
endpoint_url = f"{endpoint_url}/chat/completions"
headers = {"Content-Type": "application/json"}
if config['api_key']:
headers["Authorization"] = f"Bearer {config['api_key']}"
payload = {
"model": config['llm_model_name'],
"messages": [{"role": "user", "content": prompt}],
"temperature": float(config.get('llm_temperature', 0.7)),
"top_p": float(config.get('llm_top_p', 0.9)),
"max_tokens": int(config.get('llm_max_tokens', 4096)),
"stream": False,
}
response = requests.post(
endpoint_url,
headers=headers,
json=payload,
timeout=int(config.get('llm_timeout', 120)),
)
response.raise_for_status()
return self.llm_service._extract_response_text(response.json())
except Exception as e:
print(f"使用模型 {model_code} 调用失败: {e}")
return None
def _export_summary_md(self, meeting_id: int, summary_content: str, task_id: Optional[str] = None) -> Optional[str]:
def _export_summary_md(self, meeting_id: int, summary_content: str, task_id: Optional[str] = None) -> str:
"""将总结内容导出为MD文件保存到音频同目录返回 /uploads/... 相对路径"""
try:
with get_db_connection() as connection:
@ -368,8 +323,7 @@ class AsyncMeetingService:
print(f"总结MD文件已保存: {relative_md_path}")
return relative_md_path
except Exception as e:
print(f"导出总结MD文件失败: {e}")
return None
raise RuntimeError(f"导出总结MD文件失败: {e}") from e
def _get_meeting_transcript(self, meeting_id: int) -> str:
"""从数据库获取会议转录内容"""
@ -495,10 +449,10 @@ class AsyncMeetingService:
rendered = rendered.replace(f"{{{{ {key} }}}}", value or '')
return rendered
def _build_prompt(self, meeting_id: int, transcript_text: str, user_prompt: str, prompt_id: Optional[int] = None) -> str:
def _build_messages(self, meeting_id: int, transcript_text: str, user_prompt: str, prompt_id: Optional[int] = None) -> List[Dict[str, str]]:
"""
构建完整的提示词
使用数据库中配置的MEETING_TASK提示词模板
构建会议总结消息数组
使用数据库中配置的 MEETING_TASK 提示词模板作为任务级 system 指令
Args:
meeting_id: 会议ID
@ -506,8 +460,7 @@ class AsyncMeetingService:
user_prompt: 用户额外提示词
prompt_id: 可选的提示词模版ID如果不指定则使用默认模版
"""
# 从数据库获取会议任务的提示词模板支持指定prompt_id
system_prompt = self.llm_service.get_task_prompt('MEETING_TASK', prompt_id=prompt_id)
task_prompt = self.llm_service.get_task_prompt('MEETING_TASK', prompt_id=prompt_id)
meeting_context = self._get_meeting_prompt_context(meeting_id)
prompt_variables = {
'meeting_id': str(meeting_id),
@ -517,25 +470,52 @@ class AsyncMeetingService:
'meeting_attendees': meeting_context.get('attendees', ''),
'meeting_time_value': meeting_context.get('meeting_time_value')
}
system_prompt = self._apply_prompt_variables(system_prompt, prompt_variables)
rendered_task_prompt = self._apply_prompt_variables(task_prompt, prompt_variables)
rendered_user_prompt = self._apply_prompt_variables(user_prompt, prompt_variables) if user_prompt else ''
prompt = f"{system_prompt}\n\n"
meeting_info_lines = [
f"会议ID{prompt_variables['meeting_id']}",
f"会议标题:{prompt_variables['meeting_title'] or '未提供'}",
f"会议时间:{prompt_variables['meeting_time'] or '未提供'}",
f"会议创建人:{prompt_variables['meeting_creator'] or '未提供'}",
f"参会人员:{prompt_variables['meeting_attendees'] or '未提供'}",
]
meeting_info_message = "\n".join(meeting_info_lines)
user_requirement_message = rendered_user_prompt or "无额外要求"
if rendered_user_prompt:
prompt += f"用户额外要求:{rendered_user_prompt}\n\n"
messages: List[Dict[str, str]] = []
if rendered_task_prompt:
messages.append({"role": "system", "content": rendered_task_prompt})
messages.append({
"role": "user",
"content": (
"以下是本次会议的上下文信息,请结合这些信息理解会议背景。\n\n"
f"{meeting_info_message}\n\n"
"以下是用户额外要求,如与事实冲突请以转录原文为准:\n"
f"{user_requirement_message}"
)
})
messages.append({
"role": "user",
"content": (
"以下是会议转录原文,请严格依据原文生成会议总结。\n"
"如果信息不足,请明确写出“原文未明确”或“需人工确认”。\n\n"
"<meeting_transcript>\n"
f"{transcript_text}\n"
"</meeting_transcript>"
)
})
prompt += f"会议转录内容:\n{transcript_text}\n\n请根据以上内容生成会议总结:"
return messages
return prompt
def _save_summary_to_db(self, meeting_id: int, summary_content: str, user_prompt: str, prompt_id: Optional[int] = None) -> Optional[int]:
def _save_summary_to_db(self, meeting_id: int, summary_content: str, user_prompt: str, prompt_id: Optional[int] = None) -> int:
"""保存总结到数据库 - 更新meetings表的summary、user_prompt、prompt_id和updated_at字段"""
try:
with get_db_connection() as connection:
cursor = connection.cursor()
cursor.execute("SELECT 1 FROM meetings WHERE meeting_id = %s LIMIT 1", (meeting_id,))
if not cursor.fetchone():
raise RuntimeError(f"会议不存在,无法保存总结: meeting_id={meeting_id}")
# 更新meetings表的summary、user_prompt、prompt_id和updated_at字段
update_query = """
UPDATE meetings
SET summary = %s, user_prompt = %s, prompt_id = %s, updated_at = NOW()
@ -547,10 +527,6 @@ class AsyncMeetingService:
print(f"成功保存会议总结到meetings表meeting_id: {meeting_id}, prompt_id: {prompt_id}")
return meeting_id
except Exception as e:
print(f"保存总结到数据库错误: {e}")
return None
# --- 状态查询和数据库操作方法 ---
def get_task_status(self, task_id: str) -> Dict[str, Any]:
@ -569,11 +545,12 @@ class AsyncMeetingService:
'status': task_data.get('status', 'unknown'),
'progress': int(task_data.get('progress', 0)),
'meeting_id': int(task_data.get('meeting_id', 0)),
'model_code': self._normalize_optional_text(task_data.get('model_code')),
'created_at': task_data.get('created_at'),
'updated_at': task_data.get('updated_at'),
'message': task_data.get('message'),
'result': task_data.get('result'),
'error_message': task_data.get('error_message')
'result': self._normalize_optional_text(task_data.get('result')),
'error_message': self._normalize_optional_text(task_data.get('error_message'))
}
except Exception as e:
print(f"Error getting task status: {e}")
@ -611,7 +588,12 @@ class AsyncMeetingService:
try:
with get_db_connection() as connection:
cursor = connection.cursor(dictionary=True)
query = "SELECT task_id, status, progress, user_prompt, created_at, completed_at, error_message FROM llm_tasks WHERE meeting_id = %s ORDER BY created_at DESC"
query = """
SELECT task_id, status, progress, user_prompt, model_code, result, created_at, completed_at, error_message
FROM llm_tasks
WHERE meeting_id = %s
ORDER BY created_at DESC
"""
cursor.execute(query, (meeting_id,))
tasks = cursor.fetchall()
for task in tasks:
@ -638,7 +620,7 @@ class AsyncMeetingService:
# 查询最新的LLM任务
query = """
SELECT task_id, status, progress, created_at, completed_at, error_message
SELECT task_id, status, progress, model_code, result, created_at, completed_at, error_message
FROM llm_tasks
WHERE meeting_id = %s
ORDER BY created_at DESC
@ -664,8 +646,10 @@ class AsyncMeetingService:
'status': task_record['status'],
'progress': task_record['progress'] or 0,
'meeting_id': meeting_id,
'model_code': task_record.get('model_code'),
'created_at': task_record['created_at'].isoformat() if task_record['created_at'] else None,
'completed_at': task_record['completed_at'].isoformat() if task_record['completed_at'] else None,
'result': task_record.get('result'),
'error_message': task_record['error_message']
}
@ -676,19 +660,31 @@ class AsyncMeetingService:
def _update_task_status_in_redis(self, task_id: str, status: str, progress: int, message: str = None, result: str = None, error_message: str = None):
"""更新Redis中的任务状态"""
try:
redis_key = f"llm_task:{task_id}"
update_data = {
'status': status,
'progress': str(progress),
'updated_at': datetime.now().isoformat()
}
if message: update_data['message'] = message
if result: update_data['result'] = result
if error_message: update_data['error_message'] = error_message
self.redis_client.hset(f"llm_task:{task_id}", mapping=update_data)
if result is not None: update_data['result'] = result
if error_message is not None: update_data['error_message'] = error_message
self.redis_client.hset(redis_key, mapping=update_data)
if status == 'failed':
self.redis_client.hdel(redis_key, 'result')
elif status == 'completed':
self.redis_client.hdel(redis_key, 'error_message')
except Exception as e:
print(f"Error updating task status in Redis: {e}")
def _save_task_to_db(self, task_id: str, meeting_id: int, user_prompt: str, prompt_id: Optional[int] = None):
def _save_task_to_db(
self,
task_id: str,
meeting_id: int,
user_prompt: str,
prompt_id: Optional[int] = None,
model_code: Optional[str] = None
):
"""保存任务到数据库
Args:
@ -696,12 +692,16 @@ class AsyncMeetingService:
meeting_id: 会议ID
user_prompt: 用户额外提示词
prompt_id: 可选的提示词模版ID如果为None则使用默认模版
model_code: 可选的模型编码用于恢复/重试时复用原始模型
"""
try:
with get_db_connection() as connection:
cursor = connection.cursor()
insert_query = "INSERT INTO llm_tasks (task_id, meeting_id, user_prompt, prompt_id, status, progress, created_at) VALUES (%s, %s, %s, %s, 'pending', 0, NOW())"
cursor.execute(insert_query, (task_id, meeting_id, user_prompt, prompt_id))
insert_query = """
INSERT INTO llm_tasks (task_id, meeting_id, user_prompt, prompt_id, model_code, status, progress, created_at)
VALUES (%s, %s, %s, %s, %s, 'pending', 0, NOW())
"""
cursor.execute(insert_query, (task_id, meeting_id, user_prompt, prompt_id, model_code))
connection.commit()
print(f"[Meeting Service] Task saved successfully to database")
except Exception as e:
@ -710,20 +710,42 @@ class AsyncMeetingService:
def _update_task_in_db(self, task_id: str, status: str, progress: int, result: str = None, error_message: str = None):
"""更新数据库中的任务状态"""
try:
with get_db_connection() as connection:
cursor = connection.cursor()
if status == 'completed':
query = "UPDATE llm_tasks SET status = %s, progress = %s, result = %s, error_message = NULL, completed_at = NOW() WHERE task_id = %s"
query = """
UPDATE llm_tasks
SET status = %s, progress = %s, result = %s, error_message = NULL, completed_at = NOW()
WHERE task_id = %s
"""
params = (status, progress, result, task_id)
else:
query = "UPDATE llm_tasks SET status = %s, progress = %s, error_message = %s WHERE task_id = %s"
query = """
UPDATE llm_tasks
SET status = %s, progress = %s, result = NULL, error_message = %s, completed_at = NULL
WHERE task_id = %s
"""
params = (status, progress, error_message, task_id)
cursor.execute(query, params)
if cursor.rowcount == 0:
raise RuntimeError(f"更新LLM任务状态失败任务不存在: task_id={task_id}")
connection.commit()
except Exception as e:
print(f"Error updating task in database: {e}")
def _mark_task_failed(self, task_id: str, error_message: str) -> None:
    """Best-effort persistence of a task failure.

    The database update is wrapped so a persistence error cannot mask the
    original task exception; the Redis status is always refreshed.
    """
    try:
        self._update_task_in_db(task_id, 'failed', 0, error_message=error_message)
    except Exception as db_error:
        # Log and continue: Redis must still reflect the failure.
        print(f"Error updating failed task in database: {db_error}")
    self._update_task_status_in_redis(
        task_id,
        'failed',
        0,
        message="任务执行失败",
        error_message=error_message,
    )
def _get_task_from_db(self, task_id: str) -> Optional[Dict[str, str]]:
"""从数据库获取任务信息"""
@ -734,13 +756,21 @@ class AsyncMeetingService:
cursor.execute(query, (task_id,))
task = cursor.fetchone()
if task:
# 确保所有字段都是字符串以匹配Redis的行为
return {k: v.isoformat() if isinstance(v, datetime) else str(v) for k, v in task.items()}
return {
key: value.isoformat() if isinstance(value, datetime) else (None if value is None else str(value))
for key, value in task.items()
}
return None
except Exception as e:
print(f"Error getting task from database: {e}")
return None
@staticmethod
def _normalize_optional_text(value: Any) -> Optional[str]:
if value in (None, "", "None"):
return None
return str(value)
def _get_existing_summary_task(self, meeting_id: int) -> Optional[str]:
"""
检查会议是否已经有总结任务用于并发控制

View File

@ -3,21 +3,27 @@ import redis
from datetime import datetime, timedelta
from typing import Optional, Dict, Any
from app.core.config import REDIS_CONFIG
from app.services.system_config_service import SystemConfigService
import os
# JWT配置
JWT_SECRET_KEY = os.getenv('JWT_SECRET_KEY', 'your-super-secret-key-change-in-production')
JWT_ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24 * 7 # 7天
class JWTService:
def __init__(self):
self.redis_client = redis.Redis(**REDIS_CONFIG)
@staticmethod
def _get_access_token_expire_minutes() -> int:
    """Token lifetime in minutes, from the configured expire days (floored at 1 day)."""
    days = SystemConfigService.get_token_expire_days(default=7)
    return max(days, 1) * 24 * 60
def create_access_token(self, data: Dict[str, Any]) -> str:
"""创建JWT访问令牌"""
to_encode = data.copy()
expire = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
expire_minutes = self._get_access_token_expire_minutes()
expire = datetime.utcnow() + timedelta(minutes=expire_minutes)
to_encode.update({"exp": expire, "type": "access"})
encoded_jwt = jwt.encode(to_encode, JWT_SECRET_KEY, algorithm=JWT_ALGORITHM)
@ -27,7 +33,7 @@ class JWTService:
if user_id:
self.redis_client.setex(
f"token:{user_id}:{encoded_jwt}",
ACCESS_TOKEN_EXPIRE_MINUTES * 60, # Redis需要秒
expire_minutes * 60, # Redis需要秒
"active"
)

View File

@ -1,40 +1,124 @@
import json
import os
from typing import Optional, Dict, Generator, Any
from typing import Optional, Dict, Generator, Any, List
import requests
import httpx
import app.core.config as config_module
from app.core.database import get_db_connection
from app.services.system_config_service import SystemConfigService
class LLMServiceError(Exception):
    """Structured error raised when an LLM call fails.

    Attributes:
        message: human-readable error description (also the Exception text).
        status_code: optional HTTP status associated with the failure.
    """

    def __init__(self, message: str, *, status_code: Optional[int] = None):
        self.message = message
        self.status_code = status_code
        super().__init__(message)
class LLMService:
"""LLM服务 - 专注于大模型API调用和提示词管理"""
@staticmethod
def _create_requests_session() -> requests.Session:
session = requests.Session()
session.trust_env = os.getenv("IMEETING_USE_SYSTEM_PROXY", "").lower() in {"1", "true", "yes", "on"}
return session
def _use_system_proxy() -> bool:
return os.getenv("IMEETING_USE_SYSTEM_PROXY", "").lower() in {"1", "true", "yes", "on"}
@staticmethod
def build_call_params_from_config(config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
config = config or {}
endpoint_url = config.get("endpoint_url") or SystemConfigService.get_llm_endpoint_url()
api_key = config.get("api_key")
def _create_httpx_client() -> httpx.Client:
return httpx.Client(
trust_env=LLMService._use_system_proxy()
)
@staticmethod
def _coerce_int(value: Any, default: int, minimum: Optional[int] = None) -> int:
try:
normalized = int(value)
except (TypeError, ValueError):
normalized = default
if minimum is not None:
normalized = max(minimum, normalized)
return normalized
@staticmethod
def _coerce_float(value: Any, default: float) -> float:
try:
return float(value)
except (TypeError, ValueError):
return default
@staticmethod
def _build_timeout(timeout_seconds: int) -> httpx.Timeout:
    """Build an httpx.Timeout from a single seconds value.

    Read/write use the full timeout (floored at 1s); connect and pool
    are capped at 10s so connection setup fails fast while long model
    responses are still allowed.
    """
    normalized_timeout = max(1, int(timeout_seconds))
    connect_timeout = min(10.0, float(normalized_timeout))
    return httpx.Timeout(
        connect=connect_timeout,
        read=float(normalized_timeout),
        write=float(normalized_timeout),
        pool=connect_timeout,
    )
@staticmethod
def _normalize_api_key(api_key: Optional[Any]) -> Optional[str]:
if api_key is None:
api_key = SystemConfigService.get_llm_api_key(config_module.QWEN_API_KEY)
return None
normalized = str(api_key).strip()
return normalized or None
@staticmethod
def _normalize_model_code(model_code: Optional[Any]) -> Optional[str]:
if model_code is None:
return None
normalized = str(model_code).strip()
return normalized or None
@classmethod
def build_call_params_from_config(cls, config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
config = config or {}
endpoint_url = str(config.get("endpoint_url") or SystemConfigService.get_llm_endpoint_url() or "").strip()
api_key = cls._normalize_api_key(config.get("api_key"))
if api_key is None:
api_key = cls._normalize_api_key(SystemConfigService.get_llm_api_key(config_module.QWEN_API_KEY))
default_model = SystemConfigService.get_llm_model_name()
default_timeout = SystemConfigService.get_llm_timeout()
default_temperature = SystemConfigService.get_llm_temperature()
default_top_p = SystemConfigService.get_llm_top_p()
default_max_tokens = SystemConfigService.get_llm_max_tokens()
default_system_prompt = SystemConfigService.get_llm_system_prompt(None)
return {
"endpoint_url": endpoint_url,
"api_key": api_key,
"model": config.get("llm_model_name") or config.get("model") or SystemConfigService.get_llm_model_name(),
"timeout": int(config.get("llm_timeout") or config.get("timeout") or SystemConfigService.get_llm_timeout()),
"temperature": float(config.get("llm_temperature") if config.get("llm_temperature") is not None else config.get("temperature", SystemConfigService.get_llm_temperature())),
"top_p": float(config.get("llm_top_p") if config.get("llm_top_p") is not None else config.get("top_p", SystemConfigService.get_llm_top_p())),
"max_tokens": int(config.get("llm_max_tokens") or config.get("max_tokens") or SystemConfigService.get_llm_max_tokens()),
"system_prompt": config.get("llm_system_prompt") or config.get("system_prompt") or SystemConfigService.get_llm_system_prompt(None),
"model": str(
config.get("llm_model_name")
or config.get("model")
or config.get("model_name")
or default_model
).strip(),
"timeout": cls._coerce_int(
config.get("llm_timeout")
or config.get("timeout")
or config.get("time_out")
or default_timeout,
default_timeout,
minimum=1,
),
"temperature": cls._coerce_float(
config.get("llm_temperature") if config.get("llm_temperature") is not None else config.get("temperature"),
default_temperature,
),
"top_p": cls._coerce_float(
config.get("llm_top_p") if config.get("llm_top_p") is not None else config.get("top_p"),
default_top_p,
),
"max_tokens": cls._coerce_int(
config.get("llm_max_tokens") or config.get("max_tokens") or default_max_tokens,
default_max_tokens,
minimum=1,
),
"system_prompt": config.get("llm_system_prompt") or config.get("system_prompt") or default_system_prompt,
}
def _get_llm_call_params(self) -> Dict[str, Any]:
@ -60,16 +144,74 @@ class LLMService:
headers["Authorization"] = f"Bearer {api_key}"
return headers
def _build_payload(self, prompt: str, stream: bool = False, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
def _normalize_messages(
    self,
    prompt: Optional[str] = None,
    messages: Optional[List[Dict[str, Any]]] = None,
) -> List[Dict[str, str]]:
    """Build a clean chat-message list.

    When ``messages`` is given (even if empty) it wins over ``prompt``:
    non-dict entries, blank roles and empty contents are dropped.
    Otherwise a non-empty ``prompt`` becomes a single user message.
    """
    result: List[Dict[str, str]] = []
    if messages is not None:
        for entry in messages:
            if not isinstance(entry, dict):
                continue
            role = str(entry.get("role") or "").strip()
            if not role:
                continue
            # _normalize_content drops empty/unusable content.
            content = self._normalize_content(entry.get("content"))
            if content:
                result.append({"role": role, "content": content})
        return result
    if prompt is not None:
        content = self._normalize_content(prompt)
        if content:
            result.append({"role": "user", "content": content})
    return result
@staticmethod
def _merge_system_messages(
messages: List[Dict[str, str]],
base_system_prompt: Optional[str],
) -> List[Dict[str, str]]:
merged_messages: List[Dict[str, str]] = []
merged_system_parts: List[str] = []
if isinstance(base_system_prompt, str) and base_system_prompt.strip():
merged_system_parts.append(base_system_prompt.strip())
index = 0
while index < len(messages) and messages[index].get("role") == "system":
content = str(messages[index].get("content") or "").strip()
if content:
merged_system_parts.append(content)
index += 1
if merged_system_parts:
merged_messages.append({"role": "system", "content": "\n\n".join(merged_system_parts)})
merged_messages.extend(messages[index:])
return merged_messages
def _build_payload(
self,
prompt: Optional[str] = None,
messages: Optional[List[Dict[str, Any]]] = None,
stream: bool = False,
params: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
params = params or self._get_llm_call_params()
messages = []
system_prompt = params.get("system_prompt")
if system_prompt:
messages.append({"role": "system", "content": system_prompt})
messages.append({"role": "user", "content": prompt})
normalized_messages = self._normalize_messages(prompt=prompt, messages=messages)
normalized_messages = self._merge_system_messages(normalized_messages, params.get("system_prompt"))
if not normalized_messages:
raise ValueError("缺少 prompt 或 messages")
payload = {
"model": params["model"],
"messages": messages,
"messages": normalized_messages,
"temperature": params["temperature"],
"top_p": params["top_p"],
"max_tokens": params["max_tokens"],
@ -111,6 +253,62 @@ class LLMService:
return ""
def _validate_call_params(self, params: Dict[str, Any]) -> Optional[str]:
if not params.get("endpoint_url"):
return "缺少 endpoint_url"
if not params.get("model"):
return "缺少 model"
if not params.get("api_key"):
return "缺少API Key"
return None
@staticmethod
def _extract_error_message_from_response(response: httpx.Response) -> str:
    """Pull a human-readable error message out of an HTTP error response.

    Preference order: OpenAI-style ``error`` object (message/type/code
    joined with " / "), a plain string ``error`` field, a top-level
    ``message`` field, then the raw body text truncated to 500 chars,
    and finally a generic "HTTP <status>" fallback.
    """
    try:
        payload = response.json()
    except ValueError:
        # Body is not JSON; fall through to raw-text handling below.
        payload = None
    if isinstance(payload, dict):
        error = payload.get("error")
        if isinstance(error, dict):
            parts = [
                str(error.get("message") or "").strip(),
                str(error.get("type") or "").strip(),
                str(error.get("code") or "").strip(),
            ]
            message = " / ".join(part for part in parts if part)
            if message:
                return message
        if isinstance(error, str) and error.strip():
            return error.strip()
        message = payload.get("message")
        if isinstance(message, str) and message.strip():
            return message.strip()
    text = (response.text or "").strip()
    return text[:500] if text else f"HTTP {response.status_code}"
def get_call_params_by_model_code(self, model_code: Optional[str] = None) -> Dict[str, Any]:
    """Resolve LLM call parameters for a model code.

    Falls back to the default call params when no code is given; raises
    LLMServiceError when the code has no active runtime configuration.
    """
    code = self._normalize_model_code(model_code)
    if not code:
        return self._get_llm_call_params()
    runtime_config = SystemConfigService.get_model_runtime_config(code)
    if not runtime_config:
        raise LLMServiceError(f"指定模型不可用: {code}")
    return self.build_call_params_from_config(runtime_config)
def _resolve_call_params(
    self,
    model_code: Optional[str] = None,
    config: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Resolve call params: an explicit config wins over a model code lookup."""
    if config is None:
        return self.get_call_params_by_model_code(model_code)
    return self.build_call_params_from_config(config)
def get_task_prompt(self, task_type: str, cursor=None, prompt_id: Optional[int] = None) -> str:
"""
统一的提示词获取方法
@ -121,7 +319,7 @@ class LLMService:
prompt_id: 可选的提示词ID如果指定则使用该提示词否则使用默认提示词
Returns:
str: 提示词内容如果未找到返回默认提示词
str: 提示词内容
"""
# 如果指定了 prompt_id直接获取该提示词
if prompt_id:
@ -157,38 +355,36 @@ class LLMService:
if result:
return result['content']
# 返回默认提示词
return self._get_default_prompt(task_type)
prompt_label = f"ID={prompt_id}" if prompt_id else f"task_type={task_type} 的默认模版"
raise LLMServiceError(f"未找到可用提示词模版:{prompt_label}")
def _get_default_prompt(self, task_name: str) -> str:
"""获取默认提示词"""
system_prompt = SystemConfigService.get_llm_system_prompt("请根据提供的内容进行总结和分析。")
default_prompts = {
'MEETING_TASK': system_prompt,
'KNOWLEDGE_TASK': "请根据提供的信息生成知识库文章。",
}
return default_prompts.get(task_name, "请根据提供的内容进行总结和分析。")
def _call_llm_api_stream(self, prompt: str) -> Generator[str, None, None]:
"""流式调用 OpenAI 兼容大模型API"""
params = self._get_llm_call_params()
if not params["api_key"]:
yield "error: 缺少API Key"
def stream_llm_api(
self,
prompt: Optional[str] = None,
model_code: Optional[str] = None,
config: Optional[Dict[str, Any]] = None,
messages: Optional[List[Dict[str, Any]]] = None,
) -> Generator[str, None, None]:
"""流式调用 OpenAI 兼容大模型API。"""
try:
params = self._resolve_call_params(model_code=model_code, config=config)
validation_error = self._validate_call_params(params)
if validation_error:
yield f"error: {validation_error}"
return
try:
session = self._create_requests_session()
try:
response = session.post(
timeout = self._build_timeout(params["timeout"])
with self._create_httpx_client() as client:
with client.stream(
"POST",
self._build_chat_url(params["endpoint_url"]),
headers=self._build_headers(params["api_key"]),
json=self._build_payload(prompt, stream=True),
timeout=params["timeout"],
stream=True,
)
json=self._build_payload(prompt=prompt, messages=messages, stream=True, params=params),
timeout=timeout,
) as response:
response.raise_for_status()
for line in response.iter_lines(decode_unicode=True):
for line in response.iter_lines():
if not line or not line.startswith("data:"):
continue
@ -204,49 +400,139 @@ class LLMService:
new_content = self._extract_response_text(data)
if new_content:
yield new_content
finally:
session.close()
except LLMServiceError as e:
error_msg = e.message or str(e)
print(f"流式调用大模型API错误: {error_msg}")
yield f"error: {error_msg}"
except httpx.HTTPStatusError as e:
detail = self._extract_error_message_from_response(e.response)
error_msg = f"流式调用大模型API错误: HTTP {e.response.status_code} - {detail}"
print(error_msg)
yield f"error: {error_msg}"
except httpx.TimeoutException:
error_msg = f"流式调用大模型API超时: timeout={params['timeout']}s"
print(error_msg)
yield f"error: {error_msg}"
except httpx.RequestError as e:
error_msg = f"流式调用大模型API网络错误: {e}"
print(error_msg)
yield f"error: {error_msg}"
except Exception as e:
error_msg = f"流式调用大模型API错误: {e}"
print(error_msg)
yield f"error: {error_msg}"
def _call_llm_api(self, prompt: str) -> Optional[str]:
"""调用 OpenAI 兼容大模型API非流式"""
params = self._get_llm_call_params()
return self.call_llm_api_with_config(params, prompt)
def _call_llm_api_stream(self, prompt: str) -> Generator[str, None, None]:
    """Backward-compatible alias for :meth:`stream_llm_api`."""
    yield from self.stream_llm_api(prompt)
def call_llm_api_with_config(self, params: Dict[str, Any], prompt: str) -> Optional[str]:
"""使用指定配置调用 OpenAI 兼容大模型API非流式"""
if not params["api_key"]:
print("调用大模型API错误: 缺少API Key")
def call_llm_api(
    self,
    prompt: Optional[str] = None,
    model_code: Optional[str] = None,
    config: Optional[Dict[str, Any]] = None,
    messages: Optional[List[Dict[str, Any]]] = None,
) -> Optional[str]:
    """Non-streaming LLM call that returns ``None`` instead of raising.

    Delegates to :meth:`call_llm_api_or_raise` and converts any
    ``LLMServiceError`` into a logged failure.
    """
    try:
        result = self.call_llm_api_or_raise(
            prompt=prompt,
            model_code=model_code,
            config=config,
            messages=messages,
        )
    except LLMServiceError as exc:
        print(f"调用大模型API错误: {exc}")
        return None
    return result
def call_llm_api_or_raise(
    self,
    prompt: Optional[str] = None,
    model_code: Optional[str] = None,
    config: Optional[Dict[str, Any]] = None,
    messages: Optional[List[Dict[str, Any]]] = None,
) -> str:
    """Non-streaming LLM call; raises ``LLMServiceError`` on any failure."""
    call_params = self._resolve_call_params(model_code=model_code, config=config)
    return self.call_llm_api_with_config_or_raise(
        call_params, prompt=prompt, messages=messages
    )
def call_llm_api_messages(
    self,
    messages: List[Dict[str, Any]],
    model_code: Optional[str] = None,
    config: Optional[Dict[str, Any]] = None,
) -> Optional[str]:
    """Chat-style (multi-message) non-streaming call; ``None`` on failure."""
    return self.call_llm_api(
        prompt=None,
        model_code=model_code,
        config=config,
        messages=messages,
    )
def call_llm_api_messages_or_raise(
    self,
    messages: List[Dict[str, Any]],
    model_code: Optional[str] = None,
    config: Optional[Dict[str, Any]] = None,
) -> str:
    """Chat-style (multi-message) non-streaming call; raises ``LLMServiceError`` on failure."""
    return self.call_llm_api_or_raise(
        prompt=None,
        model_code=model_code,
        config=config,
        messages=messages,
    )
def call_llm_api_with_config(
    self,
    params: Dict[str, Any],
    prompt: Optional[str] = None,
    messages: Optional[List[Dict[str, Any]]] = None,
) -> Optional[str]:
    """Non-streaming call with explicit *params*; returns ``None`` on failure.

    Non-raising wrapper around :meth:`call_llm_api_with_config_or_raise`:
    any ``LLMServiceError`` is logged and swallowed.
    """
    try:
        # Fix: removed a leftover `self._create_requests_session()` call from
        # the old requests-based implementation — it opened a session that was
        # never used and never closed (resource leak).
        return self.call_llm_api_with_config_or_raise(params, prompt=prompt, messages=messages)
    except LLMServiceError as e:
        print(f"调用大模型API错误: {e}")
        return None
def call_llm_api_with_config_or_raise(
    self,
    params: Dict[str, Any],
    prompt: Optional[str] = None,
    messages: Optional[List[Dict[str, Any]]] = None,
) -> str:
    """Call an OpenAI-compatible chat endpoint (non-streaming) with *params*.

    Returns:
        The extracted response text (never empty).

    Raises:
        LLMServiceError: on invalid params, HTTP error status, timeout,
            network failure, an empty response body, or any unexpected error.
    """
    validation_error = self._validate_call_params(params)
    if validation_error:
        raise LLMServiceError(validation_error)
    timeout = self._build_timeout(params["timeout"])
    try:
        # Fix: removed leftover lines from the removed requests-based
        # implementation (`session.post(`, duplicate timeout argument,
        # `finally: session.close()`, old print/return) that had been
        # interleaved with the httpx version.
        with self._create_httpx_client() as client:
            response = client.post(
                self._build_chat_url(params["endpoint_url"]),
                headers=self._build_headers(params["api_key"]),
                json=self._build_payload(prompt=prompt, messages=messages, params=params),
                timeout=timeout,
            )
            response.raise_for_status()
            content = self._extract_response_text(response.json())
        if content:
            return content
        raise LLMServiceError("API调用失败: 返回内容为空")
    except LLMServiceError:
        # Already a structured error (e.g. empty-body case above) — re-raise
        # as-is instead of letting the broad handler re-wrap and mangle it.
        raise
    except httpx.HTTPStatusError as e:
        detail = self._extract_error_message_from_response(e.response)
        raise LLMServiceError(
            f"HTTP {e.response.status_code} - {detail}",
            status_code=e.response.status_code,
        ) from e
    except httpx.TimeoutException as e:
        raise LLMServiceError(f"调用超时: timeout={params['timeout']}s") from e
    except httpx.RequestError as e:
        raise LLMServiceError(f"网络错误: {e}") from e
    except Exception as e:
        raise LLMServiceError(str(e)) from e
def _call_llm_api(self, prompt: str) -> Optional[str]:
    """Backward-compatible alias for :meth:`call_llm_api`."""
    return self.call_llm_api(prompt)
def test_model(self, config: Dict[str, Any], prompt: Optional[str] = None) -> Dict[str, Any]:
params = self.build_call_params_from_config(config)
test_prompt = prompt or "请用一句中文回复LLM测试成功。"
content = self.call_llm_api_with_config(params, test_prompt)
content = self.call_llm_api_with_config_or_raise(params, test_prompt)
if not content:
raise Exception("模型无有效返回内容")

View File

@ -4,7 +4,6 @@ from threading import RLock
from typing import Optional, Dict, Any
from app.core.database import get_db_connection
class SystemConfigService:
"""系统配置服务 - 优先从新配置表读取,兼容 dict_data(system_config) 回退"""
@ -18,6 +17,7 @@ class SystemConfigService:
PAGE_SIZE = 'page_size'
DEFAULT_RESET_PASSWORD = 'default_reset_password'
MAX_AUDIO_SIZE = 'max_audio_size'
TOKEN_EXPIRE_DAYS = 'token_expire_days'
# 品牌配置
APP_NAME = 'app_name'
@ -352,6 +352,11 @@ class SystemConfigService:
print(f"Error getting config attribute {dict_code}.{attr_name}: {e}")
return default_value
@classmethod
def get_model_runtime_config(cls, model_code: str) -> Optional[Dict[str, Any]]:
    """Runtime config for *model_code* from the model-config table, or ``None``."""
    runtime_config = cls._get_model_config_json(model_code)
    return runtime_config
@classmethod
def set_config(cls, dict_code: str, value: Any, label_cn: str = None) -> bool:
"""
@ -590,6 +595,48 @@ class SystemConfigService:
success = False
return success
@classmethod
def ensure_builtin_parameters(cls) -> None:
    """Ensure built-in system parameters exist (idempotent, best-effort).

    Inserts each built-in row into ``sys_system_parameters``; the no-op
    ``ON DUPLICATE KEY UPDATE param_name = param_name`` clause leaves
    existing rows untouched so admin edits are never overwritten.
    Errors are logged rather than raised so application startup cannot be
    blocked by a config-table hiccup.
    """
    builtin_parameters = [
        {
            "param_key": cls.TOKEN_EXPIRE_DAYS,
            "param_name": "Token过期时间",
            "param_value": "7",
            "value_type": "number",
            "category": "system",
            "description": "控制登录 token 的过期时间,单位:天。",
            "is_active": 1,
        },
    ]
    insert_sql = """
        INSERT INTO sys_system_parameters
        (param_key, param_name, param_value, value_type, category, description, is_active)
        VALUES (%s, %s, %s, %s, %s, %s, %s)
        ON DUPLICATE KEY UPDATE
        param_name = param_name
    """
    try:
        with get_db_connection() as conn:
            cursor = conn.cursor()
            try:
                for item in builtin_parameters:
                    cursor.execute(
                        insert_sql,
                        (
                            item["param_key"],
                            item["param_name"],
                            item["param_value"],
                            item["value_type"],
                            item["category"],
                            item["description"],
                            item["is_active"],
                        ),
                    )
                conn.commit()
            finally:
                # Fix: close the cursor even when an execute/commit raises;
                # previously a failure mid-loop leaked the cursor.
                cursor.close()
    except Exception as e:
        print(f"Error ensuring builtin parameters: {e}")
# 便捷方法:获取特定配置
@classmethod
def get_asr_vocabulary_id(cls) -> Optional[str]:
@ -669,6 +716,16 @@ class SystemConfigService:
except (ValueError, TypeError):
return default
@classmethod
def get_token_expire_days(cls, default: int = 7) -> int:
    """Access-token lifetime in days.

    Returns *default* when the config value is missing, not an integer,
    or not strictly positive.
    """
    raw_value = cls.get_config(cls.TOKEN_EXPIRE_DAYS, str(default))
    try:
        days = int(raw_value)
    except (TypeError, ValueError):
        return default
    return days if days > 0 else default
@classmethod
def get_public_configs(cls) -> Dict[str, Any]:
"""获取提供给前端初始化使用的公开参数。"""
@ -772,7 +829,7 @@ class SystemConfigService:
return default
@classmethod
def get_llm_system_prompt(cls, default: Optional[str] = None) -> Optional[str]:
    """Return the configured LLM system prompt.

    Falls back to *default* when the `llm_model.system_prompt` attribute is
    missing, not a string, or blank.

    Fix: diff residue left two conflicting `def` signatures here (the old
    one with a hard-coded Chinese default string, and the new Optional
    one); the new signature is kept — callers that passed an explicit
    default are unaffected.
    """
    value = cls.get_config_attribute('llm_model', 'system_prompt', default)
    return value if isinstance(value, str) and value.strip() else default

View File

@ -8,6 +8,7 @@ redis
# Services & External APIs
requests
httpx
dashscope
PyJWT
qiniu

View File

@ -176,10 +176,11 @@
| `task_id` | VARCHAR(100) | PRIMARY KEY | 业务任务唯一ID (UUID) |
| `meeting_id` | INT | NOT NULL, FK | 关联的会议ID |
| `prompt_id` | INT | NOT NULL, FK | 关联的提示词模版ID |
| `model_code` | VARCHAR(100) | NULL | 本次任务使用的模型编码,用于服务恢复和任务重试时复用原模型 |
| `user_prompt` | TEXT | NULL | 用户输入的额外提示 |
| `status` | ENUM(...) | DEFAULT 'pending' | 任务状态: 'pending', 'processing', 'completed', 'failed' |
| `progress` | INT | DEFAULT 0 | 任务进度百分比 (0-100) |
| `result` | TEXT | NULL | 成功时存储生成的纪要内容 |
| `result` | TEXT | NULL | 成功时存储导出的 Markdown 文件路径(如 `/uploads/...`) |
| `created_at` | TIMESTAMP | DEFAULT CURRENT_TIMESTAMP | 任务创建时间 |
| `completed_at` | TIMESTAMP | NULL | 任务完成时间 |
| `error_message` | TEXT | NULL | 错误信息记录 |

View File

@ -4,6 +4,15 @@
会议总结提示词模板支持使用变量占位符。只有在模板内容中显式写入变量时,系统才会替换对应值;未使用的变量不会自动追加到提示词中。
当前会议总结链路的推荐组装方式如下:
- `base_system_prompt`:放全局稳定规则,例如准确性要求、格式遵循、禁止编造;不要放会议业务指令
- 会议模板(`MEETING_TASK`):放任务级总结模板,会作为会议总结专用的 `system` 指令
- `user_prompt`:放本次用户的临时补充要求,例如侧重点、篇幅、风格
- 会议转录:单独作为用户消息输入,作为总结事实来源
这样可以把“规则”和“材料”分层,减少模型将转录内容误当成指令的概率。
## 支持的变量
### 1. 会议 ID

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -48,13 +48,13 @@
### 4.2. AI总结 (Async LLM Summary)
- **触发**: 用户在会议详情页点击“生成纪要” (`POST /meetings/{meeting_id}/generate-summary-async`)。
- **实现**: `AsyncLLMService` 利用 FastAPI 的 `BackgroundTasks` 实现轻量级异步处理
1. **创建任务**: 生成一个任务ID (`task_id`)并将任务元数据会议ID、用户提示词等存入 `llm_tasks` 表和Redis缓存立即返回任务ID给前端。
2. **后台执行**: `_process_task` 函数在后台运行,它按顺序执行:获取转录稿、构建Prompt、调用LLM API、保存结果
- **实现**: `AsyncMeetingService` 利用后台任务执行器处理会议纪要异步任务
1. **创建任务**: 生成一个任务ID (`task_id`)并将任务元数据会议ID、用户提示词、模型编码等)存入 `llm_tasks` 表和 Redis 缓存立即返回任务ID给前端。
2. **后台执行**: `_process_task` 函数在后台运行,它按顺序执行:获取转录稿、构建消息、调用 LLM API、保存总结并导出 Markdown 文件
3. **状态与进度更新**: 在执行的每个关键步骤都会更新Redis中的任务状态和进度百分比 (`processing`, `10%`, `30%` ...)。
4. **状态查询**: 前端通过 `GET /llm-tasks/{task_id}/status` 轮询任务状态和进度。
5. **结果获取**: 任务完成后,状态变为 `completed`结果会保存在 `llm_tasks` 表的 `result` 字段,并可通过状态查询接口获取
- **关键代码**: `backend/app/services/async_llm_service.py`
5. **结果获取**: 任务完成后,状态变为 `completed``llm_tasks.result` 保存导出的 Markdown 文件路径,失败原因则记录在 `error_message`
- **关键代码**: `backend/app/services/async_meeting_service.py`
### 4.3. 声纹采集
## 前端UI要素