codex/dev
mula.liu 2026-04-09 19:43:00 +08:00
parent 3fe28934cc
commit 861d7e3463
11 changed files with 386 additions and 260 deletions

View File

@ -4,9 +4,7 @@ from app.core.config import BASE_DIR, AUDIO_DIR, TEMP_UPLOAD_DIR
from app.core.auth import get_current_user from app.core.auth import get_current_user
from app.core.response import create_api_response from app.core.response import create_api_response
from app.services.async_transcription_service import AsyncTranscriptionService from app.services.async_transcription_service import AsyncTranscriptionService
from app.services.async_meeting_service import async_meeting_service from app.services.audio_upload_task_service import audio_upload_task_service
from app.services.audio_preprocess_service import audio_preprocess_service
from app.services.audio_service import handle_audio_upload
from pydantic import BaseModel from pydantic import BaseModel
from typing import Optional, List from typing import Optional, List
from datetime import datetime, timedelta from datetime import datetime, timedelta
@ -456,83 +454,25 @@ async def complete_upload(
} }
) )
# 6. 对合并后的音频执行统一预处理 # 6. 提交后台任务,异步执行预处理和转录启动
full_path = BASE_DIR / file_path.lstrip('/') full_path = BASE_DIR / file_path.lstrip('/')
try: transcription_task_id = audio_upload_task_service.enqueue_upload_processing(
preprocess_result = audio_preprocess_service.preprocess(full_path)
processed_full_path = preprocess_result.file_path
file_size = preprocess_result.file_size
file_name = preprocess_result.file_name
audio_duration = preprocess_result.metadata.duration_seconds
file_path = f"/{processed_full_path.relative_to(BASE_DIR)}"
print(
f"流式上传音频预处理完成: source={full_path.name}, "
f"target={processed_full_path.name}, duration={audio_duration}s, "
f"applied={preprocess_result.applied}"
)
except Exception as e:
if full_path.exists():
try:
os.remove(full_path)
except OSError:
pass
return create_api_response(
code="500",
message=f"音频预处理失败: {str(e)}"
)
# 7. 调用 audio_service 处理文件(数据库更新、启动转录和总结)
result = handle_audio_upload(
file_path=file_path,
file_name=file_name,
file_size=file_size,
meeting_id=request.meeting_id, meeting_id=request.meeting_id,
original_file_path=file_path,
current_user=current_user, current_user=current_user,
auto_summarize=request.auto_summarize, auto_summarize=request.auto_summarize,
background_tasks=background_tasks, prompt_id=request.prompt_id,
prompt_id=request.prompt_id, # 传递提示词模版ID
duration=audio_duration # 传递时长参数
) )
# 如果处理失败,返回错误
if not result["success"]:
cleanup_paths = [processed_full_path]
if processed_full_path != full_path:
cleanup_paths.append(full_path)
for cleanup_path in cleanup_paths:
if cleanup_path.exists():
try:
os.remove(cleanup_path)
except OSError:
pass
return result["response"]
if preprocess_result.applied and processed_full_path != full_path and full_path.exists():
try:
os.remove(full_path)
except OSError:
pass
# 8. 返回成功响应
transcription_task_id = result["transcription_task_id"]
message_suffix = ""
if transcription_task_id:
if request.auto_summarize:
message_suffix = ",正在进行转录和总结"
else:
message_suffix = ",正在进行转录"
return create_api_response( return create_api_response(
code="200", code="200",
message="音频上传完成" + message_suffix, message="音频上传完成,后台正在处理音频" + ("并准备总结" if request.auto_summarize else ""),
data={ data={
"meeting_id": request.meeting_id, "meeting_id": request.meeting_id,
"file_path": file_path, "file_path": file_path,
"file_size": file_size,
"duration": audio_duration,
"task_id": transcription_task_id, "task_id": transcription_task_id,
"task_status": "pending" if transcription_task_id else None, "task_status": "processing",
"background_processing": True,
"auto_summarize": request.auto_summarize "auto_summarize": request.auto_summarize
} }
) )

View File

@ -25,12 +25,12 @@ def get_voiceprint_template(current_user: dict = Depends(get_current_user)):
""" """
try: try:
template_data = VoiceprintTemplate( template_data = VoiceprintTemplate(
template_text=SystemConfigService.get_voiceprint_template(), content=SystemConfigService.get_voiceprint_template(),
duration_seconds=SystemConfigService.get_voiceprint_duration(), duration_seconds=SystemConfigService.get_voiceprint_duration(),
sample_rate=SystemConfigService.get_voiceprint_sample_rate(), sample_rate=SystemConfigService.get_voiceprint_sample_rate(),
channels=SystemConfigService.get_voiceprint_channels() channels=SystemConfigService.get_voiceprint_channels()
) )
return create_api_response(code="200", message="获取朗读模板成功", data=template_data.dict()) return create_api_response(code="200", message="获取朗读模板成功", data=template_data.model_dump())
except Exception as e: except Exception as e:
return create_api_response(code="500", message=f"获取朗读模板失败: {str(e)}") return create_api_response(code="500", message=f"获取朗读模板失败: {str(e)}")

View File

@ -261,6 +261,8 @@ class VoiceprintStatus(BaseModel):
class VoiceprintTemplate(BaseModel): class VoiceprintTemplate(BaseModel):
content: str content: str
duration_seconds: int duration_seconds: int
sample_rate: int
channels: int
# 菜单权限相关模型 # 菜单权限相关模型
class MenuInfo(BaseModel): class MenuInfo(BaseModel):

View File

@ -153,7 +153,7 @@ class AsyncTranscriptionService:
"used_params": call_params, "used_params": call_params,
} }
def start_transcription(self, meeting_id: int, audio_file_path: str) -> str: def start_transcription(self, meeting_id: int, audio_file_path: str, business_task_id: Optional[str] = None) -> str:
""" """
启动异步转录任务 启动异步转录任务
@ -175,7 +175,13 @@ class AsyncTranscriptionService:
deleted_segments = cursor.rowcount deleted_segments = cursor.rowcount
print(f"Deleted {deleted_segments} old transcript segments") print(f"Deleted {deleted_segments} old transcript segments")
# 删除旧的转录任务记录 # 删除旧的转录任务记录;如果已创建本地占位任务,则保留当前任务记录
if business_task_id:
cursor.execute(
"DELETE FROM transcript_tasks WHERE meeting_id = %s AND task_id <> %s",
(meeting_id, business_task_id),
)
else:
cursor.execute("DELETE FROM transcript_tasks WHERE meeting_id = %s", (meeting_id,)) cursor.execute("DELETE FROM transcript_tasks WHERE meeting_id = %s", (meeting_id,))
deleted_tasks = cursor.rowcount deleted_tasks = cursor.rowcount
print(f"Deleted {deleted_tasks} old transcript tasks") print(f"Deleted {deleted_tasks} old transcript tasks")
@ -215,12 +221,12 @@ class AsyncTranscriptionService:
raise Exception(f"Transcription API error: {task_response.message}") raise Exception(f"Transcription API error: {task_response.message}")
paraformer_task_id = task_response.output.task_id paraformer_task_id = task_response.output.task_id
business_task_id = str(uuid.uuid4()) final_business_task_id = business_task_id or str(uuid.uuid4())
# 4. 在Redis中存储任务映射 # 4. 在Redis中存储任务映射
current_time = datetime.now().isoformat() current_time = datetime.now().isoformat()
task_data = { task_data = {
'business_task_id': business_task_id, 'business_task_id': final_business_task_id,
'paraformer_task_id': paraformer_task_id, 'paraformer_task_id': paraformer_task_id,
'meeting_id': str(meeting_id), 'meeting_id': str(meeting_id),
'file_url': file_url, 'file_url': file_url,
@ -231,19 +237,75 @@ class AsyncTranscriptionService:
} }
# 存储到Redis过期时间24小时 # 存储到Redis过期时间24小时
self.redis_client.hset(f"task:{business_task_id}", mapping=task_data) self.redis_client.hset(f"task:{final_business_task_id}", mapping=task_data)
self.redis_client.expire(f"task:{business_task_id}", 86400) self.redis_client.expire(f"task:{final_business_task_id}", 86400)
# 5. 在数据库中创建任务记录 # 5. 在数据库中创建任务记录
self._save_task_to_db(business_task_id, paraformer_task_id, meeting_id, audio_file_path) self._save_task_to_db(final_business_task_id, paraformer_task_id, meeting_id, audio_file_path)
print(f"Transcription task created: {business_task_id}") print(f"Transcription task created: {final_business_task_id}")
return business_task_id return final_business_task_id
except Exception as e: except Exception as e:
print(f"Error starting transcription: {e}") print(f"Error starting transcription: {e}")
raise e raise e
def create_local_processing_task(
    self,
    meeting_id: int,
    status: str = "processing",
    progress: int = 0,
    error_message: Optional[str] = None,
) -> str:
    """Create a local placeholder transcription task before any external ASR call.

    Used by the async upload flow: the task id is handed to the client immediately
    so it can poll progress while preprocessing/transcription run in the background.
    An empty/NULL ``paraformer_task_id`` marks the task as local-only; status
    lookups treat such tasks as locally tracked rather than querying the ASR API.

    Args:
        meeting_id: Meeting the task belongs to.
        status: Initial task status (defaults to "processing").
        progress: Initial progress percentage (0-100).
        error_message: Optional initial error text (normally None).

    Returns:
        The newly generated business task id (UUID4 string).
    """
    business_task_id = str(uuid.uuid4())
    current_time = datetime.now().isoformat()
    # Mirror the Redis hash layout used for real transcription tasks so the
    # same status-reading code works for both; empty paraformer_task_id means
    # "no external ASR task yet".
    task_data = {
        "business_task_id": business_task_id,
        "paraformer_task_id": "",
        "meeting_id": str(meeting_id),
        "status": status,
        "progress": str(progress),
        "created_at": current_time,
        "updated_at": current_time,
        "error_message": error_message or "",
    }
    self.redis_client.hset(f"task:{business_task_id}", mapping=task_data)
    # Same 24h TTL as regular transcription task entries.
    self.redis_client.expire(f"task:{business_task_id}", 86400)
    # Persist a DB record as well; NULL paraformer_task_id is the durable
    # marker for a placeholder task.
    with get_db_connection() as connection:
        cursor = connection.cursor()
        cursor.execute(
            """
            INSERT INTO transcript_tasks (task_id, paraformer_task_id, meeting_id, status, progress, created_at, error_message)
            VALUES (%s, NULL, %s, %s, %s, NOW(), %s)
            """,
            (business_task_id, meeting_id, status, progress, error_message),
        )
        connection.commit()
        cursor.close()
    return business_task_id
def update_local_processing_task(
    self,
    business_task_id: str,
    status: str,
    progress: int,
    error_message: Optional[str] = None,
) -> None:
    """Update the status/progress of a local placeholder task in Redis and the DB.

    Args:
        business_task_id: Task id returned by ``create_local_processing_task``.
        status: New task status (e.g. "processing", "failed").
        progress: New progress percentage (0-100).
        error_message: Error text for failures; stored as "" in Redis when None.
    """
    updated_at = datetime.now().isoformat()
    self.redis_client.hset(
        f"task:{business_task_id}",
        mapping={
            "status": status,
            "progress": str(progress),
            "updated_at": updated_at,
            "error_message": error_message or "",
        },
    )
    # Refresh the 24h TTL on every update so active tasks never expire mid-run.
    self.redis_client.expire(f"task:{business_task_id}", 86400)
    # Keep the durable DB record in sync with the Redis cache.
    self._update_task_status_in_db(business_task_id, status, progress, error_message)
def get_task_status(self, business_task_id: str) -> Dict[str, Any]: def get_task_status(self, business_task_id: str) -> Dict[str, Any]:
""" """
获取任务状态 获取任务状态
@ -270,6 +332,13 @@ class AsyncTranscriptionService:
progress = int(task_data.get('progress') or 0) progress = int(task_data.get('progress') or 0)
error_message = task_data.get('error_message') or None error_message = task_data.get('error_message') or None
updated_at = task_data.get('updated_at') or updated_at updated_at = task_data.get('updated_at') or updated_at
else:
paraformer_task_id = task_data.get('paraformer_task_id')
if not paraformer_task_id:
current_status = task_data.get('status') or 'processing'
progress = int(task_data.get('progress') or 0)
error_message = task_data.get('error_message') or None
updated_at = task_data.get('updated_at') or updated_at
else: else:
cached_status = self.redis_client.hgetall(status_cache_key) cached_status = self.redis_client.hgetall(status_cache_key)
if cached_status and cached_status.get('status') in {'pending', 'processing'}: if cached_status and cached_status.get('status') in {'pending', 'processing'}:
@ -278,8 +347,6 @@ class AsyncTranscriptionService:
error_message = cached_status.get('error_message') or None error_message = cached_status.get('error_message') or None
updated_at = cached_status.get('updated_at') or updated_at updated_at = cached_status.get('updated_at') or updated_at
else: else:
paraformer_task_id = task_data['paraformer_task_id']
# 2. 查询外部API获取状态 # 2. 查询外部API获取状态
try: try:
audio_config = SystemConfigService.get_active_audio_model_config("asr") audio_config = SystemConfigService.get_active_audio_model_config("asr")
@ -293,7 +360,7 @@ class AsyncTranscriptionService:
paraformer_status = paraformer_response.output.task_status paraformer_status = paraformer_response.output.task_status
current_status = self._map_paraformer_status(paraformer_status) current_status = self._map_paraformer_status(paraformer_status)
progress = self._calculate_progress(paraformer_status) progress = self._calculate_progress(paraformer_status)
error_message = None #执行成功,清除初始状态 error_message = None
except Exception as e: except Exception as e:
current_status = 'failed' current_status = 'failed'
@ -302,11 +369,8 @@ class AsyncTranscriptionService:
# 3. 如果任务完成,处理结果 # 3. 如果任务完成,处理结果
if current_status == 'completed' and paraformer_response.output.get('results'): if current_status == 'completed' and paraformer_response.output.get('results'):
# 防止并发处理:先检查数据库中的状态
db_task_status = self._get_task_status_from_db(business_task_id) db_task_status = self._get_task_status_from_db(business_task_id)
if db_task_status != 'completed': if db_task_status != 'completed':
# 只有当数据库中状态不是completed时才处理
# 先将状态更新为completed作为分布式锁
self._update_task_status_in_db(business_task_id, 'completed', 100, None) self._update_task_status_in_db(business_task_id, 'completed', 100, None)
try: try:
@ -317,7 +381,7 @@ class AsyncTranscriptionService:
) )
except Exception as e: except Exception as e:
current_status = 'failed' current_status = 'failed'
progress = 100 # 进度为100但状态是失败 progress = 100
error_message = f"Error processing transcription result: {e}" error_message = f"Error processing transcription result: {e}"
print(error_message) print(error_message)
else: else:
@ -459,7 +523,19 @@ class AsyncTranscriptionService:
with get_db_connection() as connection: with get_db_connection() as connection:
cursor = connection.cursor() cursor = connection.cursor()
# 插入转录任务记录 cursor.execute("SELECT task_id FROM transcript_tasks WHERE task_id = %s", (business_task_id,))
existing = cursor.fetchone()
if existing:
cursor.execute(
"""
UPDATE transcript_tasks
SET paraformer_task_id = %s, meeting_id = %s, status = 'pending', progress = 0,
completed_at = NULL, error_message = NULL
WHERE task_id = %s
""",
(paraformer_task_id, meeting_id, business_task_id),
)
else:
insert_task_query = """ insert_task_query = """
INSERT INTO transcript_tasks (task_id, paraformer_task_id, meeting_id, status, progress, created_at) INSERT INTO transcript_tasks (task_id, paraformer_task_id, meeting_id, status, progress, created_at)
VALUES (%s, %s, %s, 'pending', 0, NOW()) VALUES (%s, %s, %s, 'pending', 0, NOW())

View File

@ -26,7 +26,8 @@ def handle_audio_upload(
background_tasks: BackgroundTasks = None, background_tasks: BackgroundTasks = None,
prompt_id: int = None, prompt_id: int = None,
model_code: str = None, model_code: str = None,
duration: int = 0 duration: int = 0,
transcription_task_id: str = None,
) -> dict: ) -> dict:
""" """
处理已保存的完整音频文件 处理已保存的完整音频文件
@ -49,6 +50,7 @@ def handle_audio_upload(
prompt_id: 提示词模版ID可选如果不指定则使用默认模版 prompt_id: 提示词模版ID可选如果不指定则使用默认模版
model_code: 总结模型编码可选如果不指定则使用默认模型 model_code: 总结模型编码可选如果不指定则使用默认模型
duration: 音频时长 duration: 音频时长
transcription_task_id: 预先创建的本地任务ID可选用于异步上传场景
Returns: Returns:
dict: { dict: {
@ -138,7 +140,11 @@ def handle_audio_upload(
# 4. 启动转录任务 # 4. 启动转录任务
try: try:
transcription_task_id = transcription_service.start_transcription(meeting_id, file_path) transcription_task_id = transcription_service.start_transcription(
meeting_id,
file_path,
business_task_id=transcription_task_id,
)
print(f"Transcription task {transcription_task_id} started for meeting {meeting_id}") print(f"Transcription task {transcription_task_id} started for meeting {meeting_id}")
# 5. 如果启用自动总结,则提交后台监控任务 # 5. 如果启用自动总结,则提交后台监控任务

View File

@ -0,0 +1,154 @@
"""
音频上传后台处理服务
将上传后的重操作放到后台线程执行避免请求长时间阻塞
1. 音频预处理
2. 更新音频文件记录
3. 启动转录
4. 启动自动总结监控
"""
from __future__ import annotations
import json
import os
from pathlib import Path
from typing import Optional
from app.core.config import BACKGROUND_TASK_CONFIG, BASE_DIR
from app.services.async_transcription_service import AsyncTranscriptionService
from app.services.audio_preprocess_service import audio_preprocess_service
from app.services.audio_service import handle_audio_upload
from app.services.background_task_runner import KeyedBackgroundTaskRunner
# Shared background runner for post-upload audio processing. The worker count
# comes from BACKGROUND_TASK_CONFIG["upload_workers"] (default 2) and is
# clamped to at least one worker.
_upload_worker_count = max(1, int(BACKGROUND_TASK_CONFIG.get("upload_workers", 2)))
upload_task_runner = KeyedBackgroundTaskRunner(
    max_workers=_upload_worker_count,
    thread_name_prefix="imeeting-audio-upload",
)
class AudioUploadTaskService:
    """Runs the heavy post-upload pipeline on a background thread.

    The pipeline is: audio preprocessing -> hand-off to the audio service
    (DB update + transcription start + optional summary monitoring). A local
    placeholder transcription task is created up-front so the HTTP upload
    endpoint can return immediately and clients can poll progress by task id.
    """

    def __init__(self):
        # Owns the placeholder-task bookkeeping (Redis hash + DB record).
        self.transcription_service = AsyncTranscriptionService()

    def enqueue_upload_processing(
        self,
        *,
        meeting_id: int,
        original_file_path: str,
        current_user: dict,
        auto_summarize: bool,
        prompt_id: Optional[int] = None,
        model_code: Optional[str] = None,
    ) -> str:
        """Create a placeholder task and schedule the background processing job.

        Args:
            meeting_id: Meeting the uploaded audio belongs to.
            original_file_path: Path of the raw upload relative to BASE_DIR
                (a leading slash is tolerated and stripped later).
            current_user: Authenticated user dict, forwarded to the audio service.
            auto_summarize: Whether to start auto-summary monitoring after transcription.
            prompt_id: Optional prompt-template id for summarization.
            model_code: Optional summary model code.

        Returns:
            The business task id the client can poll for status/progress.
        """
        # Start at 5% so the UI shows movement right after the upload returns.
        task_id = self.transcription_service.create_local_processing_task(
            meeting_id=meeting_id,
            status="processing",
            progress=5,
        )
        # Submission is keyed by task id — presumably KeyedBackgroundTaskRunner
        # dedupes/serializes work per key; confirm against its implementation.
        upload_task_runner.submit(
            f"audio-upload:{task_id}",
            self._process_uploaded_audio,
            task_id,
            meeting_id,
            original_file_path,
            current_user,
            auto_summarize,
            prompt_id,
            model_code,
        )
        return task_id

    def _process_uploaded_audio(
        self,
        task_id: str,
        meeting_id: int,
        original_file_path: str,
        current_user: dict,
        auto_summarize: bool,
        prompt_id: Optional[int],
        model_code: Optional[str],
    ) -> None:
        """Background job body: preprocess, hand off to the audio service, clean up.

        On any failure the placeholder task is marked "failed". Files are only
        deleted on failure if the audio service was never invoked; after hand-off
        the audio service is considered the owner of the files.
        """
        source_absolute_path = BASE_DIR / original_file_path.lstrip("/")
        processed_absolute_path: Optional[Path] = None
        # Flipped to True immediately before calling handle_audio_upload; gates
        # the failure-path cleanup below.
        handoff_to_audio_service = False
        try:
            self.transcription_service.update_local_processing_task(task_id, "processing", 15, None)
            preprocess_result = audio_preprocess_service.preprocess(source_absolute_path)
            processed_absolute_path = preprocess_result.file_path
            audio_duration = preprocess_result.metadata.duration_seconds
            # Re-derive the BASE_DIR-relative web path of the (possibly new)
            # preprocessed file.
            file_path = "/" + str(processed_absolute_path.relative_to(BASE_DIR))
            print(
                f"[AudioUploadTaskService] 音频预处理完成: source={source_absolute_path.name}, "
                f"target={processed_absolute_path.name}, duration={audio_duration}s, "
                f"applied={preprocess_result.applied}"
            )
            self.transcription_service.update_local_processing_task(task_id, "processing", 40, None)
            handoff_to_audio_service = True
            # Reuse the placeholder task id so client-side polling stays
            # continuous across the hand-off to the transcription service.
            result = handle_audio_upload(
                file_path=file_path,
                file_name=preprocess_result.file_name,
                file_size=preprocess_result.file_size,
                meeting_id=meeting_id,
                current_user=current_user,
                auto_summarize=auto_summarize,
                background_tasks=None,
                prompt_id=prompt_id,
                model_code=model_code,
                duration=audio_duration,
                transcription_task_id=task_id,
            )
            if not result["success"]:
                # Surface the audio service's error message via the except path.
                raise RuntimeError(self._extract_response_message(result["response"]))
            # Preprocessing produced a distinct output file: drop the original
            # upload to avoid keeping both copies. Deletion is best-effort.
            if preprocess_result.applied and processed_absolute_path != source_absolute_path and source_absolute_path.exists():
                try:
                    os.remove(source_absolute_path)
                except OSError:
                    pass
        except Exception as exc:
            error_message = str(exc)
            print(f"[AudioUploadTaskService] 音频后台处理失败, task_id={task_id}, meeting_id={meeting_id}: {error_message}")
            self.transcription_service.update_local_processing_task(task_id, "failed", 0, error_message)
            if handoff_to_audio_service:
                # The audio service has seen the files; do not delete them here.
                return
            # Pre-hand-off failure: remove whatever exists (processed output
            # and/or original upload), deduplicating in case they are the same path.
            cleanup_targets = []
            if processed_absolute_path:
                cleanup_targets.append(processed_absolute_path)
            if source_absolute_path.exists():
                cleanup_targets.append(source_absolute_path)
            deduped_targets: list[Path] = []
            for target in cleanup_targets:
                if target not in deduped_targets:
                    deduped_targets.append(target)
            for target in deduped_targets:
                if target.exists():
                    try:
                        os.remove(target)
                    except OSError:
                        pass

    @staticmethod
    def _extract_response_message(response) -> str:
        """Best-effort extraction of the "message" field from a response object.

        `response` is presumably a Starlette/FastAPI JSONResponse whose ``body``
        holds UTF-8 JSON bytes — TODO confirm against handle_audio_upload.
        Falls back to a generic failure message on any missing/undecodable body.
        """
        body = getattr(response, "body", None)
        if not body:
            return "音频处理失败"
        try:
            payload = json.loads(body.decode("utf-8"))
            return payload.get("message") or "音频处理失败"
        except Exception:
            return "音频处理失败"
audio_upload_task_service = AudioUploadTaskService()

View File

@ -7,8 +7,7 @@ import app.core.config as config_module
from app.services.llm_service import LLMService from app.services.llm_service import LLMService
from app.services.async_transcription_service import AsyncTranscriptionService from app.services.async_transcription_service import AsyncTranscriptionService
from app.services.async_meeting_service import async_meeting_service from app.services.async_meeting_service import async_meeting_service
from app.services.audio_service import handle_audio_upload from app.services.audio_upload_task_service import audio_upload_task_service
from app.services.audio_preprocess_service import audio_preprocess_service
from app.services.system_config_service import SystemConfigService from app.services.system_config_service import SystemConfigService
from app.core.auth import get_current_user, get_optional_current_user from app.core.auth import get_current_user, get_optional_current_user
from app.core.response import create_api_response from app.core.response import create_api_response
@ -733,88 +732,28 @@ async def upload_audio(
except Exception as e: except Exception as e:
return create_api_response(code="500", message=f"保存文件失败: {str(e)}") return create_api_response(code="500", message=f"保存文件失败: {str(e)}")
# 3.5 统一做音频预处理 # 4. 提交后台任务,异步执行预处理和转录启动
try: transcription_task_id = audio_upload_task_service.enqueue_upload_processing(
preprocess_result = audio_preprocess_service.preprocess(absolute_path)
processed_absolute_path = preprocess_result.file_path
audio_duration = preprocess_result.metadata.duration_seconds
print(
f"音频预处理完成: source={absolute_path.name}, "
f"target={processed_absolute_path.name}, duration={audio_duration}s, "
f"applied={preprocess_result.applied}"
)
except Exception as e:
if absolute_path.exists():
try:
os.remove(absolute_path)
except OSError:
pass
return create_api_response(code="500", message=f"音频预处理失败: {str(e)}")
processed_relative_path = processed_absolute_path.relative_to(BASE_DIR)
file_path = '/' + str(processed_relative_path)
file_name = preprocess_result.file_name
file_size = preprocess_result.file_size
# 4. 调用 audio_service 处理文件(权限检查、数据库更新、启动转录)
result = handle_audio_upload(
file_path=file_path,
file_name=file_name,
file_size=file_size,
meeting_id=meeting_id, meeting_id=meeting_id,
original_file_path='/' + str(absolute_path.relative_to(BASE_DIR)),
current_user=current_user, current_user=current_user,
auto_summarize=auto_summarize_bool, auto_summarize=auto_summarize_bool,
background_tasks=background_tasks,
prompt_id=prompt_id, prompt_id=prompt_id,
model_code=model_code, model_code=model_code,
duration=audio_duration # 传递时长参数
) )
# 如果不成功,删除已保存的文件并返回错误
if not result["success"]:
cleanup_paths = [processed_absolute_path]
if processed_absolute_path != absolute_path:
cleanup_paths.append(absolute_path)
for cleanup_path in cleanup_paths:
if cleanup_path.exists():
try:
os.remove(cleanup_path)
print(f"Deleted file due to processing error: {cleanup_path}")
except Exception as e:
print(f"Warning: Failed to delete file {cleanup_path}: {e}")
return result["response"]
if preprocess_result.applied and processed_absolute_path != absolute_path and absolute_path.exists():
try:
os.remove(absolute_path)
print(f"Deleted original uploaded audio after preprocessing: {absolute_path}")
except Exception as e:
print(f"Warning: Failed to delete original uploaded audio {absolute_path}: {e}")
# 5. 返回成功响应
transcription_task_id = result["transcription_task_id"]
message_suffix = ""
if transcription_task_id:
if auto_summarize_bool:
message_suffix = ",正在进行转录和总结"
else:
message_suffix = ",正在进行转录"
return create_api_response( return create_api_response(
code="200", code="200",
message="Audio file uploaded successfully" + message="音频上传成功,后台正在处理音频" + ("并准备总结" if auto_summarize_bool else ""),
(" and replaced existing file" if result["replaced_existing"] else "") +
message_suffix,
data={ data={
"file_name": result["file_info"]["file_name"],
"file_path": result["file_info"]["file_path"],
"task_id": transcription_task_id, "task_id": transcription_task_id,
"transcription_started": transcription_task_id is not None, "task_status": "processing",
"transcription_started": False,
"background_processing": True,
"auto_summarize": auto_summarize_bool, "auto_summarize": auto_summarize_bool,
"model_code": model_code, "model_code": model_code,
"replaced_existing": result["replaced_existing"], "file_path": '/' + str(absolute_path.relative_to(BASE_DIR)),
"previous_transcription_cleared": result["replaced_existing"] and result["has_transcription"] "file_name": absolute_path.name,
} }
) )

View File

@ -111,7 +111,7 @@ const VoiceprintCollectionModal = ({ isOpen, onClose, onSuccess, templateConfig
<Card size="small" style={{ background: '#f9fafb', textAlign: 'center', padding: '24px 0', border: '1px dashed #d9d9d9' }}> <Card size="small" style={{ background: '#f9fafb', textAlign: 'center', padding: '24px 0', border: '1px dashed #d9d9d9' }}>
<Paragraph type="secondary">请用自然语速朗读以下文字</Paragraph> <Paragraph type="secondary">请用自然语速朗读以下文字</Paragraph>
<Title level={4} style={{ margin: '12px 0 24px' }}> <Title level={4} style={{ margin: '12px 0 24px' }}>
{templateConfig?.content || "我正在使用 iMeeting 智能会议系统进行声纹采样。"} {templateConfig?.content || templateConfig?.template_text || "我正在使用 iMeeting 智能会议系统进行声纹采样。"}
</Title> </Title>
<div style={{ marginBottom: 20 }}> <div style={{ marginBottom: 20 }}>

View File

@ -1,4 +1,4 @@
import { useEffect, useEffectEvent, useRef, useState } from 'react'; import { useCallback, useEffect, useRef, useState } from 'react';
import { App } from 'antd'; import { App } from 'antd';
import { useNavigate, useParams } from 'react-router-dom'; import { useNavigate, useParams } from 'react-router-dom';
import apiClient from '../utils/apiClient'; import apiClient from '../utils/apiClient';
@ -143,16 +143,16 @@ export default function useMeetingDetailsPage({ user }) {
} }
}; };
const loadAudioUploadConfig = async () => { const loadAudioUploadConfig = useCallback(async () => {
try { try {
const nextMaxAudioSize = await configService.getMaxAudioSize(); const nextMaxAudioSize = await configService.getMaxAudioSize();
setMaxAudioSize(nextMaxAudioSize || 100 * 1024 * 1024); setMaxAudioSize(nextMaxAudioSize || 100 * 1024 * 1024);
} catch { } catch {
setMaxAudioSize(100 * 1024 * 1024); setMaxAudioSize(100 * 1024 * 1024);
} }
}; }, []);
const fetchPromptList = async () => { const fetchPromptList = useCallback(async () => {
try { try {
const res = await apiClient.get(buildApiUrl(API_ENDPOINTS.PROMPTS.ACTIVE('MEETING_TASK'))); const res = await apiClient.get(buildApiUrl(API_ENDPOINTS.PROMPTS.ACTIVE('MEETING_TASK')));
setPromptList(res.data.prompts || []); setPromptList(res.data.prompts || []);
@ -163,9 +163,9 @@ export default function useMeetingDetailsPage({ user }) {
} catch (error) { } catch (error) {
console.debug('加载提示词列表失败:', error); console.debug('加载提示词列表失败:', error);
} }
}; }, []);
const fetchLlmModels = async () => { const fetchLlmModels = useCallback(async () => {
try { try {
const res = await apiClient.get(buildApiUrl(API_ENDPOINTS.MEETINGS.LLM_MODELS)); const res = await apiClient.get(buildApiUrl(API_ENDPOINTS.MEETINGS.LLM_MODELS));
const models = Array.isArray(res.data) ? res.data : (res.data?.models || []); const models = Array.isArray(res.data) ? res.data : (res.data?.models || []);
@ -177,9 +177,9 @@ export default function useMeetingDetailsPage({ user }) {
} catch (error) { } catch (error) {
console.debug('加载模型列表失败:', error); console.debug('加载模型列表失败:', error);
} }
}; }, []);
const fetchSummaryResources = async () => { const fetchSummaryResources = useCallback(async () => {
setSummaryResourcesLoading(true); setSummaryResourcesLoading(true);
try { try {
await Promise.allSettled([ await Promise.allSettled([
@ -189,9 +189,9 @@ export default function useMeetingDetailsPage({ user }) {
} finally { } finally {
setSummaryResourcesLoading(false); setSummaryResourcesLoading(false);
} }
}; }, [fetchLlmModels, fetchPromptList, llmModels.length, promptList.length]);
const fetchTranscript = async () => { const fetchTranscript = useCallback(async () => {
setTranscriptLoading(true); setTranscriptLoading(true);
try { try {
const res = await apiClient.get(buildApiUrl(API_ENDPOINTS.MEETINGS.TRANSCRIPT(meetingId))); const res = await apiClient.get(buildApiUrl(API_ENDPOINTS.MEETINGS.TRANSCRIPT(meetingId)));
@ -208,9 +208,9 @@ export default function useMeetingDetailsPage({ user }) {
} finally { } finally {
setTranscriptLoading(false); setTranscriptLoading(false);
} }
}; }, [meetingId]);
const fetchMeetingDetails = async (options = {}) => { const fetchMeetingDetails = useCallback(async (options = {}) => {
const { showPageLoading = true } = options; const { showPageLoading = true } = options;
try { try {
if (showPageLoading) { if (showPageLoading) {
@ -278,9 +278,9 @@ export default function useMeetingDetailsPage({ user }) {
setLoading(false); setLoading(false);
} }
} }
}; }, [meetingId, message]);
const startStatusPolling = (taskId) => { const startStatusPolling = useCallback((taskId) => {
if (statusCheckIntervalRef.current) { if (statusCheckIntervalRef.current) {
clearInterval(statusCheckIntervalRef.current); clearInterval(statusCheckIntervalRef.current);
} }
@ -309,9 +309,9 @@ export default function useMeetingDetailsPage({ user }) {
}, 3000); }, 3000);
statusCheckIntervalRef.current = interval; statusCheckIntervalRef.current = interval;
}; }, [fetchMeetingDetails, fetchTranscript]);
const startSummaryPolling = (taskId, options = {}) => { const startSummaryPolling = useCallback((taskId, options = {}) => {
const { closeDrawerOnComplete = false } = options; const { closeDrawerOnComplete = false } = options;
if (!taskId) { if (!taskId) {
return; return;
@ -364,9 +364,9 @@ export default function useMeetingDetailsPage({ user }) {
const interval = setInterval(poll, 3000); const interval = setInterval(poll, 3000);
summaryPollIntervalRef.current = interval; summaryPollIntervalRef.current = interval;
poll(); poll();
}; }, [fetchMeetingDetails, message]);
const scheduleSummaryBootstrapPolling = (attempt = 0) => { const scheduleSummaryBootstrapPolling = useCallback((attempt = 0) => {
if (summaryPollIntervalRef.current || activeSummaryTaskIdRef.current) { if (summaryPollIntervalRef.current || activeSummaryTaskIdRef.current) {
return; return;
} }
@ -402,9 +402,10 @@ export default function useMeetingDetailsPage({ user }) {
scheduleSummaryBootstrapPolling(attempt + 1); scheduleSummaryBootstrapPolling(attempt + 1);
}, attempt === 0 ? 1200 : 2000); }, attempt === 0 ? 1200 : 2000);
}; }, [fetchMeetingDetails, meetingId, startSummaryPolling]);
const bootstrapMeetingPage = useEffectEvent(async () => { useEffect(() => {
const bootstrapMeetingPage = async () => {
const meetingData = await fetchMeetingDetails(); const meetingData = await fetchMeetingDetails();
await fetchTranscript(); await fetchTranscript();
await loadAudioUploadConfig(); await loadAudioUploadConfig();
@ -418,9 +419,8 @@ export default function useMeetingDetailsPage({ user }) {
} else if (meetingData?.transcription_status?.status === 'completed' && !meetingData.summary) { } else if (meetingData?.transcription_status?.status === 'completed' && !meetingData.summary) {
scheduleSummaryBootstrapPolling(); scheduleSummaryBootstrapPolling();
} }
}); };
useEffect(() => {
bootstrapMeetingPage(); bootstrapMeetingPage();
return () => { return () => {
@ -434,11 +434,15 @@ export default function useMeetingDetailsPage({ user }) {
clearTimeout(summaryBootstrapTimeoutRef.current); clearTimeout(summaryBootstrapTimeoutRef.current);
} }
}; };
}, [bootstrapMeetingPage, meetingId]); }, [
fetchMeetingDetails,
const openSummaryResources = useEffectEvent(() => { fetchTranscript,
fetchSummaryResources(); loadAudioUploadConfig,
}); meetingId,
scheduleSummaryBootstrapPolling,
startStatusPolling,
startSummaryPolling,
]);
useEffect(() => { useEffect(() => {
if (!showSummaryDrawer) { if (!showSummaryDrawer) {
@ -447,8 +451,8 @@ export default function useMeetingDetailsPage({ user }) {
if (promptList.length > 0 && llmModels.length > 0) { if (promptList.length > 0 && llmModels.length > 0) {
return; return;
} }
openSummaryResources(); fetchSummaryResources();
}, [llmModels.length, openSummaryResources, promptList.length, showSummaryDrawer]); }, [fetchSummaryResources, llmModels.length, promptList.length, showSummaryDrawer]);
useEffect(() => { useEffect(() => {
transcriptRefs.current = []; transcriptRefs.current = [];
@ -494,7 +498,7 @@ export default function useMeetingDetailsPage({ user }) {
setUploadStatusMessage('正在上传音频文件...'); setUploadStatusMessage('正在上传音频文件...');
try { try {
await uploadMeetingAudio({ const response = await uploadMeetingAudio({
meetingId, meetingId,
file, file,
promptId: meeting?.prompt_id, promptId: meeting?.prompt_id,
@ -508,13 +512,18 @@ export default function useMeetingDetailsPage({ user }) {
}); });
setUploadProgress(100); setUploadProgress(100);
setUploadStatusMessage('上传完成,正在启动转录任务...'); setUploadStatusMessage('上传完成,后台正在处理音频...');
message.success('音频上传成功'); message.success(response?.message || '音频上传成功,后台正在处理音频');
setTranscript([]); setTranscript([]);
setSpeakerList([]); setSpeakerList([]);
setEditingSpeakers({}); setEditingSpeakers({});
await fetchMeetingDetails({ showPageLoading: false }); if (response?.data?.task_id) {
await fetchTranscript(); const nextStatus = { task_id: response.data.task_id, status: 'processing', progress: 5 };
setTranscriptionStatus(nextStatus);
setTranscriptionProgress(5);
setMeeting((prev) => (prev ? { ...prev, transcription_status: nextStatus, llm_status: null, summary: null } : prev));
startStatusPolling(response.data.task_id);
}
} catch (error) { } catch (error) {
message.error(error?.response?.data?.message || error?.response?.data?.detail || '上传失败'); message.error(error?.response?.data?.message || error?.response?.data?.detail || '上传失败');
throw error; throw error;

View File

@ -127,7 +127,7 @@ export default function useMeetingFormDrawer({ open, onClose, onSuccess, meeting
setAudioUploadProgress(0); setAudioUploadProgress(0);
setAudioUploadMessage('正在上传音频文件...'); setAudioUploadMessage('正在上传音频文件...');
try { try {
await uploadMeetingAudio({ const uploadResponse = await uploadMeetingAudio({
meetingId: newMeetingId, meetingId: newMeetingId,
file: selectedAudioFile, file: selectedAudioFile,
promptId: values.prompt_id, promptId: values.prompt_id,
@ -139,8 +139,8 @@ export default function useMeetingFormDrawer({ open, onClose, onSuccess, meeting
}, },
}); });
setAudioUploadProgress(100); setAudioUploadProgress(100);
setAudioUploadMessage('上传完成,正在启动转录任务...'); setAudioUploadMessage('上传完成,后台正在处理音频...');
message.success('会议创建成功,音频已开始上传处理'); message.success(uploadResponse?.message || '会议创建成功,音频已进入后台处理');
} catch (uploadError) { } catch (uploadError) {
message.warning(uploadError?.response?.data?.message || uploadError?.response?.data?.detail || '会议已创建,但音频上传失败,请在详情页重试'); message.warning(uploadError?.response?.data?.message || uploadError?.response?.data?.detail || '会议已创建,但音频上传失败,请在详情页重试');
} finally { } finally {

View File

@ -97,7 +97,7 @@ const CreateMeeting = () => {
setAudioUploadProgress(0); setAudioUploadProgress(0);
setAudioUploadMessage('正在上传音频文件...'); setAudioUploadMessage('正在上传音频文件...');
try { try {
await uploadMeetingAudio({ const uploadResponse = await uploadMeetingAudio({
meetingId, meetingId,
file: selectedAudioFile, file: selectedAudioFile,
promptId: values.prompt_id, promptId: values.prompt_id,
@ -109,8 +109,8 @@ const CreateMeeting = () => {
}, },
}); });
setAudioUploadProgress(100); setAudioUploadProgress(100);
setAudioUploadMessage('上传完成,正在启动转录任务...'); setAudioUploadMessage('上传完成,后台正在处理音频...');
message.success('会议创建成功,音频已开始上传处理'); message.success(uploadResponse?.message || '会议创建成功,音频已进入后台处理');
} catch (uploadError) { } catch (uploadError) {
message.warning(uploadError.response?.data?.message || uploadError.response?.data?.detail || '会议已创建,但音频上传失败,请在详情页重试'); message.warning(uploadError.response?.data?.message || uploadError.response?.data?.detail || '会议已创建,但音频上传失败,请在详情页重试');
} finally { } finally {