codex/dev
mula.liu 2026-04-09 19:43:00 +08:00
parent 3fe28934cc
commit 861d7e3463
11 changed files with 386 additions and 260 deletions

View File

@ -4,9 +4,7 @@ from app.core.config import BASE_DIR, AUDIO_DIR, TEMP_UPLOAD_DIR
from app.core.auth import get_current_user
from app.core.response import create_api_response
from app.services.async_transcription_service import AsyncTranscriptionService
from app.services.async_meeting_service import async_meeting_service
from app.services.audio_preprocess_service import audio_preprocess_service
from app.services.audio_service import handle_audio_upload
from app.services.audio_upload_task_service import audio_upload_task_service
from pydantic import BaseModel
from typing import Optional, List
from datetime import datetime, timedelta
@ -456,83 +454,25 @@ async def complete_upload(
}
)
# 6. 对合并后的音频执行统一预处理
# 6. 提交后台任务,异步执行预处理和转录启动
full_path = BASE_DIR / file_path.lstrip('/')
try:
preprocess_result = audio_preprocess_service.preprocess(full_path)
processed_full_path = preprocess_result.file_path
file_size = preprocess_result.file_size
file_name = preprocess_result.file_name
audio_duration = preprocess_result.metadata.duration_seconds
file_path = f"/{processed_full_path.relative_to(BASE_DIR)}"
print(
f"流式上传音频预处理完成: source={full_path.name}, "
f"target={processed_full_path.name}, duration={audio_duration}s, "
f"applied={preprocess_result.applied}"
)
except Exception as e:
if full_path.exists():
try:
os.remove(full_path)
except OSError:
pass
return create_api_response(
code="500",
message=f"音频预处理失败: {str(e)}"
)
# 7. 调用 audio_service 处理文件(数据库更新、启动转录和总结)
result = handle_audio_upload(
file_path=file_path,
file_name=file_name,
file_size=file_size,
transcription_task_id = audio_upload_task_service.enqueue_upload_processing(
meeting_id=request.meeting_id,
original_file_path=file_path,
current_user=current_user,
auto_summarize=request.auto_summarize,
background_tasks=background_tasks,
prompt_id=request.prompt_id, # 传递提示词模版ID
duration=audio_duration # 传递时长参数
prompt_id=request.prompt_id,
)
# 如果处理失败,返回错误
if not result["success"]:
cleanup_paths = [processed_full_path]
if processed_full_path != full_path:
cleanup_paths.append(full_path)
for cleanup_path in cleanup_paths:
if cleanup_path.exists():
try:
os.remove(cleanup_path)
except OSError:
pass
return result["response"]
if preprocess_result.applied and processed_full_path != full_path and full_path.exists():
try:
os.remove(full_path)
except OSError:
pass
# 8. 返回成功响应
transcription_task_id = result["transcription_task_id"]
message_suffix = ""
if transcription_task_id:
if request.auto_summarize:
message_suffix = ",正在进行转录和总结"
else:
message_suffix = ",正在进行转录"
return create_api_response(
code="200",
message="音频上传完成" + message_suffix,
message="音频上传完成,后台正在处理音频" + ("并准备总结" if request.auto_summarize else ""),
data={
"meeting_id": request.meeting_id,
"file_path": file_path,
"file_size": file_size,
"duration": audio_duration,
"task_id": transcription_task_id,
"task_status": "pending" if transcription_task_id else None,
"task_status": "processing",
"background_processing": True,
"auto_summarize": request.auto_summarize
}
)

View File

@ -25,12 +25,12 @@ def get_voiceprint_template(current_user: dict = Depends(get_current_user)):
"""
try:
template_data = VoiceprintTemplate(
template_text=SystemConfigService.get_voiceprint_template(),
content=SystemConfigService.get_voiceprint_template(),
duration_seconds=SystemConfigService.get_voiceprint_duration(),
sample_rate=SystemConfigService.get_voiceprint_sample_rate(),
channels=SystemConfigService.get_voiceprint_channels()
)
return create_api_response(code="200", message="获取朗读模板成功", data=template_data.dict())
return create_api_response(code="200", message="获取朗读模板成功", data=template_data.model_dump())
except Exception as e:
return create_api_response(code="500", message=f"获取朗读模板失败: {str(e)}")

View File

@ -261,6 +261,8 @@ class VoiceprintStatus(BaseModel):
class VoiceprintTemplate(BaseModel):
    # Text the user is asked to read aloud when recording a voiceprint sample.
    content: str
    # Expected recording length, in seconds.
    duration_seconds: int
    # Required audio sample rate in Hz — presumably dictated by the ASR/voiceprint backend; confirm.
    sample_rate: int
    # Required number of audio channels for the recording.
    channels: int
# 菜单权限相关模型
class MenuInfo(BaseModel):

View File

@ -153,7 +153,7 @@ class AsyncTranscriptionService:
"used_params": call_params,
}
def start_transcription(self, meeting_id: int, audio_file_path: str) -> str:
def start_transcription(self, meeting_id: int, audio_file_path: str, business_task_id: Optional[str] = None) -> str:
"""
启动异步转录任务
@ -175,7 +175,13 @@ class AsyncTranscriptionService:
deleted_segments = cursor.rowcount
print(f"Deleted {deleted_segments} old transcript segments")
# 删除旧的转录任务记录
# 删除旧的转录任务记录;如果已创建本地占位任务,则保留当前任务记录
if business_task_id:
cursor.execute(
"DELETE FROM transcript_tasks WHERE meeting_id = %s AND task_id <> %s",
(meeting_id, business_task_id),
)
else:
cursor.execute("DELETE FROM transcript_tasks WHERE meeting_id = %s", (meeting_id,))
deleted_tasks = cursor.rowcount
print(f"Deleted {deleted_tasks} old transcript tasks")
@ -215,12 +221,12 @@ class AsyncTranscriptionService:
raise Exception(f"Transcription API error: {task_response.message}")
paraformer_task_id = task_response.output.task_id
business_task_id = str(uuid.uuid4())
final_business_task_id = business_task_id or str(uuid.uuid4())
# 4. 在Redis中存储任务映射
current_time = datetime.now().isoformat()
task_data = {
'business_task_id': business_task_id,
'business_task_id': final_business_task_id,
'paraformer_task_id': paraformer_task_id,
'meeting_id': str(meeting_id),
'file_url': file_url,
@ -231,19 +237,75 @@ class AsyncTranscriptionService:
}
# 存储到Redis过期时间24小时
self.redis_client.hset(f"task:{business_task_id}", mapping=task_data)
self.redis_client.expire(f"task:{business_task_id}", 86400)
self.redis_client.hset(f"task:{final_business_task_id}", mapping=task_data)
self.redis_client.expire(f"task:{final_business_task_id}", 86400)
# 5. 在数据库中创建任务记录
self._save_task_to_db(business_task_id, paraformer_task_id, meeting_id, audio_file_path)
self._save_task_to_db(final_business_task_id, paraformer_task_id, meeting_id, audio_file_path)
print(f"Transcription task created: {business_task_id}")
return business_task_id
print(f"Transcription task created: {final_business_task_id}")
return final_business_task_id
except Exception as e:
print(f"Error starting transcription: {e}")
raise e
def create_local_processing_task(
self,
meeting_id: int,
status: str = "processing",
progress: int = 0,
error_message: Optional[str] = None,
) -> str:
business_task_id = str(uuid.uuid4())
current_time = datetime.now().isoformat()
task_data = {
"business_task_id": business_task_id,
"paraformer_task_id": "",
"meeting_id": str(meeting_id),
"status": status,
"progress": str(progress),
"created_at": current_time,
"updated_at": current_time,
"error_message": error_message or "",
}
self.redis_client.hset(f"task:{business_task_id}", mapping=task_data)
self.redis_client.expire(f"task:{business_task_id}", 86400)
with get_db_connection() as connection:
cursor = connection.cursor()
cursor.execute(
"""
INSERT INTO transcript_tasks (task_id, paraformer_task_id, meeting_id, status, progress, created_at, error_message)
VALUES (%s, NULL, %s, %s, %s, NOW(), %s)
""",
(business_task_id, meeting_id, status, progress, error_message),
)
connection.commit()
cursor.close()
return business_task_id
def update_local_processing_task(
self,
business_task_id: str,
status: str,
progress: int,
error_message: Optional[str] = None,
) -> None:
updated_at = datetime.now().isoformat()
self.redis_client.hset(
f"task:{business_task_id}",
mapping={
"status": status,
"progress": str(progress),
"updated_at": updated_at,
"error_message": error_message or "",
},
)
self.redis_client.expire(f"task:{business_task_id}", 86400)
self._update_task_status_in_db(business_task_id, status, progress, error_message)
def get_task_status(self, business_task_id: str) -> Dict[str, Any]:
"""
获取任务状态
@ -270,6 +332,13 @@ class AsyncTranscriptionService:
progress = int(task_data.get('progress') or 0)
error_message = task_data.get('error_message') or None
updated_at = task_data.get('updated_at') or updated_at
else:
paraformer_task_id = task_data.get('paraformer_task_id')
if not paraformer_task_id:
current_status = task_data.get('status') or 'processing'
progress = int(task_data.get('progress') or 0)
error_message = task_data.get('error_message') or None
updated_at = task_data.get('updated_at') or updated_at
else:
cached_status = self.redis_client.hgetall(status_cache_key)
if cached_status and cached_status.get('status') in {'pending', 'processing'}:
@ -278,8 +347,6 @@ class AsyncTranscriptionService:
error_message = cached_status.get('error_message') or None
updated_at = cached_status.get('updated_at') or updated_at
else:
paraformer_task_id = task_data['paraformer_task_id']
# 2. 查询外部API获取状态
try:
audio_config = SystemConfigService.get_active_audio_model_config("asr")
@ -293,7 +360,7 @@ class AsyncTranscriptionService:
paraformer_status = paraformer_response.output.task_status
current_status = self._map_paraformer_status(paraformer_status)
progress = self._calculate_progress(paraformer_status)
error_message = None #执行成功,清除初始状态
error_message = None
except Exception as e:
current_status = 'failed'
@ -302,11 +369,8 @@ class AsyncTranscriptionService:
# 3. 如果任务完成,处理结果
if current_status == 'completed' and paraformer_response.output.get('results'):
# 防止并发处理:先检查数据库中的状态
db_task_status = self._get_task_status_from_db(business_task_id)
if db_task_status != 'completed':
# 只有当数据库中状态不是completed时才处理
# 先将状态更新为completed作为分布式锁
self._update_task_status_in_db(business_task_id, 'completed', 100, None)
try:
@ -317,7 +381,7 @@ class AsyncTranscriptionService:
)
except Exception as e:
current_status = 'failed'
progress = 100 # 进度为100但状态是失败
progress = 100
error_message = f"Error processing transcription result: {e}"
print(error_message)
else:
@ -459,7 +523,19 @@ class AsyncTranscriptionService:
with get_db_connection() as connection:
cursor = connection.cursor()
# 插入转录任务记录
cursor.execute("SELECT task_id FROM transcript_tasks WHERE task_id = %s", (business_task_id,))
existing = cursor.fetchone()
if existing:
cursor.execute(
"""
UPDATE transcript_tasks
SET paraformer_task_id = %s, meeting_id = %s, status = 'pending', progress = 0,
completed_at = NULL, error_message = NULL
WHERE task_id = %s
""",
(paraformer_task_id, meeting_id, business_task_id),
)
else:
insert_task_query = """
INSERT INTO transcript_tasks (task_id, paraformer_task_id, meeting_id, status, progress, created_at)
VALUES (%s, %s, %s, 'pending', 0, NOW())

View File

@ -26,7 +26,8 @@ def handle_audio_upload(
background_tasks: BackgroundTasks = None,
prompt_id: int = None,
model_code: str = None,
duration: int = 0
duration: int = 0,
transcription_task_id: str = None,
) -> dict:
"""
处理已保存的完整音频文件
@ -49,6 +50,7 @@ def handle_audio_upload(
prompt_id: 提示词模版ID可选如果不指定则使用默认模版
model_code: 总结模型编码可选如果不指定则使用默认模型
duration: 音频时长
transcription_task_id: 预先创建的本地任务ID可选用于异步上传场景
Returns:
dict: {
@ -138,7 +140,11 @@ def handle_audio_upload(
# 4. 启动转录任务
try:
transcription_task_id = transcription_service.start_transcription(meeting_id, file_path)
transcription_task_id = transcription_service.start_transcription(
meeting_id,
file_path,
business_task_id=transcription_task_id,
)
print(f"Transcription task {transcription_task_id} started for meeting {meeting_id}")
# 5. 如果启用自动总结,则提交后台监控任务

View File

@ -0,0 +1,154 @@
"""
音频上传后台处理服务
将上传后的重操作放到后台线程执行避免请求长时间阻塞
1. 音频预处理
2. 更新音频文件记录
3. 启动转录
4. 启动自动总结监控
"""
from __future__ import annotations
import json
import os
from pathlib import Path
from typing import Optional
from app.core.config import BACKGROUND_TASK_CONFIG, BASE_DIR
from app.services.async_transcription_service import AsyncTranscriptionService
from app.services.audio_preprocess_service import audio_preprocess_service
from app.services.audio_service import handle_audio_upload
from app.services.background_task_runner import KeyedBackgroundTaskRunner
# Shared background runner for upload post-processing. Worker count comes from
# configuration ("upload_workers", default 2) and is clamped to at least 1.
# Keyed submission presumably serialises tasks that share a key — confirm
# against KeyedBackgroundTaskRunner.
upload_task_runner = KeyedBackgroundTaskRunner(
    max_workers=max(1, int(BACKGROUND_TASK_CONFIG.get("upload_workers", 2))),
    thread_name_prefix="imeeting-audio-upload",
)
class AudioUploadTaskService:
    """Runs post-upload audio processing in the background.

    Moves the heavy steps (preprocessing, DB update, transcription start,
    auto-summary kickoff) off the request thread: the endpoint gets a
    placeholder task id back immediately and clients poll its status.
    """

    def __init__(self):
        # Used to create/update the local placeholder task records.
        self.transcription_service = AsyncTranscriptionService()

    def enqueue_upload_processing(
        self,
        *,
        meeting_id: int,
        original_file_path: str,
        current_user: dict,
        auto_summarize: bool,
        prompt_id: Optional[int] = None,
        model_code: Optional[str] = None,
    ) -> str:
        """Create a placeholder task, queue the background work, return task id.

        Args:
            meeting_id: Meeting the audio belongs to.
            original_file_path: Path of the raw upload, relative to BASE_DIR
                with a leading "/".
            current_user: Authenticated user context, forwarded unchanged.
            auto_summarize: Whether to start summary monitoring after transcription.
            prompt_id: Optional prompt-template id for summarization.
            model_code: Optional summarization model code.

        Returns:
            The business task id clients can poll for status.
        """
        # Initial progress 5: "accepted, not yet preprocessing".
        task_id = self.transcription_service.create_local_processing_task(
            meeting_id=meeting_id,
            status="processing",
            progress=5,
        )
        upload_task_runner.submit(
            f"audio-upload:{task_id}",
            self._process_uploaded_audio,
            task_id,
            meeting_id,
            original_file_path,
            current_user,
            auto_summarize,
            prompt_id,
            model_code,
        )
        return task_id

    def _process_uploaded_audio(
        self,
        task_id: str,
        meeting_id: int,
        original_file_path: str,
        current_user: dict,
        auto_summarize: bool,
        prompt_id: Optional[int],
        model_code: Optional[str],
    ) -> None:
        """Worker body: preprocess the file, then hand off to audio_service.

        Progress checkpoints 15 (preprocessing started) and 40 (preprocessing
        done) are surfaced to polling clients. Any failure marks the task
        "failed" and, unless ownership was already handed to audio_service,
        deletes the files created so far.
        """
        source_absolute_path = BASE_DIR / original_file_path.lstrip("/")
        processed_absolute_path: Optional[Path] = None
        # Once handle_audio_upload has been called, the files are owned by
        # audio_service, so later failures must not delete them here.
        handoff_to_audio_service = False
        try:
            self.transcription_service.update_local_processing_task(task_id, "processing", 15, None)
            preprocess_result = audio_preprocess_service.preprocess(source_absolute_path)
            processed_absolute_path = preprocess_result.file_path
            audio_duration = preprocess_result.metadata.duration_seconds
            # Store the processed file's BASE_DIR-relative path with a leading "/".
            file_path = "/" + str(processed_absolute_path.relative_to(BASE_DIR))
            print(
                f"[AudioUploadTaskService] 音频预处理完成: source={source_absolute_path.name}, "
                f"target={processed_absolute_path.name}, duration={audio_duration}s, "
                f"applied={preprocess_result.applied}"
            )
            self.transcription_service.update_local_processing_task(task_id, "processing", 40, None)
            handoff_to_audio_service = True
            # Delegates DB update + transcription start; the pre-created task_id
            # is reused so clients keep polling the same id.
            result = handle_audio_upload(
                file_path=file_path,
                file_name=preprocess_result.file_name,
                file_size=preprocess_result.file_size,
                meeting_id=meeting_id,
                current_user=current_user,
                auto_summarize=auto_summarize,
                background_tasks=None,
                prompt_id=prompt_id,
                model_code=model_code,
                duration=audio_duration,
                transcription_task_id=task_id,
            )
            if not result["success"]:
                raise RuntimeError(self._extract_response_message(result["response"]))
            # Preprocessing produced a new file: the raw upload is redundant.
            if preprocess_result.applied and processed_absolute_path != source_absolute_path and source_absolute_path.exists():
                try:
                    os.remove(source_absolute_path)
                except OSError:
                    pass
        except Exception as exc:
            error_message = str(exc)
            print(f"[AudioUploadTaskService] 音频后台处理失败, task_id={task_id}, meeting_id={meeting_id}: {error_message}")
            self.transcription_service.update_local_processing_task(task_id, "failed", 0, error_message)
            if handoff_to_audio_service:
                # audio_service now owns the files — do not clean up here.
                return
            # Remove whatever files exist, deduplicating in case preprocessing
            # returned the source path unchanged.
            cleanup_targets = []
            if processed_absolute_path:
                cleanup_targets.append(processed_absolute_path)
            if source_absolute_path.exists():
                cleanup_targets.append(source_absolute_path)
            deduped_targets: list[Path] = []
            for target in cleanup_targets:
                if target not in deduped_targets:
                    deduped_targets.append(target)
            for target in deduped_targets:
                if target.exists():
                    try:
                        os.remove(target)
                    except OSError:
                        pass

    @staticmethod
    def _extract_response_message(response) -> str:
        """Best-effort extraction of the "message" field from a JSON response
        body (e.g. a starlette JSONResponse); falls back to a generic error."""
        body = getattr(response, "body", None)
        if not body:
            return "音频处理失败"
        try:
            payload = json.loads(body.decode("utf-8"))
            return payload.get("message") or "音频处理失败"
        except Exception:
            return "音频处理失败"
audio_upload_task_service = AudioUploadTaskService()

View File

@ -7,8 +7,7 @@ import app.core.config as config_module
from app.services.llm_service import LLMService
from app.services.async_transcription_service import AsyncTranscriptionService
from app.services.async_meeting_service import async_meeting_service
from app.services.audio_service import handle_audio_upload
from app.services.audio_preprocess_service import audio_preprocess_service
from app.services.audio_upload_task_service import audio_upload_task_service
from app.services.system_config_service import SystemConfigService
from app.core.auth import get_current_user, get_optional_current_user
from app.core.response import create_api_response
@ -733,88 +732,28 @@ async def upload_audio(
except Exception as e:
return create_api_response(code="500", message=f"保存文件失败: {str(e)}")
# 3.5 统一做音频预处理
try:
preprocess_result = audio_preprocess_service.preprocess(absolute_path)
processed_absolute_path = preprocess_result.file_path
audio_duration = preprocess_result.metadata.duration_seconds
print(
f"音频预处理完成: source={absolute_path.name}, "
f"target={processed_absolute_path.name}, duration={audio_duration}s, "
f"applied={preprocess_result.applied}"
)
except Exception as e:
if absolute_path.exists():
try:
os.remove(absolute_path)
except OSError:
pass
return create_api_response(code="500", message=f"音频预处理失败: {str(e)}")
processed_relative_path = processed_absolute_path.relative_to(BASE_DIR)
file_path = '/' + str(processed_relative_path)
file_name = preprocess_result.file_name
file_size = preprocess_result.file_size
# 4. 调用 audio_service 处理文件(权限检查、数据库更新、启动转录)
result = handle_audio_upload(
file_path=file_path,
file_name=file_name,
file_size=file_size,
# 4. 提交后台任务,异步执行预处理和转录启动
transcription_task_id = audio_upload_task_service.enqueue_upload_processing(
meeting_id=meeting_id,
original_file_path='/' + str(absolute_path.relative_to(BASE_DIR)),
current_user=current_user,
auto_summarize=auto_summarize_bool,
background_tasks=background_tasks,
prompt_id=prompt_id,
model_code=model_code,
duration=audio_duration # 传递时长参数
)
# 如果不成功,删除已保存的文件并返回错误
if not result["success"]:
cleanup_paths = [processed_absolute_path]
if processed_absolute_path != absolute_path:
cleanup_paths.append(absolute_path)
for cleanup_path in cleanup_paths:
if cleanup_path.exists():
try:
os.remove(cleanup_path)
print(f"Deleted file due to processing error: {cleanup_path}")
except Exception as e:
print(f"Warning: Failed to delete file {cleanup_path}: {e}")
return result["response"]
if preprocess_result.applied and processed_absolute_path != absolute_path and absolute_path.exists():
try:
os.remove(absolute_path)
print(f"Deleted original uploaded audio after preprocessing: {absolute_path}")
except Exception as e:
print(f"Warning: Failed to delete original uploaded audio {absolute_path}: {e}")
# 5. 返回成功响应
transcription_task_id = result["transcription_task_id"]
message_suffix = ""
if transcription_task_id:
if auto_summarize_bool:
message_suffix = ",正在进行转录和总结"
else:
message_suffix = ",正在进行转录"
return create_api_response(
code="200",
message="Audio file uploaded successfully" +
(" and replaced existing file" if result["replaced_existing"] else "") +
message_suffix,
message="音频上传成功,后台正在处理音频" + ("并准备总结" if auto_summarize_bool else ""),
data={
"file_name": result["file_info"]["file_name"],
"file_path": result["file_info"]["file_path"],
"task_id": transcription_task_id,
"transcription_started": transcription_task_id is not None,
"task_status": "processing",
"transcription_started": False,
"background_processing": True,
"auto_summarize": auto_summarize_bool,
"model_code": model_code,
"replaced_existing": result["replaced_existing"],
"previous_transcription_cleared": result["replaced_existing"] and result["has_transcription"]
"file_path": '/' + str(absolute_path.relative_to(BASE_DIR)),
"file_name": absolute_path.name,
}
)

View File

@ -111,7 +111,7 @@ const VoiceprintCollectionModal = ({ isOpen, onClose, onSuccess, templateConfig
<Card size="small" style={{ background: '#f9fafb', textAlign: 'center', padding: '24px 0', border: '1px dashed #d9d9d9' }}>
<Paragraph type="secondary">请用自然语速朗读以下文字</Paragraph>
<Title level={4} style={{ margin: '12px 0 24px' }}>
{templateConfig?.content || "我正在使用 iMeeting 智能会议系统进行声纹采样。"}
{templateConfig?.content || templateConfig?.template_text || "我正在使用 iMeeting 智能会议系统进行声纹采样。"}
</Title>
<div style={{ marginBottom: 20 }}>

View File

@ -1,4 +1,4 @@
import { useEffect, useEffectEvent, useRef, useState } from 'react';
import { useCallback, useEffect, useRef, useState } from 'react';
import { App } from 'antd';
import { useNavigate, useParams } from 'react-router-dom';
import apiClient from '../utils/apiClient';
@ -143,16 +143,16 @@ export default function useMeetingDetailsPage({ user }) {
}
};
const loadAudioUploadConfig = async () => {
const loadAudioUploadConfig = useCallback(async () => {
try {
const nextMaxAudioSize = await configService.getMaxAudioSize();
setMaxAudioSize(nextMaxAudioSize || 100 * 1024 * 1024);
} catch {
setMaxAudioSize(100 * 1024 * 1024);
}
};
}, []);
const fetchPromptList = async () => {
const fetchPromptList = useCallback(async () => {
try {
const res = await apiClient.get(buildApiUrl(API_ENDPOINTS.PROMPTS.ACTIVE('MEETING_TASK')));
setPromptList(res.data.prompts || []);
@ -163,9 +163,9 @@ export default function useMeetingDetailsPage({ user }) {
} catch (error) {
console.debug('加载提示词列表失败:', error);
}
};
}, []);
const fetchLlmModels = async () => {
const fetchLlmModels = useCallback(async () => {
try {
const res = await apiClient.get(buildApiUrl(API_ENDPOINTS.MEETINGS.LLM_MODELS));
const models = Array.isArray(res.data) ? res.data : (res.data?.models || []);
@ -177,9 +177,9 @@ export default function useMeetingDetailsPage({ user }) {
} catch (error) {
console.debug('加载模型列表失败:', error);
}
};
}, []);
const fetchSummaryResources = async () => {
const fetchSummaryResources = useCallback(async () => {
setSummaryResourcesLoading(true);
try {
await Promise.allSettled([
@ -189,9 +189,9 @@ export default function useMeetingDetailsPage({ user }) {
} finally {
setSummaryResourcesLoading(false);
}
};
}, [fetchLlmModels, fetchPromptList, llmModels.length, promptList.length]);
const fetchTranscript = async () => {
const fetchTranscript = useCallback(async () => {
setTranscriptLoading(true);
try {
const res = await apiClient.get(buildApiUrl(API_ENDPOINTS.MEETINGS.TRANSCRIPT(meetingId)));
@ -208,9 +208,9 @@ export default function useMeetingDetailsPage({ user }) {
} finally {
setTranscriptLoading(false);
}
};
}, [meetingId]);
const fetchMeetingDetails = async (options = {}) => {
const fetchMeetingDetails = useCallback(async (options = {}) => {
const { showPageLoading = true } = options;
try {
if (showPageLoading) {
@ -278,9 +278,9 @@ export default function useMeetingDetailsPage({ user }) {
setLoading(false);
}
}
};
}, [meetingId, message]);
const startStatusPolling = (taskId) => {
const startStatusPolling = useCallback((taskId) => {
if (statusCheckIntervalRef.current) {
clearInterval(statusCheckIntervalRef.current);
}
@ -309,9 +309,9 @@ export default function useMeetingDetailsPage({ user }) {
}, 3000);
statusCheckIntervalRef.current = interval;
};
}, [fetchMeetingDetails, fetchTranscript]);
const startSummaryPolling = (taskId, options = {}) => {
const startSummaryPolling = useCallback((taskId, options = {}) => {
const { closeDrawerOnComplete = false } = options;
if (!taskId) {
return;
@ -364,9 +364,9 @@ export default function useMeetingDetailsPage({ user }) {
const interval = setInterval(poll, 3000);
summaryPollIntervalRef.current = interval;
poll();
};
}, [fetchMeetingDetails, message]);
const scheduleSummaryBootstrapPolling = (attempt = 0) => {
const scheduleSummaryBootstrapPolling = useCallback((attempt = 0) => {
if (summaryPollIntervalRef.current || activeSummaryTaskIdRef.current) {
return;
}
@ -402,9 +402,10 @@ export default function useMeetingDetailsPage({ user }) {
scheduleSummaryBootstrapPolling(attempt + 1);
}, attempt === 0 ? 1200 : 2000);
};
}, [fetchMeetingDetails, meetingId, startSummaryPolling]);
const bootstrapMeetingPage = useEffectEvent(async () => {
useEffect(() => {
const bootstrapMeetingPage = async () => {
const meetingData = await fetchMeetingDetails();
await fetchTranscript();
await loadAudioUploadConfig();
@ -418,9 +419,8 @@ export default function useMeetingDetailsPage({ user }) {
} else if (meetingData?.transcription_status?.status === 'completed' && !meetingData.summary) {
scheduleSummaryBootstrapPolling();
}
});
};
useEffect(() => {
bootstrapMeetingPage();
return () => {
@ -434,11 +434,15 @@ export default function useMeetingDetailsPage({ user }) {
clearTimeout(summaryBootstrapTimeoutRef.current);
}
};
}, [bootstrapMeetingPage, meetingId]);
const openSummaryResources = useEffectEvent(() => {
fetchSummaryResources();
});
}, [
fetchMeetingDetails,
fetchTranscript,
loadAudioUploadConfig,
meetingId,
scheduleSummaryBootstrapPolling,
startStatusPolling,
startSummaryPolling,
]);
useEffect(() => {
if (!showSummaryDrawer) {
@ -447,8 +451,8 @@ export default function useMeetingDetailsPage({ user }) {
if (promptList.length > 0 && llmModels.length > 0) {
return;
}
openSummaryResources();
}, [llmModels.length, openSummaryResources, promptList.length, showSummaryDrawer]);
fetchSummaryResources();
}, [fetchSummaryResources, llmModels.length, promptList.length, showSummaryDrawer]);
useEffect(() => {
transcriptRefs.current = [];
@ -494,7 +498,7 @@ export default function useMeetingDetailsPage({ user }) {
setUploadStatusMessage('正在上传音频文件...');
try {
await uploadMeetingAudio({
const response = await uploadMeetingAudio({
meetingId,
file,
promptId: meeting?.prompt_id,
@ -508,13 +512,18 @@ export default function useMeetingDetailsPage({ user }) {
});
setUploadProgress(100);
setUploadStatusMessage('上传完成,正在启动转录任务...');
message.success('音频上传成功');
setUploadStatusMessage('上传完成,后台正在处理音频...');
message.success(response?.message || '音频上传成功,后台正在处理音频');
setTranscript([]);
setSpeakerList([]);
setEditingSpeakers({});
await fetchMeetingDetails({ showPageLoading: false });
await fetchTranscript();
if (response?.data?.task_id) {
const nextStatus = { task_id: response.data.task_id, status: 'processing', progress: 5 };
setTranscriptionStatus(nextStatus);
setTranscriptionProgress(5);
setMeeting((prev) => (prev ? { ...prev, transcription_status: nextStatus, llm_status: null, summary: null } : prev));
startStatusPolling(response.data.task_id);
}
} catch (error) {
message.error(error?.response?.data?.message || error?.response?.data?.detail || '上传失败');
throw error;

View File

@ -127,7 +127,7 @@ export default function useMeetingFormDrawer({ open, onClose, onSuccess, meeting
setAudioUploadProgress(0);
setAudioUploadMessage('正在上传音频文件...');
try {
await uploadMeetingAudio({
const uploadResponse = await uploadMeetingAudio({
meetingId: newMeetingId,
file: selectedAudioFile,
promptId: values.prompt_id,
@ -139,8 +139,8 @@ export default function useMeetingFormDrawer({ open, onClose, onSuccess, meeting
},
});
setAudioUploadProgress(100);
setAudioUploadMessage('上传完成,正在启动转录任务...');
message.success('会议创建成功,音频已开始上传处理');
setAudioUploadMessage('上传完成,后台正在处理音频...');
message.success(uploadResponse?.message || '会议创建成功,音频已进入后台处理');
} catch (uploadError) {
message.warning(uploadError?.response?.data?.message || uploadError?.response?.data?.detail || '会议已创建,但音频上传失败,请在详情页重试');
} finally {

View File

@ -97,7 +97,7 @@ const CreateMeeting = () => {
setAudioUploadProgress(0);
setAudioUploadMessage('正在上传音频文件...');
try {
await uploadMeetingAudio({
const uploadResponse = await uploadMeetingAudio({
meetingId,
file: selectedAudioFile,
promptId: values.prompt_id,
@ -109,8 +109,8 @@ const CreateMeeting = () => {
},
});
setAudioUploadProgress(100);
setAudioUploadMessage('上传完成,正在启动转录任务...');
message.success('会议创建成功,音频已开始上传处理');
setAudioUploadMessage('上传完成,后台正在处理音频...');
message.success(uploadResponse?.message || '会议创建成功,音频已进入后台处理');
} catch (uploadError) {
message.warning(uploadError.response?.data?.message || uploadError.response?.data?.detail || '会议已创建,但音频上传失败,请在详情页重试');
} finally {