From aa99ee1f6ae8d29024cbb96fcaef8786cb30980a Mon Sep 17 00:00:00 2001
From: "mula.liu"
Date: Wed, 8 Apr 2026 14:14:37 +0800
Subject: [PATCH] fix(asr): avoid serializing requests session in dashscope
 calls

---
 .../services/async_transcription_service.py | 57 ++++++++++---------
 1 file changed, 30 insertions(+), 27 deletions(-)

diff --git a/backend/app/services/async_transcription_service.py b/backend/app/services/async_transcription_service.py
index 0e127cd..42ccdac 100644
--- a/backend/app/services/async_transcription_service.py
+++ b/backend/app/services/async_transcription_service.py
@@ -1,5 +1,4 @@
 import uuid
-import json
 import os
 import redis
 import requests
@@ -39,6 +38,25 @@ class AsyncTranscriptionService:
             normalized = normalized[: -len(suffix)]
         return normalized or None
 
+    @staticmethod
+    def _resolve_dashscope_api_key(audio_config: Optional[Dict[str, Any]] = None) -> str:
+        api_key = (audio_config or {}).get("api_key") or QWEN_API_KEY
+        if isinstance(api_key, str):
+            api_key = api_key.strip()
+        if not api_key:
+            raise Exception("未配置 DashScope API Key")
+        return api_key
+
+    def _build_dashscope_request_options(self, audio_config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+        request_options: Dict[str, Any] = {
+            "api_key": self._resolve_dashscope_api_key(audio_config),
+        }
+        endpoint_url = (audio_config or {}).get("endpoint_url")
+        base_address = self._normalize_dashscope_base_address(endpoint_url)
+        if base_address:
+            request_options["base_address"] = base_address
+        return request_options
+
     @staticmethod
     def _build_dashscope_call_params(audio_config: Dict[str, Any], file_url: str) -> Dict[str, Any]:
         model_name = audio_config.get("model") or "paraformer-v2"
@@ -74,22 +92,14 @@
         if provider != "dashscope":
             raise Exception(f"当前仅支持 DashScope 音频识别测试,暂不支持供应商: {provider}")
 
-        dashscope.api_key = audio_config.get("api_key") or QWEN_API_KEY
+        request_options = self._build_dashscope_request_options(audio_config)
+        dashscope.api_key = request_options["api_key"]
         target_file_url = (
             test_file_url
             or "https://dashscope.oss-cn-beijing.aliyuncs.com/samples/audio/paraformer/hello_world_female2.wav"
         )
         call_params = self._build_dashscope_call_params(audio_config, target_file_url)
-        base_address = self._normalize_dashscope_base_address(audio_config.get("endpoint_url"))
-
-        session = self._create_requests_session()
-        try:
-            if base_address:
-                response = Transcription.async_call(base_address=base_address, session=session, **call_params)
-            else:
-                response = Transcription.async_call(session=session, **call_params)
-        finally:
-            session.close()
+        response = Transcription.async_call(**request_options, **call_params)
 
         if response.status_code != HTTPStatus.OK:
             raise Exception(response.message or "音频模型测试失败")
@@ -143,9 +153,9 @@
         if provider != "dashscope":
             raise Exception(f"当前仅支持 DashScope 音频识别,暂不支持供应商: {provider}")
 
-        dashscope.api_key = audio_config.get("api_key") or QWEN_API_KEY
+        request_options = self._build_dashscope_request_options(audio_config)
+        dashscope.api_key = request_options["api_key"]
         call_params = self._build_dashscope_call_params(audio_config, file_url)
-        base_address = self._normalize_dashscope_base_address(audio_config.get("endpoint_url"))
 
         print(
             f"Starting transcription for meeting_id: {meeting_id}, "
@@ -154,14 +164,7 @@
         )
 
         # 3. 调用Paraformer异步API
-        session = self._create_requests_session()
-        try:
-            if base_address:
-                task_response = Transcription.async_call(base_address=base_address, session=session, **call_params)
-            else:
-                task_response = Transcription.async_call(session=session, **call_params)
-        finally:
-            session.close()
+        task_response = Transcription.async_call(**request_options, **call_params)
 
         if task_response.status_code != HTTPStatus.OK:
             print(f"Failed to start transcription: {task_response.status_code}, {task_response.message}")
@@ -235,11 +238,11 @@
 
         # 2. 查询外部API获取状态
         try:
-            session = self._create_requests_session()
-            try:
-                paraformer_response = Transcription.fetch(task=paraformer_task_id, session=session)
-            finally:
-                session.close()
+            request_options = self._build_dashscope_request_options(
+                SystemConfigService.get_active_audio_model_config("asr")
+            )
+            dashscope.api_key = request_options["api_key"]
+            paraformer_response = Transcription.fetch(task=paraformer_task_id, **request_options)
 
             if paraformer_response.status_code != HTTPStatus.OK:
                 raise Exception(f"Failed to fetch task status from provider: {paraformer_response.message}")