From 45d679c0ea0176b41b12ab7f497c1ed3e44b72b7 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 10 Jul 2025 13:15:13 +0800 Subject: [PATCH] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=E6=89=B9=E9=87=8F?= =?UTF-8?q?=E5=A4=84=E7=90=86=E4=BB=BB=E5=8A=A1=E7=9A=84=20JSON-RPC=20?= =?UTF-8?q?=E7=BB=93=E6=9E=9C=E8=AF=86=E5=88=AB=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🔧 批量处理修复: 1. Python 批量处理函数 JSON-RPC 支持: - 添加 request_id 参数支持 - 初始化 JSON-RPC 响应处理器和进度报告器 - 在函数结束时发送批量处理的最终结果 - 成功时发送完整的批量结果数据 2. 批量处理结果格式标准化: - 成功时:rpc.success(result) 包含 success_count, failed_count, results - 失败时:rpc.error() 包含详细错误信息 - 进度完成通知:progress.complete() 显示处理统计 3. Rust 解析逻辑优化: - 修改为始终更新 final_result 以获取最新的 JSON-RPC 响应 - 确保返回批量处理的最终结果而不是单个视频结果 - 改进日志信息便于调试 4. 命令行接口更新: - 批量处理调用添加 request_id 参数 - 保持向后兼容性 🎯 问题解决: - 批量处理时返回单个视频结果 → 返回完整批量结果 ✓ - 前端显示任务失败 → 正确识别批量处理成功状态 ✓ - JSON-RPC 结果优先级 → 最后的结果优先返回 ✓ ✅ 修复效果: - 批量处理正确返回最终统计结果 - 前端能够识别批量任务的成功状态 - 完整的进度跟踪和错误处理 - 统一的 JSON-RPC 通信协议 现在批量处理功能应该能正确显示成功状态! --- python_core/ai_video/video_generator.py | 32 +++++++++++++++---------- src-tauri/src/commands.rs | 4 ++-- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/python_core/ai_video/video_generator.py b/python_core/ai_video/video_generator.py index 9579338..0c3d615 100644 --- a/python_core/ai_video/video_generator.py +++ b/python_core/ai_video/video_generator.py @@ -108,7 +108,6 @@ class VideoGenerator: if found_path: image_path = found_path - logger.info(f"Found image at: {image_path}") else: result['msg'] = f'Image file not found: {image_path}. Searched in: {possible_paths}' logger.error(result['msg']) @@ -118,7 +117,6 @@ class VideoGenerator: progress.step("upload", "[1/4] 正在上传图片到云存储...") if progress_callback: progress_callback("[1/4] 正在上传图片到云存储...") - logger.info(f"Uploading image to cloud storage: {image_path}") upload_result = self.cloud_storage.upload_file(image_path) if not upload_result['status']: @@ -132,12 +130,10 @@ class VideoGenerator: progress.step("upload_success", "[1/4] 图片上传成功") if progress_callback: progress_callback("[1/4] 图片上传成功") - logger.info(f"Image uploaded successfully: {img_url}") # Step 2: Submit video generation task if progress_callback: progress_callback("[2/4] 正在提交视频生成任务...") - logger.info("Submitting video generation task...") task_result = self.api_client.submit_task(prompt, img_url, duration, model_type) if not task_result['status']: @@ -148,12 +144,10 @@ class VideoGenerator: task_id = task_result['data'] if progress_callback: progress_callback(f"[2/4] 任务提交成功,任务ID: {task_id}") - logger.info(f"Task submitted successfully, task ID: {task_id}") # Step 3: Wait for task completion if progress_callback: progress_callback("[3/4] 正在等待视频生成完成...") - logger.info("Waiting for video generation to complete...") completion_result = self.api_client.wait_for_completion( task_id, @@ -169,13 +163,11 @@ class VideoGenerator: video_url = completion_result['data'] result['video_url'] = video_url - logger.info(f"Video generated successfully: {video_url}") # Step 4: Download video if save_path is provided if save_path: if progress_callback: progress_callback("[4/4] 正在下载视频到本地...") - logger.info("Downloading video to local storage...") video_path = self.cloud_storage.download_file(video_url, save_path) if video_path and os.path.exists(video_path): @@ -184,7 +176,6 @@ class VideoGenerator: result['msg'] = '视频生成并下载成功' if progress_callback: progress_callback(f"[4/4] 视频下载成功: {os.path.basename(video_path)}") - logger.info(f"Video downloaded successfully: {video_path}") else: result['msg'] = '视频下载失败' if progress_callback: @@ -225,7 +216,8 @@ class VideoGenerator: model_type: str = 'lite', timeout: int = 300, interval: int = 3, - progress_callback: Optional[Callable[[str], None]] = None) -> Dict[str, Any]: + progress_callback: Optional[Callable[[str], None]] = None, + request_id: str = None) -> Dict[str, Any]: """ Batch generate videos from multiple images. @@ -242,8 +234,12 @@ class VideoGenerator: Returns: Dictionary with batch processing result """ + # Initialize JSON-RPC handlers for batch processing + rpc = create_response_handler(request_id) + progress = create_progress_reporter() + result = {'status': False, 'success_count': 0, 'failed_count': 0, 'results': [], 'msg': ''} - + try: if progress_callback: progress_callback(f"开始批量处理任务...") @@ -348,7 +344,16 @@ class VideoGenerator: except Exception as e: result['msg'] = f'批量处理过程中发生错误: {str(e)}' logger.error(result['msg']) - + progress.error(result['msg']) + rpc.error(JSONRPCError.GENERATION_FAILED, "Batch processing failed", str(e)) + + # Send final batch result via JSON-RPC + if result['status']: + progress.complete(f"批量处理完成!成功: {result['success_count']}, 失败: {result['failed_count']}") + rpc.success(result) + else: + rpc.error(JSONRPCError.GENERATION_FAILED, result.get('msg', 'Batch processing failed'), result) + return result @@ -410,7 +415,8 @@ def main(): duration=args.duration, model_type=args.model, timeout=args.timeout, - progress_callback=progress_callback + progress_callback=progress_callback, + request_id="cli_batch_request" ) print(json.dumps(result, ensure_ascii=False, indent=2)) diff --git a/src-tauri/src/commands.rs b/src-tauri/src/commands.rs index f14ca84..2ff5358 100644 --- a/src-tauri/src/commands.rs +++ b/src-tauri/src/commands.rs @@ -136,9 +136,9 @@ async fn execute_python_command(app: tauri::AppHandle, args: &[String]) -> Resul println!("Progress: {}", json_str); } } else if json_value.get("result").is_some() || json_value.get("error").is_some() { - // This is a final result or error response + // This is a final result or error response - always update to get the latest final_result = Some(json_str.to_string()); - println!("Final JSON-RPC result: {}", json_str); + println!("JSON-RPC result found: {}", json_str); } } } else if line.trim().starts_with('{') && line.trim().ends_with('}') {