fix: FastAPI model FFMPEGSubtitleOverlayRequest did not return Self after validation, and its optional parameters had no default values
parent d37da25a82
commit 39044f22dc
@@ -20,6 +20,7 @@ class WorkerConfig(BaseSettings):
     modal_product_kv_name: str = Field(default='live-product-cache', description="Modal抖音直播间商品缓存KV库")
     modal_environment: str = Field(default="dev", description="Modal worker运行环境")
     modal_app_name: str = Field(default='bowong-ai-video', description="Modal App集群名称")
+    modal_is_local: bool = Field(default=False, description="本地开发环境")
     comfyui_s3_input: Optional[str] = Field(default="comfyui-input", description="ComfyUI input S3文件夹名")
     comfyui_s3_output: Optional[str] = Field(default="comfyui-output", description="ComfyUI output S3文件夹名")
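For context on the new modal_is_local switch: WorkerConfig extends BaseSettings, so (assuming the pydantic-settings package that pairs with Pydantic v2) the flag can be flipped from the environment without a code change. A minimal sketch, with a hypothetical stand-in class; the env-var override is standard pydantic-settings behaviour, not something shown in this diff:

import os

from pydantic import Field
from pydantic_settings import BaseSettings


class DemoWorkerConfig(BaseSettings):  # hypothetical stand-in for WorkerConfig
    modal_is_local: bool = Field(default=False, description="本地开发环境")


os.environ["MODAL_IS_LOCAL"] = "true"  # env names are case-insensitive by default
print(DemoWorkerConfig().modal_is_local)  # True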
@@ -80,7 +80,7 @@ class MediaSource(BaseModel):
         cache_path = os.path.join(s3_mount_path, media_source.cache_filepath)

         # 校验媒体文件是否存在缓存中
-        if not os.path.exists(cache_path):
+        if not config.modal_is_local and not os.path.exists(cache_path):
             raise ValueError(f"媒体文件 {media_source.cache_filepath} 不存在于缓存中")

         return media_source
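The changed guard means the cache-existence check is skipped whenever the worker runs with modal_is_local=True, presumably because the S3 mount is not present on a developer machine. A minimal sketch of the short-circuit pattern; the config object and path below are stand-ins, not the repo's actual objects:

import os


class _DemoConfig:  # stand-in for the loaded WorkerConfig instance
    modal_is_local = True


config = _DemoConfig()
cache_path = "/mnt/s3-cache/test/output.mp4"  # hypothetical mount path

# With modal_is_local=True the left operand is False, so os.path.exists is never consulted.
if not config.modal_is_local and not os.path.exists(cache_path):
    raise ValueError(f"媒体文件 {cache_path} 不存在于缓存中")
print("existence check skipped in local mode")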
@@ -286,9 +286,9 @@ class FFMPEGOverlayGifTaskStatusResponse(BaseFFMPEGTaskStatusResponse):

 class FFMPEGSubtitleOverlayRequest(BaseFFMPEGTaskRequest):
     media: MediaSource = Field(description="需要处理的媒体源")
-    subtitle: Optional[MediaSource] = Field(description="需要叠加的字幕文件")
-    embedded_subtitle: Optional[MediaSource] = Field(description="需要内嵌的字幕文件")
-    fonts: Optional[List[MediaSource]] = Field(description="字幕文件内使用到的字体文件")
+    subtitle: Optional[MediaSource] = Field(default=None, description="需要叠加的字幕文件")
+    embedded_subtitle: Optional[MediaSource] = Field(default=None, description="需要内嵌的字幕文件")
+    fonts: Optional[List[MediaSource]] = Field(default=None, description="字幕文件内使用到的字体文件")

     @field_validator('media', mode='before')
     @classmethod
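The reason the three subtitle fields needed default=None: in Pydantic v2, annotating a field as Optional[...] only allows None as a value, it does not make the field optional; without an explicit default the field is still required. A minimal sketch, with illustrative names rather than the repo's model:

from typing import Optional

from pydantic import BaseModel, Field, ValidationError


class SubtitleFieldsDemo(BaseModel):  # illustrative, not the repo's model
    subtitle_required: Optional[str] = Field(description="Optional type, but no default -> still required")
    subtitle_optional: Optional[str] = Field(default=None, description="truly optional")


try:
    SubtitleFieldsDemo()  # subtitle_required was never supplied
except ValidationError as exc:
    print(exc.errors()[0]["type"])  # "missing"

print(SubtitleFieldsDemo(subtitle_required=None).subtitle_optional)  # None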
@@ -351,6 +351,7 @@ class FFMPEGSubtitleOverlayRequest(BaseFFMPEGTaskRequest):
             raise pydantic.ValidationError("至少需要提供一个有效字幕")
         if self.subtitle is not None and self.fonts is None:
             raise pydantic.ValidationError("使用叠加字幕时需要指定使用的字体文件")
+        return self


 class FFMPEGSubtitleTaskStatusResponse(BaseFFMPEGTaskStatusResponse):
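The added return self matters because an after-mode model validator replaces the model with whatever it returns; if it falls through and returns None, the parsed request comes back as None, which is the "did not return Self" bug named in the commit message. A minimal sketch of the pattern, with simplified fields and a plain ValueError instead of the repo's pydantic.ValidationError calls:

from typing import Optional

from pydantic import BaseModel, model_validator


class SubtitleOverlayDemo(BaseModel):  # simplified stand-in for FFMPEGSubtitleOverlayRequest
    subtitle: Optional[str] = None
    embedded_subtitle: Optional[str] = None
    fonts: Optional[list] = None

    @model_validator(mode="after")
    def check_subtitle(self) -> "SubtitleOverlayDemo":
        if self.subtitle is None and self.embedded_subtitle is None:
            raise ValueError("至少需要提供一个有效字幕")
        if self.subtitle is not None and self.fonts is None:
            raise ValueError("使用叠加字幕时需要指定使用的字体文件")
        return self  # without this line the parsed model comes back as None (the bug this commit fixes)


print(SubtitleOverlayDemo.model_validate({"embedded_subtitle": "a.ass"}))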
@@ -504,7 +505,7 @@ class GeminiRequest(BaseFFMPEGTaskRequest):
     end_time: str = Field(default="00:20:00.000", description="结束时间(hls)")
     options: FFMPEGSliceOptions = Field(default=FFMPEGSliceOptions(), description="输出质量选项")
     scale: float = Field(default=0.85, description="视频尺寸缩放倍率")
-    last_product_text:str = Field(default="", description="上一段视频结尾介绍的商品以及标签")
+    last_product_text: str = Field(default="", description="上一段视频结尾介绍的商品以及标签")

     @field_validator('media_hls_url', mode='before')
     @classmethod
@@ -673,6 +674,7 @@ class GeminiSecondStagePromptVariables(BaseModel):
     @computed_field(description="xml格式排列的识别出的商品JSON列表")
     @property
     def product_json_list_xml(self) -> str:
-        xml_items = [f" <product>{json.dumps(product,ensure_ascii=False)}</product>" for product in self.product_json_list]
+        xml_items = [f" <product>{json.dumps(product, ensure_ascii=False)}</product>" for product in
+                     self.product_json_list]
         xml_string = "\n".join(xml_items)
         return f"<products>\n{xml_string}\n </products>"
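For reference, the @computed_field plus @property combination exposes the derived XML string during serialization, so it shows up in dumps of the prompt-variable model. A minimal sketch assuming product_json_list is a list of plain dicts; the stand-in class and sample data are not from the repo:

import json
from typing import Any, Dict, List

from pydantic import BaseModel, computed_field


class PromptVariablesDemo(BaseModel):  # stand-in, not GeminiSecondStagePromptVariables
    product_json_list: List[Dict[str, Any]] = []

    @computed_field(description="products as XML-wrapped JSON entries")
    @property
    def product_json_list_xml(self) -> str:
        items = [f" <product>{json.dumps(p, ensure_ascii=False)}</product>"
                 for p in self.product_json_list]
        return "<products>\n" + "\n".join(items) + "\n</products>"


demo = PromptVariablesDemo(product_json_list=[{"name": "示例商品", "price": 9.9}])
print(demo.model_dump()["product_json_list_xml"])  # computed fields appear in model_dump()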
@@ -111,22 +111,17 @@ class FFMPEGTestCase(unittest.IsolatedAsyncioTestCase):
     async def test_ffmpeg_slice_stream(self):
         from pydantic import TypeAdapter
         # #EXT-X-PROGRAM-DATE-TIME:2025-06-23T06:20:30.720+0000
         # #EXT-X-PROGRAM-DATE-TIME:2025-06-23T10:09:38.476+0000

-        stream_url = "https://cdn.roasmax.cn/test/records/hls/fc-01JXY1AS1HDGS300EQ54ATAKHF/playlist.m3u8"
+        # stream_url = "https://cdn.roasmax.cn/test/records/hls/fc-01JXY1AS1HDGS300EQ54ATAKHF/playlist.m3u8"
+        stream_url = "https://cdn.roasmax.cn/test/records/hls/fc-01JYDQ3RVMPAEKMYHA0HQ14WAJ/playlist.m3u8"
         adapter = TypeAdapter(List[FFMpegSliceSegment])
         segments = adapter.validate_json("""[
             {
-                "start": 920.425,
-                "end": 921.688
-            },
-            {
-                "start": 922.425,
-                "end": 923.688
-            },
-            {
-                "start": 940.425,
-                "end": 941.688
-            }
+                "start": 12600,
+                "end": 13500
+            }
         ]""")
         for segment in segments:
             logger.info(f"{segment.start.toFormatStr()} --> {segment.end.toFormatStr()}")
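The test relies on pydantic's TypeAdapter, which can validate a JSON array straight into List[FFMpegSliceSegment] without a wrapper model. A minimal sketch with a hypothetical stand-in segment model; the real FFMpegSliceSegment and its toFormatStr() helper are not reproduced here:

from typing import List

from pydantic import BaseModel, TypeAdapter


class SliceSegmentDemo(BaseModel):  # hypothetical stand-in for FFMpegSliceSegment
    start: float
    end: float


adapter = TypeAdapter(List[SliceSegmentDemo])
segments = adapter.validate_json('[{"start": 12600, "end": 13500}]')
for segment in segments:
    print(f"{segment.start} --> {segment.end}")  # 12600.0 --> 13500.0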
@@ -3,6 +3,7 @@ import unittest

 import httpx
 from loguru import logger
+from BowongModalFunctions.models.web_model import FFMPEGSubtitleOverlayRequest


 class PydanticModelTestCase(unittest.TestCase):
@@ -25,6 +26,14 @@ class PydanticModelTestCase(unittest.TestCase):
         logger.info(f"Login response: {session_json}")
         self.assertEqual(True, True)

+    def test_subtitle_apply(self):
+        subtitle_apply_input = """
+        {\"media\": \"s3://ap-northeast-2/modal-media-cache/test/bgm_nosie_reduce/outputs/fc-01JYG6NVQ1AEJMARECGN8FXDYH/output.mp4\",\n \"subtitle\": \"s3://ap-northeast-2/modal-media-cache/upload/2d5e2674-2c03-4a83-a589-3dce96003470/1183fed3-770d-4ded-a314-ef47c37d84d7.ass\",\n \"fonts\": [\n \"s3://ap-northeast-2/modal-media-cache/upload/test/fonts/荆南俊俊体.ttf\"\n ]}
+        """
+
+        request = FFMPEGSubtitleOverlayRequest.model_validate_json(subtitle_apply_input)
+        logger.info(f"request = {request.model_dump_json(indent=2, exclude_none=True)}")
+

 if __name__ == '__main__':
     unittest.main()
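The new test feeds bare "s3://..." strings where the model expects MediaSource objects; this works because of the mode='before' field validators seen earlier, which can coerce a raw string into the nested model before validation runs. A minimal sketch of that coercion pattern, using stand-in classes rather than the repo's MediaSource or its actual validator logic:

from pydantic import BaseModel, field_validator


class MediaSourceDemo(BaseModel):  # stand-in for MediaSource
    url: str


class SubtitleRequestDemo(BaseModel):  # stand-in for FFMPEGSubtitleOverlayRequest
    media: MediaSourceDemo

    @field_validator("media", mode="before")
    @classmethod
    def coerce_media(cls, value):
        # Accept a bare "s3://..." string and lift it into the nested model shape.
        if isinstance(value, str):
            return {"url": value}
        return value


request = SubtitleRequestDemo.model_validate_json('{"media": "s3://bucket/key.mp4"}')
print(request.media.url)  # s3://bucket/key.mp4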