From b4d094ce64da36d14793f30554c683ba21bc90ce Mon Sep 17 00:00:00 2001
From: "kyj@bowong.ai"
Date: Wed, 30 Jul 2025 15:18:48 +0800
Subject: [PATCH] ADD: mj image generation - add provider option
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 ext/comfyui_modal_deploy.py     |   2 +-
 ext/comfyui_modal_deploy_4ui.py | 103 ++++++++++++++++++++++++++++++++
 nodes/image_modal_nodes.py      |  73 +++++++++++++++-------
 3 files changed, 157 insertions(+), 21 deletions(-)
 create mode 100644 ext/comfyui_modal_deploy_4ui.py

diff --git a/ext/comfyui_modal_deploy.py b/ext/comfyui_modal_deploy.py
index dc3408b..1a27b2e 100644
--- a/ext/comfyui_modal_deploy.py
+++ b/ext/comfyui_modal_deploy.py
@@ -12,7 +12,7 @@ image = (
     .pip_install("comfy_cli==0.0.0",
                  index_url="https://packages-1747622887395:0ee15474ccd7b27b57ca63a9306327678e6c2631@g-ldyi2063-pypi.pkg.coding.net/dev/packages/simple")
     .run_commands(
-        "comfy --skip-prompt install --fast-deps --nvidia --version 0.3.40"
+        "comfy --skip-prompt install --fast-deps --nvidia --version 0.3.46"
     )
     .pip_install_from_pyproject(os.path.join(os.path.dirname(__file__), "pyproject.toml"))
     .run_commands("comfy node install https://github.com/yolain/ComfyUI-Easy-Use.git")
diff --git a/ext/comfyui_modal_deploy_4ui.py b/ext/comfyui_modal_deploy_4ui.py
new file mode 100644
index 0000000..62f5979
--- /dev/null
+++ b/ext/comfyui_modal_deploy_4ui.py
@@ -0,0 +1,103 @@
+# Filename: comfyui_v2.py
+import os
+import subprocess
+
+import modal
+
+image = (
+    modal.Image.debian_slim(
+        python_version="3.10"
+    )
+    .apt_install("git", "gcc", "libportaudio2", "ffmpeg")
+    .pip_install("comfy_cli==0.0.0",
+                 index_url="https://packages-1747622887395:0ee15474ccd7b27b57ca63a9306327678e6c2631@g-ldyi2063-pypi.pkg.coding.net/dev/packages/simple")
+    .run_commands(
+        "comfy --skip-prompt install --fast-deps --nvidia --version 0.3.40"
+    )
+    .pip_install_from_pyproject(os.path.join(os.path.dirname(__file__), "pyproject.toml"))
+    .run_commands("comfy node install https://github.com/yolain/ComfyUI-Easy-Use.git")
+    .run_commands("comfy node install https://github.com/crystian/ComfyUI-Crystools.git")
+    .run_commands("comfy node install https://github.com/pythongosssss/ComfyUI-Custom-Scripts.git")
+    .run_commands("comfy node install https://github.com/kijai/ComfyUI-KJNodes.git")
+    .run_commands("comfy node install https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite.git")
+    .run_commands("comfy node install https://github.com/WASasquatch/was-node-suite-comfyui.git")
+    .run_commands("comfy node install https://github.com/cubiq/ComfyUI_essentials.git")
+    .run_commands("comfy node install https://github.com/jamesWalker55/comfyui-various.git")
+    .run_commands("comfy node install https://gitea.bowongai.com/Polaris/ComfyUI-CustomNode.git")
+    .run_commands("comfy node install https://github.com/rgthree/rgthree-comfy.git")
+    .run_commands("rm -rf /root/comfy/ComfyUI/models&&ln -s /models /root/comfy/ComfyUI/models")
+    .run_commands("rm -rf /root/comfy/ComfyUI/input&&ln -s /models/input /root/comfy/ComfyUI/input")
+    .run_commands("rm -rf /root/comfy/ComfyUI/output&&ln -s /models/output /root/comfy/ComfyUI/output")
+)
+app = modal.App(image=image)
+custom_secret = modal.Secret.from_name("comfyui-custom-secret", environment_name="dev")
+vol = modal.Volume.from_name("comfy_model", environment_name="dev", create_if_missing=True)
+
+
+@app.function(
+    min_containers=0,
+    buffer_containers=0,
+    max_containers=1,
+    scaledown_window=600,
+    secrets=[custom_secret],
+    volumes={
+        "/models": vol
+    }
+)
+@modal.concurrent(
+    max_inputs=10
+)
+@modal.web_server(8000, startup_timeout=120)
+def ui_1():
+    subprocess.Popen("comfy launch -- --cpu --listen 0.0.0.0 --port 8000", shell=True)
+
+@app.function(
+    min_containers=0,
+    buffer_containers=0,
+    max_containers=1,
+    scaledown_window=600,
+    secrets=[custom_secret],
+    volumes={
+        "/models": vol
+    }
+)
+@modal.concurrent(
+    max_inputs=10
+)
+@modal.web_server(8000, startup_timeout=120)
+def ui_2():
+    subprocess.Popen("comfy launch -- --cpu --listen 0.0.0.0 --port 8000", shell=True)
+
+@app.function(
+    min_containers=0,
+    buffer_containers=0,
+    max_containers=1,
+    scaledown_window=600,
+    secrets=[custom_secret],
+    volumes={
+        "/models": vol
+    }
+)
+@modal.concurrent(
+    max_inputs=10
+)
+@modal.web_server(8000, startup_timeout=120)
+def ui_3():
+    subprocess.Popen("comfy launch -- --cpu --listen 0.0.0.0 --port 8000", shell=True)
+
+@app.function(
+    min_containers=0,
+    buffer_containers=0,
+    max_containers=1,
+    scaledown_window=600,
+    secrets=[custom_secret],
+    volumes={
+        "/models": vol
+    }
+)
+@modal.concurrent(
+    max_inputs=10
+)
+@modal.web_server(8000, startup_timeout=120)
+def ui_4():
+    subprocess.Popen("comfy launch -- --cpu --listen 0.0.0.0 --port 8000", shell=True)
\ No newline at end of file
diff --git a/nodes/image_modal_nodes.py b/nodes/image_modal_nodes.py
index c369c9a..c9c605d 100644
--- a/nodes/image_modal_nodes.py
+++ b/nodes/image_modal_nodes.py
@@ -209,6 +209,7 @@ class ModalMidJourneyGenerateImage:
         return {
             "required": {
                 "prompt": ("STRING", {"default": "一幅宏大壮美的山川画卷", "multiline": True}),
+                "provider": (["ttapi", "302ai"],),
                 "endpoint": ("STRING", {"default": "bowongai-test--text-video-agent-fastapi-app.modal.run"}),
                 "timeout": ("INT", {"default": 300, "min": 10, "max": 1200}),
             },
@@ -223,7 +224,7 @@
     OUTPUT_NODE = False
     CATEGORY = "不忘科技-自定义节点🚩/图片/Midjourney"
 
-    def process(self, prompt: str, endpoint: str, timeout: int, **kwargs):
+    def process(self, prompt: str, provider: str, endpoint: str, timeout: int, **kwargs):
         try:
             logger.info("请求同步接口")
             format = "PNG"
@@ -234,25 +235,57 @@ class ModalMidJourneyGenerateImage:
                                   f'image/{format.lower()}')}
             else:
                 files = None
-            job_resp = send_request("post", f"https://{endpoint}/api/union/img/sync/generate/image",
-                                    headers={'Authorization': 'Bearer bowong7777'},
-                                    data={"prompt": prompt},
-                                    files=files,
-                                    timeout=timeout)
-            job_resp.raise_for_status()
-            job_resp = job_resp.json()
-            if not job_resp["status"]:
-                raise Exception("生成失败, 可能因为风控")
-            result_url = job_resp["data"]
-            if isinstance(result_url, list):
-                result_list = []
-                for url in result_url:
-                    logger.success("img_url: " + url)
-                    result_list.append(url_to_tensor(url).squeeze(0))
-                result_list = torch.stack(result_list, dim=0)
-                return (result_list,)
-            logger.success("img_url: " + result_url)
-            return (url_to_tensor(result_url),)
+            if provider == "302ai":
+                job_resp = send_request("post", f"https://{endpoint}/api/union/img/sync/generate/image",
+                                        headers={'Authorization': 'Bearer bowong7777'},
+                                        data={"prompt": prompt},
+                                        files=files,
+                                        timeout=timeout)
+                job_resp.raise_for_status()
+                job_resp = job_resp.json()
+                if not job_resp["status"]:
+                    raise Exception("生成失败, 可能因为风控")
+                result_url = job_resp["data"]
+                if isinstance(result_url, list):
+                    result_list = []
+                    for url in result_url:
+                        logger.success("img_url: " + url)
+                        result_list.append(url_to_tensor(url).squeeze(0))
+                    result_list = torch.stack(result_list, dim=0)
+                    return (result_list,)
+                logger.success("img_url: " + result_url)
+                return (url_to_tensor(result_url),)
+            elif provider == "ttapi":
+                interval = 3
+                job_resp = send_request("post", f"https://{endpoint}/api/mj/async/generate/image?prompt={prompt}",
+                                        headers={'Authorization': 'Bearer bowong7777'},
+                                        files=files,
+                                        timeout=150)
+                job_resp.raise_for_status()
+                job_resp = job_resp.json()
+                if not job_resp["status"]:
+                    raise Exception("生成失败, 可能因为风控")
+                job_id = job_resp["data"]
+                for _ in range(timeout // interval):
+                    logger.info("查询结果")
+                    resp = send_request("get", f"https://{endpoint}/api/mj/async/query/status?task_id={job_id}",
+                                        headers={'Authorization': 'Bearer bowong7777'}, timeout=30)
+                    resp.raise_for_status()
+                    if resp.json()["status"]:
+                        if "fail" in resp.json()["msg"]:
+                            raise Exception("生成失败,可能因为风控")
+                        result_url = resp.json()["data"]
+                        if isinstance(result_url, list):
+                            result_list = []
+                            for url in result_url:
+                                logger.success("img_url: " + url)
+                                result_list.append(url_to_tensor(url).squeeze(0))
+                            result_list = torch.stack(result_list, dim=0)
+                            return (result_list,)
+                        logger.success("img_url: " + result_url)
+                        return (url_to_tensor(result_url),)
+                    sleep(interval)
+                raise Exception("等待超时")
         except Exception as e:
             raise e
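
Note on the "ttapi" branch added above: it follows a submit-then-poll flow. The POST returns a task id, and the status endpoint is then polled every interval seconds until a result arrives or roughly timeout seconds have elapsed (timeout // interval iterations, each ending in a sleep of interval seconds). The sketch below is a standalone illustration of that pattern under stated assumptions: it reuses the endpoint paths and bearer token shown in the patch, but replaces the project's send_request and url_to_tensor helpers with plain requests calls and a hypothetical generate_via_ttapi function, so it is not the node's actual implementation.

# Illustrative sketch of the submit-then-poll flow used by the "ttapi" provider branch.
# Assumptions: the endpoint host and token below are example values copied from the patch;
# generate_via_ttapi is a hypothetical helper, not part of the repository.
import time

import requests

ENDPOINT = "bowongai-test--text-video-agent-fastapi-app.modal.run"  # example host from the node defaults
HEADERS = {"Authorization": "Bearer bowong7777"}


def generate_via_ttapi(prompt: str, timeout: int = 300, interval: int = 3):
    # 1) Submit the generation job and receive a task id.
    submit = requests.post(
        f"https://{ENDPOINT}/api/mj/async/generate/image",
        params={"prompt": prompt},
        headers=HEADERS,
        timeout=150,
    )
    submit.raise_for_status()
    body = submit.json()
    if not body["status"]:
        raise RuntimeError("generation request rejected")
    task_id = body["data"]

    # 2) Poll the status endpoint until the task finishes or the budget is spent:
    #    (timeout // interval) iterations * interval seconds per sleep ~= timeout seconds.
    for _ in range(timeout // interval):
        status = requests.get(
            f"https://{ENDPOINT}/api/mj/async/query/status",
            params={"task_id": task_id},
            headers=HEADERS,
            timeout=30,
        )
        status.raise_for_status()
        payload = status.json()
        if payload["status"]:
            if "fail" in payload["msg"]:
                raise RuntimeError("generation failed")
            return payload["data"]  # a URL or a list of URLs
        time.sleep(interval)
    raise TimeoutError("polling timed out")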