ADD mj生图增加provider选项

This commit is contained in:
kyj@bowong.ai 2025-07-30 15:18:48 +08:00
parent 5b0e970b6f
commit b4d094ce64
3 changed files with 157 additions and 21 deletions

View File

@ -12,7 +12,7 @@ image = (
.pip_install("comfy_cli==0.0.0",
index_url="https://packages-1747622887395:0ee15474ccd7b27b57ca63a9306327678e6c2631@g-ldyi2063-pypi.pkg.coding.net/dev/packages/simple")
.run_commands(
"comfy --skip-prompt install --fast-deps --nvidia --version 0.3.40"
"comfy --skip-prompt install --fast-deps --nvidia --version 0.3.46"
)
.pip_install_from_pyproject(os.path.join(os.path.dirname(__file__), "pyproject.toml"))
.run_commands("comfy node install https://github.com/yolain/ComfyUI-Easy-Use.git")

View File

@ -0,0 +1,103 @@
# 文件名 comfyui_v2.py
import os
import subprocess
import modal
# Container image for the v2 ComfyUI deployment on Modal.
image = (
    modal.Image.debian_slim(
        python_version="3.10"
    )
    # System packages: git for node installs, gcc for native builds,
    # portaudio/ffmpeg for audio & video nodes.
    .apt_install("git", "gcc", "libportaudio2", "ffmpeg")
    # NOTE(review): this index URL embeds a plaintext credential token —
    # consider moving it into a Modal secret instead of the image definition.
    .pip_install("comfy_cli==0.0.0",
                 index_url="https://packages-1747622887395:0ee15474ccd7b27b57ca63a9306327678e6c2631@g-ldyi2063-pypi.pkg.coding.net/dev/packages/simple")
    .run_commands(
        # NOTE(review): pinned to 0.3.40 while the sibling deployment file was
        # bumped to 0.3.46 in this same commit — confirm which pin is intended.
        "comfy --skip-prompt install --fast-deps --nvidia --version 0.3.40"
    )
    .pip_install_from_pyproject(os.path.join(os.path.dirname(__file__), "pyproject.toml"))
    # Community custom-node packs used by the workflows.
    .run_commands("comfy node install https://github.com/yolain/ComfyUI-Easy-Use.git")
    .run_commands("comfy node install https://github.com/crystian/ComfyUI-Crystools.git")
    .run_commands("comfy node install https://github.com/pythongosssss/ComfyUI-Custom-Scripts.git")
    .run_commands("comfy node install https://github.com/kijai/ComfyUI-KJNodes.git")
    .run_commands("comfy node install https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite.git")
    .run_commands("comfy node install https://github.com/WASasquatch/was-node-suite-comfyui.git")
    .run_commands("comfy node install https://github.com/cubiq/ComfyUI_essentials.git")
    .run_commands("comfy node install https://github.com/jamesWalker55/comfyui-various.git")
    .run_commands("comfy node install https://gitea.bowongai.com/Polaris/ComfyUI-CustomNode.git")
    .run_commands("comfy node install https://github.com/rgthree/rgthree-comfy.git")
    # Replace the image-local models/input/output directories with symlinks
    # into the shared volume mounted at /models, so every container (and every
    # ui_N web server) sees the same model files and job artifacts.
    .run_commands("rm -rf /root/comfy/ComfyUI/models&&ln -s /models /root/comfy/ComfyUI/models")
    .run_commands("rm -rf /root/comfy/ComfyUI/input&&ln -s /models/input /root/comfy/ComfyUI/input")
    .run_commands("rm -rf /root/comfy/ComfyUI/output&&ln -s /models/output /root/comfy/ComfyUI/output")
)
# Modal app, plus the secret and shared volume used by every web server below.
app = modal.App(image=image)
custom_secret = modal.Secret.from_name("comfyui-custom-secret", environment_name="dev")
vol = modal.Volume.from_name("comfy_model", environment_name="dev", create_if_missing=True)
@app.function(
    min_containers=0,
    buffer_containers=0,
    max_containers=1,
    scaledown_window=600,
    secrets=[custom_secret],
    volumes={"/models": vol},
)
@modal.concurrent(max_inputs=10)
@modal.web_server(8000, startup_timeout=120)
def ui_1():
    """Serve a ComfyUI instance (CPU mode) on port 8000, instance #1."""
    launch_cmd = "comfy launch -- --cpu --listen 0.0.0.0 --port 8000"
    # Fire-and-forget: modal's web_server proxies to the port once it is up.
    subprocess.Popen(launch_cmd, shell=True)
@app.function(
    min_containers=0,
    buffer_containers=0,
    max_containers=1,
    scaledown_window=600,
    secrets=[custom_secret],
    volumes={"/models": vol},
)
@modal.concurrent(max_inputs=10)
@modal.web_server(8000, startup_timeout=120)
def ui_2():
    """Serve a ComfyUI instance (CPU mode) on port 8000, instance #2."""
    launch_cmd = "comfy launch -- --cpu --listen 0.0.0.0 --port 8000"
    # Fire-and-forget: modal's web_server proxies to the port once it is up.
    subprocess.Popen(launch_cmd, shell=True)
@app.function(
    min_containers=0,
    buffer_containers=0,
    max_containers=1,
    scaledown_window=600,
    secrets=[custom_secret],
    volumes={"/models": vol},
)
@modal.concurrent(max_inputs=10)
@modal.web_server(8000, startup_timeout=120)
def ui_3():
    """Serve a ComfyUI instance (CPU mode) on port 8000, instance #3."""
    launch_cmd = "comfy launch -- --cpu --listen 0.0.0.0 --port 8000"
    # Fire-and-forget: modal's web_server proxies to the port once it is up.
    subprocess.Popen(launch_cmd, shell=True)
@app.function(
    min_containers=0,
    buffer_containers=0,
    max_containers=1,
    scaledown_window=600,
    secrets=[custom_secret],
    volumes={"/models": vol},
)
@modal.concurrent(max_inputs=10)
@modal.web_server(8000, startup_timeout=120)
def ui_4():
    """Serve a ComfyUI instance (CPU mode) on port 8000, instance #4."""
    launch_cmd = "comfy launch -- --cpu --listen 0.0.0.0 --port 8000"
    # Fire-and-forget: modal's web_server proxies to the port once it is up.
    subprocess.Popen(launch_cmd, shell=True)

View File

@ -209,6 +209,7 @@ class ModalMidJourneyGenerateImage:
return {
"required": {
"prompt": ("STRING", {"default": "一幅宏大壮美的山川画卷", "multiline": True}),
"provider":(["ttapi","302ai"],),
"endpoint": ("STRING", {"default": "bowongai-test--text-video-agent-fastapi-app.modal.run"}),
"timeout": ("INT", {"default": 300, "min": 10, "max": 1200}),
},
@ -223,7 +224,7 @@ class ModalMidJourneyGenerateImage:
OUTPUT_NODE = False
CATEGORY = "不忘科技-自定义节点🚩/图片/Midjourney"
def process(self, prompt: str, endpoint: str, timeout: int, **kwargs):
def process(self, prompt: str, provider:str, endpoint: str, timeout: int, **kwargs):
try:
logger.info("请求同步接口")
format = "PNG"
@ -234,25 +235,57 @@ class ModalMidJourneyGenerateImage:
f'image/{format.lower()}')}
else:
files = None
job_resp = send_request("post", f"https://{endpoint}/api/union/img/sync/generate/image",
headers={'Authorization': 'Bearer bowong7777'},
data={"prompt": prompt},
files=files,
timeout=timeout)
job_resp.raise_for_status()
job_resp = job_resp.json()
if not job_resp["status"]:
raise Exception("生成失败, 可能因为风控")
result_url = job_resp["data"]
if isinstance(result_url, list):
result_list = []
for url in result_url:
logger.success("img_url: " + url)
result_list.append(url_to_tensor(url).squeeze(0))
result_list = torch.stack(result_list, dim=0)
return (result_list,)
logger.success("img_url: " + result_url)
return (url_to_tensor(result_url),)
if provider == "302ai":
job_resp = send_request("post", f"https://{endpoint}/api/union/img/sync/generate/image",
headers={'Authorization': 'Bearer bowong7777'},
data={"prompt": prompt},
files=files,
timeout=timeout)
job_resp.raise_for_status()
job_resp = job_resp.json()
if not job_resp["status"]:
raise Exception("生成失败, 可能因为风控")
result_url = job_resp["data"]
if isinstance(result_url, list):
result_list = []
for url in result_url:
logger.success("img_url: " + url)
result_list.append(url_to_tensor(url).squeeze(0))
result_list = torch.stack(result_list, dim=0)
return (result_list,)
logger.success("img_url: " + result_url)
return (url_to_tensor(result_url),)
elif provider == "ttapi":
interval = 3
job_resp = send_request("post", f"https://{endpoint}/api/mj/async/generate/image?prompt={prompt}",
headers={'Authorization': 'Bearer bowong7777'},
files=files,
timeout=150)
job_resp.raise_for_status()
job_resp = job_resp.json()
if not job_resp["status"]:
raise Exception("生成失败, 可能因为风控")
job_id = job_resp["data"]
for _ in range(0, timeout // interval, interval):
logger.info("查询结果")
resp = send_request("get", f"https://{endpoint}/api/mj/async/query/status?task_id={job_id}",
headers={'Authorization': 'Bearer bowong7777'}, timeout=30)
resp.raise_for_status()
if resp.json()["status"]:
if "fail" in resp.json()["msg"]:
raise Exception("生成失败,可能因为风控")
result_url = resp.json()["data"]
if isinstance(result_url, list):
result_list = []
for url in result_url:
logger.success("img_url: " + url)
result_list.append(url_to_tensor(url).squeeze(0))
result_list = torch.stack(result_list, dim=0)
return (result_list,)
logger.success("img_url: " + result_url)
return (url_to_tensor(result_url),)
sleep(interval)
raise Exception("等待超时")
except Exception as e:
raise e