ADD 增加llm节点

This commit is contained in:
kyj@bowong.ai 2025-07-07 16:28:23 +08:00
parent c9b0b3db82
commit 3f1932ceab
3 changed files with 88 additions and 4 deletions

View File

@ -1,3 +1,4 @@
from .nodes.llm_api import LLMChat
from .nodes.compute_video_point import VideoStartPointDurationCompute
from .nodes.cos import COSUpload, COSDownload
from .nodes.face_detect import FaceDetect
@ -63,7 +64,8 @@ NODE_CLASS_MAPPINGS = {
"TaskIdGenerate": TaskIdGenerate,
"RandomLineSelector": RandomLineSelector,
"PlugAndPlayWebhook": PlugAndPlayWebhook,
"SaveImageWithOutput": SaveImageWithOutput "SaveImageWithOutput": SaveImageWithOutput,
"LLMChat": LLMChat
}
# A dictionary that contains the friendly/humanly readable titles for the nodes
@ -101,6 +103,6 @@ NODE_DISPLAY_NAME_MAPPINGS = {
"TaskIdGenerate": "TaskID生成器",
"RandomLineSelector": "随机选择一行内容",
"PlugAndPlayWebhook": "Webhook转发器",
"SaveImageWithOutput": "保存图片(带输出)" "SaveImageWithOutput": "保存图片(带输出)",
"LLMChat": "LLM调用"
}

81
nodes/llm_api.py Normal file
View File

@ -0,0 +1,81 @@
# LLM API 通过cloudflare gateway调用llm
import re
from typing import Any, Union
import httpx
from retry import retry
def find_value_recursive(key: str, data: Union[dict, list]) -> Any:
    """Depth-first search a nested dict/list structure for *key*.

    Returns the first value stored under *key*, or None when the key is
    absent anywhere in the structure.

    NOTE: a key whose stored value is actually None is indistinguishable
    from a missing key, and the search cannot descend past it (the
    ``result is not None`` guard treats it as "not found").
    """
    if isinstance(data, dict):
        if key in data:
            return data[key]
        # Recurse into the values of all other keys.
        for value in data.values():
            result = find_value_recursive(key, value)
            if result is not None:
                return result
    elif isinstance(data, list):
        for item in data:
            result = find_value_recursive(key, item)
            if result is not None:
                return result
    # Explicit fall-through: key not found, or data is a scalar leaf.
    return None
class LLMChat:
    """ComfyUI node: call an LLM via the Cloudflare AI gateway.

    Sends a single-turn user prompt to the gateway's OpenAI-compatible
    ``/chat/completions`` endpoint and returns the model's text reply.
    (The original docstring said "AWS S3 download" — a copy-paste error.)
    """

    @classmethod
    def INPUT_TYPES(s):
        # ComfyUI node-input declaration: a list of selectable models,
        # the prompt text, and basic sampling / request-limit knobs.
        return {
            "required": {
                "llm_provider": (["claude-3-5-sonnet-20241022-v2",
                                  "claude-3-5-sonnet-20241022-v3",
                                  "claude-3-7-sonnet-20250219-v1",
                                  "claude-4-sonnet-20250514-v1",
                                  "gpt-4o-1120",
                                  "gpt-4.1",
                                  "deepseek-v3",
                                  "deepseek-r1"],),
                "prompt": ("STRING", {"multiline": True}),
                "temperature": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0}),
                "max_tokens": ("INT", {"default": 4096, "min": 1, "max": 65535}),
                "timeout": ("INT", {"default": 120, "min": 30, "max": 900}),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("llm输出",)
    FUNCTION = "chat"
    CATEGORY = "不忘科技-自定义节点🚩/llm"

    def chat(self, llm_provider: str, prompt: str, temperature: float,
             max_tokens: int, timeout: int):
        """Call the gateway and return a 1-tuple with the reply text.

        Retries the whole request up to 3 times (1s delay) on any
        exception, then re-raises as a generic Exception with context.

        :param llm_provider: model identifier forwarded as ``model``.
        :param prompt: user message content.
        :param temperature: sampling temperature (0.0–1.0).
        :param max_tokens: response token cap.
        :param timeout: overall request timeout in seconds (connect: 15s).
        :raises Exception: when the call fails after all retries.
        """
        @retry(Exception, tries=3, delay=1)
        def _chat():
            try:
                with httpx.Client(timeout=httpx.Timeout(timeout, connect=15)) as session:
                    resp = session.post(
                        "https://gateway.bowong.cc/chat/completions",
                        headers={
                            "Content-Type": "application/json",
                            "Accept": "application/json",
                            # SECURITY: hardcoded credential committed to
                            # source — should be moved to config/env var.
                            "Authorization": "Bearer auth-bowong7777"
                        },
                        json={
                            "model": llm_provider,
                            "messages": [
                                {
                                    "role": "user",
                                    "content": prompt
                                }
                            ],
                            "temperature": temperature,
                            "max_tokens": max_tokens
                        })
                    resp.raise_for_status()
                    payload = resp.json()
                content = find_value_recursive("content", payload)
                # Bug fix: a response without a "content" field previously
                # crashed re.sub with an opaque TypeError(None); fail with
                # a clear message instead (still retried/wrapped below).
                if content is None:
                    raise ValueError("response has no 'content' field: {!r}".format(payload))
                # Collapse runs of blank lines to single newlines.
                content = re.sub(r'\n{2,}', '\n', content)
            except Exception as e:
                # logger.exception("llm调用失败 {}".format(e))
                raise Exception("llm调用失败 {}".format(e))
            return (content,)
        return _chat()

View File

@ -10,3 +10,4 @@ tencentcloud-sdk-python
boto3 boto3
loguru loguru
pyYAML pyYAML
retry