# LLM API: call LLM providers through the Cloudflare gateway.
import os
import re
from typing import Any, Union


def find_value_recursive(key: str, data: Union[dict, list]) -> Any:
    """Depth-first search of a nested dict/list structure.

    Returns the first value stored under *key*, or None when the key is
    absent anywhere in the structure.

    NOTE: a stored value that is itself None is indistinguishable from
    "not found" — callers must treat None as missing.
    """
    if isinstance(data, dict):
        if key in data:
            return data[key]
        # Recurse into every nested value until the first match is found.
        for value in data.values():
            result = find_value_recursive(key, value)
            if result is not None:
                return result
    elif isinstance(data, list):
        for item in data:
            result = find_value_recursive(key, item)
            if result is not None:
                return result
    return None


class LLMChat:
    """ComfyUI node: send a single-turn prompt to an LLM via the gateway
    and return the reply text.

    (Original docstring said "AWS S3下载" — a copy/paste leftover from an
    unrelated S3 node; this class performs an LLM chat-completion call.)
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "llm_provider": (["claude-3-5-sonnet-20241022-v2",
                                  "claude-3-5-sonnet-20241022-v3",
                                  "claude-3-7-sonnet-20250219-v1",
                                  "claude-4-sonnet-20250514-v1",
                                  "gpt-4o-1120",
                                  "gpt-4.1",
                                  "deepseek-v3",
                                  "deepseek-r1"],),
                "prompt": ("STRING", {"multiline": True}),
                "temperature": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0}),
                "max_tokens": ("INT", {"default": 4096, "min": 1, "max": 65535}),
                "timeout": ("INT", {"default": 120, "min": 30, "max": 900}),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("llm输出",)
    FUNCTION = "chat"
    CATEGORY = "不忘科技-自定义节点🚩/llm"

    def chat(self, llm_provider: str, prompt: str, temperature: float,
             max_tokens: int, timeout: int):
        """POST the prompt to the gateway's /chat/completions endpoint.

        Returns a 1-tuple (reply_text,). Retries up to 3 times (1s delay)
        on any failure; raises Exception after the final attempt.
        """
        # Imported lazily so the node package can still be registered
        # (and this module imported) when the optional runtime
        # dependencies are not installed.
        import httpx
        from retry import retry

        @retry(Exception, tries=3, delay=1)
        def _chat():
            try:
                with httpx.Client(timeout=httpx.Timeout(timeout, connect=15)) as session:
                    resp = session.post(
                        "https://gateway.bowong.cc/chat/completions",
                        headers={
                            "Content-Type": "application/json",
                            "Accept": "application/json",
                            # SECURITY: the token was hard-coded in source.
                            # Prefer the environment variable; the old
                            # literal remains only as a compatible fallback.
                            "Authorization": "Bearer " + os.environ.get(
                                "BOWONG_GATEWAY_TOKEN", "auth-bowong7777"),
                        },
                        json={
                            "model": llm_provider,
                            "messages": [
                                {
                                    "role": "user",
                                    "content": prompt,
                                }
                            ],
                            "temperature": temperature,
                            "max_tokens": max_tokens,
                        },
                    )
                    resp.raise_for_status()
                    # Don't rebind `resp` — keep response and payload distinct.
                    payload = resp.json()
                content = find_value_recursive("content", payload)
                if not isinstance(content, str):
                    # BUG FIX: the original fed None straight into re.sub,
                    # crashing with an opaque TypeError whenever the gateway
                    # response carried no "content" field.
                    raise ValueError("no 'content' field in response: {}".format(payload))
                # Collapse runs of blank lines into a single newline.
                content = re.sub(r'\n{2,}', '\n', content)
            except Exception as e:
                raise Exception("llm调用失败 {}".format(e))
            return (content,)

        return _chat()