import os

import cv2
import numpy as np
import torch
from comfy import model_management
from ultralytics import YOLO


class FaceExtract:
    """Face extraction using YOLO (人脸提取)."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("图片",)  # "图片" = "image"
    FUNCTION = "crop"
    CATEGORY = "不忘科技-自定义节点🚩"  # ComfyUI menu category label

    def crop(self, image):
        device = model_management.get_torch_device()
        # ComfyUI images are float tensors in [0, 1]; scale to pixel values.
        image_np = 255.0 * image.cpu().numpy()
        # The face-detection weights are loaded on every call; caching the
        # model at module or class level would avoid the repeated disk read.
        model = YOLO(
            model=os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "model",
                "yolov8n-face-lindevs.pt",
            )
        )
        n = 512  # output crop size
        total_images = image_np.shape[0]
        # Pre-fill with zeros so frames where no single face is found come
        # out black instead of containing uninitialized memory.
        out_images = np.zeros((total_images, n, n, 3), dtype=np.float32)
        for idx, image_item in enumerate(image_np):
            results = model.predict(
                image_item, imgsz=640, conf=0.75, iou=0.7, device=device, verbose=False
            )
            r = results[0]
            boxes = r.boxes.data.cpu().numpy()
            # Only crop when exactly one face is detected.
            if len(boxes) != 1:
                continue
            # boxes columns are (x1, y1, x2, y2, conf, cls) in pixel
            # coordinates; the unpacking below swaps the names so that the
            # x* variables index rows and the y* variables index columns,
            # matching the orig_img[row][col] access further down.
            y1, x1, y2, x2, _conf, _cls = boxes[0]
            # Expand the box to a square centered on the face.
            face_size = int(max(y2 - y1, x2 - x1))
            center = (x1 + x2) // 2, (y1 + y2) // 2
            x1, x2, y1, y2 = (
                center[0] - face_size // 2,
                center[0] + face_size // 2,
                center[1] - face_size // 2,
                center[1] + face_size // 2,
            )
            # Gray canvas; pixels that fall outside the source image keep
            # this fill value.
            template = np.full((face_size, face_size, 3), 20.0)
            for a, a1 in zip(range(int(x1), int(x2)), range(face_size)):
                for b, b1 in zip(range(int(y1), int(y2)), range(face_size)):
                    if 0 <= a < r.orig_img.shape[0] and 0 <= b < r.orig_img.shape[1]:
                        template[a1][b1] = r.orig_img[a][b]
            out_images[idx] = cv2.resize(template, (n, n))
        # Back to ComfyUI's [0, 1] float image format.
        cropped_face = out_images.astype(np.float32) / 255.0
        cropped_face = torch.from_numpy(cropped_face)
        return (cropped_face,)
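

# --- Registration sketch (an assumption, not confirmed by this file) ---
# ComfyUI discovers custom nodes through a module-level NODE_CLASS_MAPPINGS
# dictionary. If the mapping for this class is not already defined elsewhere
# in the package (for example in its __init__.py), registering the node
# would look roughly like this:
NODE_CLASS_MAPPINGS = {"FaceExtract": FaceExtract}
NODE_DISPLAY_NAME_MAPPINGS = {"FaceExtract": "人脸提取 (Face Extract)"}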