diff --git a/ltx_video/pipelines/pipeline_ltx_video.py b/ltx_video/pipelines/pipeline_ltx_video.py
index b69c443..8bb6d27 100644
--- a/ltx_video/pipelines/pipeline_ltx_video.py
+++ b/ltx_video/pipelines/pipeline_ltx_video.py
@@ -1412,7 +1412,7 @@ class LTXVideoPipeline(DiffusionPipeline):
         sigmoid_term = torch.sigmoid(4.0 * scale_factor * (abs_latents - 1.0))
         # DeepBeepMeep special touch to allow a smooth transition with tone mapping
         if start > 0:
-            gradient_tensor = torch.linspace(0, 1, latents.shape[2])
+            gradient_tensor = torch.linspace(0, 1, latents.shape[2],dtype= sigmoid_term.dtype, device=sigmoid_term.device)
             gradient_tensor = gradient_tensor ** 0.5
             gradient_tensor = gradient_tensor[ None, None, :, None, None ]
             sigmoid_term *= gradient_tensor
diff --git a/wgp.py b/wgp.py
index ccb1a0f..eef96a0 100644
--- a/wgp.py
+++ b/wgp.py
@@ -4468,7 +4468,7 @@ def generate_video(
                     status_info = "Extracting " + processes_names[preprocess_type]
                     send_cmd("progress", [0, get_latest_status(state, status_info)])
                     # start one frame ealier to faciliate latents merging later
-                    src_video, _ = preprocess_video_with_mask(video_guide, video_mask, height=image_size[0], width = image_size[1], max_frames= len(keep_frames_parsed) + (0 if guide_start_frame == 0 else 1), start_frame = guide_start_frame - (0 if guide_start_frame == 0 else 1), fit_canvas = sample_fit_canvas, target_fps = fps, process_type = preprocess_type, inpaint_color = 0, proc_no =1, negate_mask = "N" in video_prompt_type, process_outside_mask = "inpaint" if "X" in video_prompt_type else "identity" )
+                    src_video, _ = preprocess_video_with_mask(video_guide, video_mask, height=image_size[0], width = image_size[1], max_frames= len(keep_frames_parsed) + (0 if guide_start_frame == 0 else 1), start_frame = guide_start_frame - (0 if guide_start_frame == 0 else 1), fit_canvas = sample_fit_canvas, target_fps = fps, process_type = preprocess_type, inpaint_color = 0, proc_no =1, negate_mask = "N" in video_prompt_type, process_outside_mask = "inpaint" if "X" in video_prompt_type else "identity", block_size =32 )
                     if src_video != None:
                         src_video = src_video[ :(len(src_video)-1)// latent_size * latent_size +1 ]
                         refresh_preview["video_guide"] = Image.fromarray(src_video[0].cpu().numpy())