From 38e4bcf311640aaf8cc81be93924805f7b8f7c9c Mon Sep 17 00:00:00 2001
From: AkashKarnatak
Date: Tue, 27 Jan 2026 18:42:56 +0530
Subject: [PATCH] Perform inplace zeroing instead of creating a new tensor

---
 src/diffusers/pipelines/wan/pipeline_wan.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/src/diffusers/pipelines/wan/pipeline_wan.py b/src/diffusers/pipelines/wan/pipeline_wan.py
index dc2bb471101d..6e6aea70ea43 100644
--- a/src/diffusers/pipelines/wan/pipeline_wan.py
+++ b/src/diffusers/pipelines/wan/pipeline_wan.py
@@ -184,10 +184,8 @@ def _get_t5_prompt_embeds(
         prompt_embeds = self.text_encoder(text_input_ids.to(device), mask.to(device)).last_hidden_state
         prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
 
-        prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)]
-        prompt_embeds = torch.stack(
-            [torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in prompt_embeds], dim=0
-        )
+        for i, seq_len in enumerate(seq_lens):
+            prompt_embeds[i, seq_len:] = 0
 
         # duplicate text embeddings for each generation per prompt, using mps friendly method
         _, seq_len, _ = prompt_embeds.shape
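
Note (not part of the patch): a minimal standalone sketch showing that zeroing the padding positions in place produces the same tensor as the original slice/re-pad/stack approach. The shapes, the seq_lens values, and the clone() are chosen purely for illustration and are not taken from the pipeline.

import torch

# Hypothetical shapes for illustration; in the pipeline these come from the tokenizer/text encoder.
batch_size, max_sequence_length, hidden_dim = 2, 8, 4
prompt_embeds = torch.randn(batch_size, max_sequence_length, hidden_dim)
seq_lens = [5, 3]  # valid token count per prompt (sum of the attention mask)

# Old approach: slice each row to its valid length, re-pad with zeros, and stack into a new tensor.
old = torch.stack(
    [
        torch.cat([u[:v], u.new_zeros(max_sequence_length - v, hidden_dim)])
        for u, v in zip(prompt_embeds, seq_lens)
    ],
    dim=0,
)

# New approach from the patch: zero the padding positions in place, avoiding the extra allocations.
new = prompt_embeds.clone()  # cloned here only so both variants can be compared side by side
for i, seq_len in enumerate(seq_lens):
    new[i, seq_len:] = 0

assert torch.equal(old, new)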