From 015f73dc4941ae6e01e01b934368f031c7fa8b8d Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Wed, 21 Aug 2024 16:17:15 -0400
Subject: [PATCH] Try a different type of flux fp16 fix.

---
 comfy/ldm/flux/layers.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfy/ldm/flux/layers.py b/comfy/ldm/flux/layers.py
index da0cf61b1c3..9820832bae9 100644
--- a/comfy/ldm/flux/layers.py
+++ b/comfy/ldm/flux/layers.py
@@ -178,7 +178,7 @@ def forward(self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor):
         txt += txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift)

         if txt.dtype == torch.float16:
-            txt = txt.clip(-65504, 65504)
+            txt = torch.nan_to_num(txt, nan=0.0, posinf=65504, neginf=-65504)

         return img, txt

@@ -233,7 +233,7 @@ def forward(self, x: Tensor, vec: Tensor, pe: Tensor) -> Tensor:
         output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
         x += mod.gate * output
         if x.dtype == torch.float16:
-            x = x.clip(-65504, 65504)
+            x = torch.nan_to_num(x, nan=0.0, posinf=65504, neginf=-65504)

         return x
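
Note (not part of the patch itself): the change replaces Tensor.clip with torch.nan_to_num on the float16 path. Clipping clamps values to the fp16 range (±65504) but leaves any NaN already in the tensor untouched, while nan_to_num additionally maps NaN to 0.0 and ±inf to the fp16 limits, so downstream layers never see non-finite activations. A minimal standalone PyTorch sketch illustrating the difference (purely for illustration, the tensor and values are made up):

import torch

# fp16 tensor containing an overflow-style inf and a NaN alongside a normal value
t = torch.tensor([1.0, float("inf"), float("-inf"), float("nan")], dtype=torch.float16)

# Previous fix: clamps +/-inf to the fp16 max, but the NaN survives
clipped = t.clip(-65504, 65504)
print(clipped)  # tensor([1., 65504., -65504., nan], dtype=torch.float16)

# New fix: NaN is replaced as well, so the result is entirely finite
fixed = torch.nan_to_num(t, nan=0.0, posinf=65504, neginf=-65504)
print(fixed)    # tensor([1., 65504., -65504., 0.], dtype=torch.float16)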