Only auto-enable bf16 VAE on NVIDIA GPUs that actually support it.
comfyanonymous committed Jan 15, 2024
1 parent 2395ae7 · commit f9e55d8
Showing 1 changed file: comfy/model_management.py (1 addition, 1 deletion)
@@ -175,7 +175,7 @@ def is_nvidia():
         if int(torch_version[0]) >= 2:
             if ENABLE_PYTORCH_ATTENTION == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
                 ENABLE_PYTORCH_ATTENTION = True
-            if torch.cuda.is_bf16_supported():
+            if torch.cuda.is_bf16_supported() and torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8:
                 VAE_DTYPE = torch.bfloat16
     if is_intel_xpu():
         if args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
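For context: compute capability major version 8 corresponds to NVIDIA's Ampere generation (A100, RTX 30xx) and newer, the first generation with native bf16 hardware, and the commit message suggests `torch.cuda.is_bf16_supported()` alone was returning True on GPUs where bf16 does not actually perform well. Below is a minimal, self-contained sketch of the gating logic this commit introduces, assuming a CUDA-capable PyTorch build; the `pick_vae_dtype` wrapper is illustrative only and is not a function in comfy/model_management.py.

```python
# Hypothetical standalone sketch of the dtype selection this commit performs
# inline in comfy/model_management.py; pick_vae_dtype is an illustrative name.
import torch

def pick_vae_dtype():
    # Default to fp32; only opt into bf16 when the GPU natively supports it.
    vae_dtype = torch.float32
    if torch.cuda.is_available() and torch.cuda.is_bf16_supported():
        props = torch.cuda.get_device_properties(torch.cuda.current_device())
        # Compute capability >= 8.0 means Ampere (A100, RTX 30xx) or newer,
        # the first NVIDIA generation with native bf16 support. This is the
        # extra check the commit adds on top of is_bf16_supported().
        if props.major >= 8:
            vae_dtype = torch.bfloat16
    return vae_dtype

print(pick_vae_dtype())
```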
