Commit 969b3e7

precommit

dakinggg committed Oct 9, 2023
1 parent bebd26d
Showing 1 changed file with 2 additions and 0 deletions.
llmfoundry/models/layers/attention.py (2 additions, 0 deletions)

@@ -16,6 +16,7 @@
 from llmfoundry.models.layers.fc import FC_CLASS_REGISTRY
 from llmfoundry.models.layers.norm import NORM_CLASS_REGISTRY
 
+
 def raise_if_flash_attn_v2():
     flash_attn_version = None
     # This only needs to be in a try except so that huggingface does not try to import it
@@ -28,6 +29,7 @@ def raise_if_flash_attn_v2():
     except:
         pass
 
+
 def _reset_is_causal(num_query_tokens: int, num_key_tokens: int,
                      original_is_causal: bool) -> bool:
     # disable causal when it is not needed
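
Both hunks are pure formatting: each added line is a blank line, giving the top-level functions the two blank lines of separation that PEP 8 formatters enforce, consistent with the "precommit" commit message. For readers following along, below is a minimal sketch of what the surrounding code appears to do, reconstructed only from the fragments visible in the hunks. The version bound, the error message, and the body of _reset_is_causal are assumptions for illustration, not the repository's actual source.

    # Sketch reconstructed from the diff context above; details marked as
    # assumptions are illustrative, not the actual llm-foundry code.
    from packaging import version


    def raise_if_flash_attn_v2():
        flash_attn_version = None
        # Bare try/except so that huggingface's dynamic import machinery
        # does not fail outright when flash_attn is absent (per the
        # comment visible in the diff).
        try:
            from flash_attn import __version__ as flash_attn_version
        except:
            pass
        # Assumption: raise when a 2.x install is detected, as the
        # function name suggests this code path targets flash-attn v1.
        if flash_attn_version is not None and version.parse(
                flash_attn_version) >= version.parse('2.0.0'):
            raise RuntimeError(
                'flash-attn v2 detected; flash-attn v1 is required here.')


    def _reset_is_causal(num_query_tokens: int, num_key_tokens: int,
                         original_is_causal: bool) -> bool:
        # disable causal when it is not needed
        # Assumption: during kv-cached generation the single new query
        # token attends to all keys, so a causal mask is unnecessary.
        if original_is_causal and num_query_tokens != num_key_tokens:
            if num_query_tokens != 1:
                raise NotImplementedError(
                    'query/key token-count mismatch is only supported '
                    'when there is exactly one query token.')
            return False
        return original_is_causal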
