Mirror of https://github.com/facebookresearch/blt.git (synced 2025-09-10 14:27:49 +00:00)
allow flex-attention to be disabled (#19)
* allow flex-attention to silently fail
* allow flex-attn to be disabled via an env var
parent 1da3dd9315
commit caec8d2621
2 changed files with 10 additions and 4 deletions
@@ -1,5 +1,5 @@
 # Copyright (c) Meta Platforms, Inc. and affiliates.
-
+import os
 from enum import Enum
 from typing import Optional, Tuple, Union
 
@@ -16,7 +16,10 @@ from xformers.ops import AttentionBias, fmha
 
 from bytelatent import probe
 
-flex_attention_comp = torch.compile(flex_attention)
+if int(os.environ.get("BLT_ALLOW_MISSING_FLEX_ATTENTION", False)) == 0:
+    flex_attention_comp = torch.compile(flex_attention)
+else:
+    flex_attention_comp = None
 
 
 class InitStdFactor(Enum):
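For readers skimming the change, the sketch below shows the same env-var gate in isolation, together with a fallback path a caller might take when flex-attention is unavailable or disabled. The try/except import guard, the `attend` helper, and the scaled_dot_product_attention fallback are illustrative assumptions for this sketch, not part of the commit itself.

import os

import torch
import torch.nn.functional as F

try:
    # flex_attention is only available in sufficiently recent PyTorch builds.
    from torch.nn.attention.flex_attention import flex_attention
except ImportError:
    flex_attention = None

# Unset or "0" means the compiled flex-attention kernel is wanted; any other
# integer value disables it and leaves flex_attention_comp as None.
if flex_attention is not None and (
    int(os.environ.get("BLT_ALLOW_MISSING_FLEX_ATTENTION", False)) == 0
):
    flex_attention_comp = torch.compile(flex_attention)
else:
    flex_attention_comp = None


def attend(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
    # Use the compiled flex-attention kernel when it is available, otherwise
    # fall back to PyTorch's built-in scaled dot-product attention.
    if flex_attention_comp is not None:
        return flex_attention_comp(q, k, v)
    return F.scaled_dot_product_attention(q, k, v)

Setting BLT_ALLOW_MISSING_FLEX_ATTENTION=1 in the environment before the module is imported keeps flex_attention_comp as None, so callers take the fallback branch instead of the compiled flex-attention kernel.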