removed flash attention

Roland Tannous 2026-04-16 01:21:18 +04:00
parent c3cec00ca8
commit 8a531e3290
2 changed files with 7 additions and 7 deletions

@@ -488,10 +488,10 @@ def run_training_process(
     try:
         _ensure_causal_conv1d_fast_path(event_queue, model_name)
         _ensure_mamba_ssm(event_queue, model_name)
-        _ensure_flash_attn_for_long_context(
-            event_queue,
-            int(config.get("max_seq_length", 2048)),
-        )
+        #_ensure_flash_attn_for_long_context(
+        #    event_queue,
+        #    int(config.get("max_seq_length", 2048)),
+        #)
     except Exception as exc:
         event_queue.put(
             {

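Note: the body of _ensure_flash_attn_for_long_context is not part of this diff; the commit only comments out its call site. The sketch below is an assumption of what such a guard typically looks like (an import check that emits a warning onto event_queue); the threshold value and message wording are illustrative, not taken from the repository.

# Hypothetical sketch only; not the repository's implementation.
def _ensure_flash_attn_for_long_context(event_queue, max_seq_length: int) -> None:
    if max_seq_length <= 4096:
        return  # short contexts run acceptably without flash attention
    try:
        import flash_attn  # noqa: F401  # PyPI package: flash-attn
    except ImportError:
        event_queue.put(
            {
                "type": "warning",
                "message": "flash-attn is not installed; long-context training may be slow",
            }
        )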

@@ -989,9 +989,9 @@ def install_python_stack() -> int:
     #     constrain = False,
     # )
-    if not IS_WINDOWS and not IS_MACOS and not NO_TORCH:
-        _progress("flash-attn")
-        _ensure_flash_attn()
+    #if not IS_WINDOWS and not IS_MACOS and not NO_TORCH:
+    #    _progress("flash-attn")
+    #    _ensure_flash_attn()
     # # 6. Patch: override llama_cpp.py with fix from unsloth-zoo feature/llama-cpp-windows-support branch
     # patch_package_file(
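On the install side, the now-commented block would have called _ensure_flash_attn() only on Linux builds with torch enabled. A minimal sketch of such a helper, assuming it simply pip-installs flash-attn when the module is missing (an assumption, not this repo's code):

# Hypothetical sketch; the actual _ensure_flash_attn() is not shown in this diff.
import subprocess
import sys

def _ensure_flash_attn() -> None:
    try:
        import flash_attn  # noqa: F401
        return  # already installed, nothing to do
    except ImportError:
        pass
    # flash-attn compiles against the already-installed torch/CUDA toolchain,
    # so the upstream project recommends installing with --no-build-isolation.
    subprocess.check_call(
        [sys.executable, "-m", "pip", "install", "flash-attn", "--no-build-isolation"]
    )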