Mirror of https://github.com/unslothai/unsloth.git
removed flash attention

parent c3cec00ca8
commit 8a531e3290

2 changed files with 7 additions and 7 deletions
@@ -488,10 +488,10 @@ def run_training_process(
     try:
         _ensure_causal_conv1d_fast_path(event_queue, model_name)
         _ensure_mamba_ssm(event_queue, model_name)
-        _ensure_flash_attn_for_long_context(
-            event_queue,
-            int(config.get("max_seq_length", 2048)),
-        )
+        #_ensure_flash_attn_for_long_context(
+        #    event_queue,
+        #    int(config.get("max_seq_length", 2048)),
+        #)
     except Exception as exc:
         event_queue.put(
             {
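The call removed here wired a flash-attn availability check into training startup. A minimal sketch of what such a guard could look like, assuming the helper only reports through the event queue when flash-attn is missing for long contexts (the function body, the 2048-token threshold, and the event dict shape are all assumptions; only the names _ensure_flash_attn_for_long_context, event_queue, and max_seq_length appear in the diff):

# Hypothetical reconstruction -- not the project's actual helper.
import importlib.util
import queue

def _ensure_flash_attn_for_long_context(event_queue: queue.Queue, max_seq_length: int) -> None:
    # Short contexts run fine on the default attention kernels.
    if max_seq_length <= 2048:
        return
    # flash_attn is an optional dependency; probe for it without importing it.
    if importlib.util.find_spec("flash_attn") is None:
        event_queue.put({
            "type": "warning",
            "message": (
                f"flash-attn is not installed; max_seq_length={max_seq_length} "
                "may be slow or exhaust memory with the default attention."
            ),
        })

# Usage: q = queue.Queue(); _ensure_flash_attn_for_long_context(q, 8192)

Reporting through the queue instead of raising would keep a missing optional dependency from aborting the run, which fits the surrounding try/except that forwards exceptions as events.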
@@ -989,9 +989,9 @@ def install_python_stack() -> int:
     # constrain = False,
     # )

-    if not IS_WINDOWS and not IS_MACOS and not NO_TORCH:
-        _progress("flash-attn")
-        _ensure_flash_attn()
+    #if not IS_WINDOWS and not IS_MACOS and not NO_TORCH:
+    #    _progress("flash-attn")
+    #    _ensure_flash_attn()

     # # 6. Patch: override llama_cpp.py with fix from unsloth-zoo feature/llama-cpp-windows-support branch
     # patch_package_file(
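The second hunk comments out the installer-side counterpart. A rough sketch of the platform gate the removed block implemented, assuming IS_WINDOWS / IS_MACOS / NO_TORCH are module-level flags (their definitions below are guesses; only the names appear in the diff):

# Hypothetical reconstruction of the removed install gate -- the flag
# definitions are assumptions; only the names appear in the diff.
import os
import platform
import subprocess
import sys

IS_WINDOWS = platform.system() == "Windows"
IS_MACOS = platform.system() == "Darwin"
NO_TORCH = os.environ.get("NO_TORCH", "0") == "1"  # assumed env-var driven

def _ensure_flash_attn() -> None:
    # flash-attn publishes Linux/CUDA wheels only, so skip other platforms.
    if IS_WINDOWS or IS_MACOS or NO_TORCH:
        return
    # --no-build-isolation is the install flag flash-attn's README recommends.
    subprocess.run(
        [sys.executable, "-m", "pip", "install", "flash-attn", "--no-build-isolation"],
        check=False,  # treat flash-attn as optional: don't abort the stack install
    )

Using check=False matches the direction of this commit: flash-attn is treated as optional, so a failed wheel build should not fail the whole Python stack install.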