diff --git a/studio/backend/core/training/worker.py b/studio/backend/core/training/worker.py
index cca1c9b63..19b9990ce 100644
--- a/studio/backend/core/training/worker.py
+++ b/studio/backend/core/training/worker.py
@@ -488,10 +488,10 @@ def run_training_process(
     try:
         _ensure_causal_conv1d_fast_path(event_queue, model_name)
         _ensure_mamba_ssm(event_queue, model_name)
-        _ensure_flash_attn_for_long_context(
-            event_queue,
-            int(config.get("max_seq_length", 2048)),
-        )
+        #_ensure_flash_attn_for_long_context(
+        #    event_queue,
+        #    int(config.get("max_seq_length", 2048)),
+        #)
     except Exception as exc:
         event_queue.put(
             {
diff --git a/studio/install_python_stack.py b/studio/install_python_stack.py
index 1ef626ce3..bacf8bc7e 100644
--- a/studio/install_python_stack.py
+++ b/studio/install_python_stack.py
@@ -989,9 +989,9 @@ def install_python_stack() -> int:
     # constrain = False,
     # )
 
-    if not IS_WINDOWS and not IS_MACOS and not NO_TORCH:
-        _progress("flash-attn")
-        _ensure_flash_attn()
+    #if not IS_WINDOWS and not IS_MACOS and not NO_TORCH:
+    #    _progress("flash-attn")
+    #    _ensure_flash_attn()
 
     # # 6. Patch: override llama_cpp.py with fix from unsloth-zoo feature/llama-cpp-windows-support branch
     # patch_package_file(