cu11 just always stick to wmma

This commit is contained in:
Concedo 2025-06-06 13:57:00 +08:00
parent eec5a8ad16
commit ca99f79ea9

View file

@@ -342,7 +342,7 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst
}
// The MMA implementation needs Turing or newer, use the old WMMA code for Volta:
-    if (cc == GGML_CUDA_CC_TURING || (fp16_mma_available(cc) && !new_mma_available(cc))) { //kcpp: turing use wmma to fix cu11 incoherence
+    if (ggml_cuda_highest_compiled_arch(cc) <= GGML_CUDA_CC_TURING || cc == GGML_CUDA_CC_TURING || (fp16_mma_available(cc) && !new_mma_available(cc))) { //kcpp: use wmma to fix cu11 incoherence
ggml_cuda_flash_attn_ext_wmma_f16(ctx, dst);
return;
}