From 5b5deda42025d32aaede52f05f234ed7f0e3daf0 Mon Sep 17 00:00:00 2001
From: rnwang04
Date: Tue, 1 Jul 2025 14:24:27 +0800
Subject: [PATCH] revert using FP16

---
 ktransformers/local_chat.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/ktransformers/local_chat.py b/ktransformers/local_chat.py
index 75e12fb..0a685d7 100644
--- a/ktransformers/local_chat.py
+++ b/ktransformers/local_chat.py
@@ -79,8 +79,9 @@ def local_chat(
     if mode == 'long_context':
         assert config.architectures[0] == "LlamaForCausalLM", "only LlamaForCausalLM support long_context mode"
         torch.set_default_dtype(torch.float16)
-    elif xpu_fp16_model(config):
-        torch.set_default_dtype(torch.float16)
+    # elif xpu_fp16_model(config):
+    #     # using FP16 may cause accuracy issues, triggering core dumped during runtime
+    #     torch.set_default_dtype(torch.float16)
     else:
         torch.set_default_dtype(config.torch_dtype)
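
For reference, a minimal sketch (not the upstream code) of how the default-dtype selection in `local_chat()` behaves once this revert lands; the `config` and `mode` arguments below are stand-ins for the values the real function receives:

```python
import torch

def select_default_dtype(config, mode: str = "normal") -> None:
    """Sketch of the post-revert dtype selection in local_chat()."""
    if mode == "long_context":
        # long_context mode still forces FP16 and only supports LlamaForCausalLM.
        assert config.architectures[0] == "LlamaForCausalLM", \
            "only LlamaForCausalLM support long_context mode"
        torch.set_default_dtype(torch.float16)
    else:
        # The xpu_fp16_model() branch is commented out by this patch, so all
        # other models fall back to the dtype declared in their config.
        torch.set_default_dtype(config.torch_dtype)
```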