mirror of
https://github.com/kvcache-ai/ktransformers.git
synced 2025-09-06 12:40:02 +00:00
Merge pull request #51 from molamooo/fix-f16-dequantize-device
[fix] f16 dequantize device ignored
This commit is contained in:
commit
1f85db3d73
1 changed file with 1 addition and 1 deletion
|
@@ -681,7 +681,7 @@ def dequantize_f16_gpu(data, device):
|
     res = torch.from_numpy(data)
     res_gpu = torch.empty_like(res, device=device)
     res_gpu.copy_(res)
-    return res
+    return res_gpu
 
 GGML_DEQUANTIZE = {
     "F32": dequantize_f32,
Loading…
Add table
Reference in a new issue