do not cast to size_t, instead just use doubles

Concedo 2023-12-14 16:43:34 +08:00
parent 1ad8f0d80e
commit 05f7db4b29
3 changed files with 4 additions and 4 deletions

@@ -1555,7 +1555,7 @@ static bool llama_kv_cache_init(
     cache.cells.clear();
     cache.cells.resize(n_ctx);
-    cache.buf.resize(n_elements*((size_t)(ggml_type_sizef(ktype) + ggml_type_sizef(vtype))) + 2u*n_layer*ggml_tensor_overhead());
+    cache.buf.resize(n_elements*(ggml_type_sizef(ktype) + ggml_type_sizef(vtype)) + 2u*n_layer*ggml_tensor_overhead());
     memset(cache.buf.data, 0, cache.buf.size);
     struct ggml_init_params params;
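
Why the cast matters: ggml_type_sizef() reports bytes per element as a floating-point value, which is fractional for quantized types. Casting the per-element sum to size_t before multiplying by n_elements truncates that fraction and can under-size the buffer; keeping the multiplication in floating point and converting only the final product avoids this. The standalone C++ sketch below is not code from this commit; the element count and per-element sizes are made-up values chosen to show the difference.

    // Minimal sketch of the truncation issue fixed by this commit (illustrative numbers only).
    #include <cstdio>
    #include <cstddef>

    int main() {
        const size_t n_elements = 1000000;   // hypothetical element count
        const float  k_bytes    = 0.5625f;   // fractional bytes/element, e.g. a 4-bit type
        const float  v_bytes    = 0.5625f;

        // Old form: (size_t)(0.5625 + 0.5625) truncates 1.125 down to 1 before the multiply.
        size_t truncated = n_elements * ((size_t)(k_bytes + v_bytes));

        // New form: multiply in floating point, convert only the final product.
        size_t kept = (size_t)(n_elements * (k_bytes + v_bytes));

        printf("truncated: %zu bytes\n", truncated); // 1000000 -> buffer sized too small
        printf("kept:      %zu bytes\n", kept);      // 1125000
        return 0;
    }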