change GPT-J and GPT2 KVs to use fp16 instead

Concedo 2023-04-05 15:53:07 +08:00
parent 57e9f929ee
commit 1490cdd71d
5 changed files with 18 additions and 12 deletions

@@ -103,6 +103,8 @@ ModelLoadResult legacy_gptj_model_load(const std::string & fname, gptj_model_v1
     auto & ctx = model.ctx;
 
+    auto memory_type = GGML_V1_TYPE_F16;
+
     size_t ctx_size = 0;
     {
@@ -136,8 +138,8 @@ ModelLoadResult legacy_gptj_model_load(const std::string & fname, gptj_model_v1
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_v1_type_sizef(wtype)); // c_mlp_proj_w_trans
         ctx_size += n_layer*( n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32); // memory_v
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(memory_type); // memory_k
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(memory_type); // memory_v
 
         ctx_size += (5 + 10*n_layer)*256; // object overhead
@@ -240,8 +242,8 @@ ModelLoadResult legacy_gptj_model_load(const std::string & fname, gptj_model_v1
         const int n_mem = n_layer*n_ctx;
         const int n_elements = n_embd*n_mem;
 
-        model.memory_k = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
-        model.memory_v = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
+        model.memory_k = ggml_v1_new_tensor_1d(ctx, memory_type, n_elements);
+        model.memory_v = ggml_v1_new_tensor_1d(ctx, memory_type, n_elements);
 
         const size_t memory_size = ggml_v1_nbytes(model.memory_k) + ggml_v1_nbytes(model.memory_v);
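For a sense of scale, here is a minimal, self-contained sketch of how the KV cache footprint changes when the element type moves from F32 (4 bytes) to F16 (2 bytes). It is not part of this commit; the GPT-J-6B dimensions (n_ctx = 2048, n_layer = 28, n_embd = 4096) are assumed purely for illustration.

// Standalone sketch (not repository code): rough KV cache sizing before and
// after this change, using assumed GPT-J-6B dimensions.
#include <cstdint>
#include <cstdio>

int main() {
    // Assumed model/context parameters, chosen only for illustration.
    const int64_t n_ctx   = 2048;
    const int64_t n_layer = 28;
    const int64_t n_embd  = 4096;

    // One element per (context position, layer, embedding dim),
    // allocated once for memory_k and once for memory_v.
    const int64_t n_elements = n_ctx * n_layer * n_embd;

    const double f32_bytes = 2.0 * n_elements * 4; // memory_k + memory_v at F32
    const double f16_bytes = 2.0 * n_elements * 2; // memory_k + memory_v at F16

    std::printf("KV cache at F32: %.1f MiB\n", f32_bytes / (1024.0 * 1024.0));
    std::printf("KV cache at F16: %.1f MiB\n", f16_bytes / (1024.0 * 1024.0));
    return 0;
}

With these assumed dimensions the cache drops from roughly 1792 MiB to 896 MiB, which is the point of switching memory_type to GGML_V1_TYPE_F16.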