option to not use scratch

Concedo 2023-06-23 19:01:36 +08:00
parent d5e4cf7ffe
commit 43c2891afa
5 changed files with 102 additions and 32 deletions

@@ -389,7 +389,7 @@ bool gpt2_eval(
         const std::vector<gpt_vocab::id> & embd_inp,
               std::vector<float>         & embd_w,
               size_t                     & mem_per_token,
-              FileFormat file_format) {
+              FileFormat file_format, bool use_scratch=true) {
     const int N = embd_inp.size();
 
     const auto & hparams = model.hparams;
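
The new trailing parameter defaults to true, so existing call sites keep the scratch-buffer behavior unchanged; only callers that explicitly pass false opt out. A minimal sketch of such a call, assuming the leading arguments (model, n_threads, n_past) follow the usual gpt2_eval signature, which this hunk does not show:

    // Hedged sketch: the leading parameters are assumed from the usual
    // gpt2_eval signature; only the trailing use_scratch flag is new here.
    std::vector<float> logits;
    size_t mem_per_token = 0;
    bool ok = gpt2_eval(model, n_threads, n_past, embd_inp, logits,
                        mem_per_token, file_format,
                        /*use_scratch=*/false); // skip scratch buffers entirely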
@@ -406,13 +406,21 @@ bool gpt2_eval(
     // use 2 scratch buffers
     // TODO: very hacky solution - reimplement in a more elegant way
     static size_t scr0_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr0 = malloc(scr0_size);
+    static void * scr0;
 
     static size_t scr1_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr1 = malloc(scr1_size);
+    static void * scr1;
 
-    if (mem_per_token > 0 && mem_per_token*N*1.05 > buf_size) {
-        const size_t buf_size_new = 64u*1024*1024 + 1.15*(mem_per_token*N); // add 10% to account for ggml object overhead
+    if(use_scratch)
+    {
+        scr0 = malloc(scr0_size);
+        scr1 = malloc(scr1_size);
+    }
+
+    size_t scratch_needed_mem = mem_per_token*N;
+
+    if (mem_per_token > 0 && scratch_needed_mem*1.1 > buf_size) {
+        const size_t buf_size_new = 64u*1024*1024 + 1.2*(scratch_needed_mem); // add 20% to account for ggml object overhead
         //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
 
         // reallocate
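
Two behavioral changes land together here: the static scratch buffers are no longer malloc'd unconditionally but only when use_scratch is set, and the reallocation policy is made slightly more generous (trigger at 1.1x the needed memory, regrow to a 64 MiB base plus 20% headroom). A standalone sketch of that policy, with hypothetical names (ensure_buffers is not from the commit); unlike the diff, the sketch adds a null check so repeated calls do not re-allocate the scratch regions:

    #include <cstdlib>
    #include <cstddef>

    // Illustrative sketch of the allocation policy above; names are hypothetical.
    static void * scr0 = nullptr;
    static void * scr1 = nullptr;

    static void ensure_buffers(bool use_scratch, size_t scr_size,
                               size_t mem_per_token, size_t n_tokens,
                               void ** buf, size_t * buf_size) {
        if (use_scratch && scr0 == nullptr) { // allocate lazily, only if wanted
            scr0 = malloc(scr_size);
            scr1 = malloc(scr_size);
        }
        const size_t needed = mem_per_token * n_tokens;
        if (mem_per_token > 0 && needed * 1.1 > *buf_size) {
            // 64 MiB base plus 20% headroom for ggml object overhead
            *buf_size = 64u * 1024 * 1024 + (size_t)(1.2 * needed);
            *buf = realloc(*buf, *buf_size);
        }
    }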
@@ -455,7 +463,9 @@ bool gpt2_eval(
     for (int il = 0; il < n_layer; ++il) {
         struct ggml_tensor * cur;
 
+        if(use_scratch){
         ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
+        }
 
         // norm
         {
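
This guarded ggml_set_scratch call is the pattern repeated at every phase boundary below: inside each transformer layer the graph alternates between the two regions, so attention intermediates (scr0) and feed-forward intermediates (scr1) overwrite one another instead of accumulating in ctx0's main buffer. A sketch of the alternation, assuming the legacy ggml scratch API (struct ggml_scratch with offs/size/data fields); the helper name is hypothetical:

    #include "ggml.h"

    // Hypothetical helper wrapping the repeated guarded calls in this diff.
    static void set_layer_scratch(struct ggml_context * ctx0, bool use_scratch,
                                  void * data, size_t size) {
        if (use_scratch) {
            // offs = 0: each phase starts writing at the top of its region
            ggml_set_scratch(ctx0, { 0, size, data, });
        }
    }

    // per layer:
    //   set_layer_scratch(ctx0, use_scratch, scr0, scr0_size); // attention
    //   ...
    //   set_layer_scratch(ctx0, use_scratch, scr1, scr1_size); // feed-forward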
@@ -603,7 +613,9 @@ bool gpt2_eval(
 
         struct ggml_tensor * inpFF = cur;
 
+        if(use_scratch){
         ggml_set_scratch(ctx0, { 0, scr1_size, scr1, });
+        }
 
         // feed-forward network
         {
@@ -661,7 +673,9 @@ bool gpt2_eval(
         inpL = ggml_add(ctx0, cur, inpFF);
     }
 
+    if(use_scratch){
     ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
+    }
 
     // norm
     {
@@ -677,7 +691,9 @@ bool gpt2_eval(
                 ggml_repeat(ctx0, model.ln_f_b, inpL));
     }
 
+    if(use_scratch){
     ggml_set_scratch(ctx0, { 0, 0, nullptr, });
+    }
 
     // inpL = WTE * inpL
     // [ 768, 50257] - model.lm_head
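
Passing an empty descriptor (size 0, null data) switches scratch off, so everything created afterwards, including the final logits, is allocated from ctx0's own buffer and is not clobbered when the scratch regions are reused. A minimal sketch of the tail end, grounded only in the lm_head comment above (the exact surrounding code is not shown in this hunk):

    if (use_scratch) {
        ggml_set_scratch(ctx0, { 0, 0, nullptr, }); // back to normal allocation
    }
    // output projection lands in the main context buffer and persists
    struct ggml_tensor * logits = ggml_mul_mat(ctx0, model.lm_head, inpL);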