bug fixes for openblas

Concedo 2023-06-23 22:45:22 +08:00
parent 43c2891afa
commit f39a746089
5 changed files with 40 additions and 70 deletions


@@ -383,7 +383,7 @@ bool gptj_eval(
         const std::vector<gpt_vocab::id> & embd_inp,
         std::vector<float> & embd_w,
         size_t & mem_per_token,
-        bool use_scratch=true) {
+        bool use_scratch) {
     const int N = embd_inp.size();
 
     const auto & hparams = model.hparams;
@@ -400,19 +400,14 @@ bool gptj_eval(
 
     // use 2 scratch buffers
     // TODO: very hacky solution - reimplement in a more elegant way
-    static size_t scr0_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr0;
+    static size_t scr0_size = 512u*1024*1024;
+    static size_t scr1_size = 512u*1024*1024;
-    static size_t scr1_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr1;
-    if(use_scratch)
-    {
-        scr0 = malloc(scr0_size);
-        scr1 = malloc(scr1_size);
-    }
+    static void * scr0 = malloc(scr0_size);
+    static void * scr1 = malloc(scr1_size);
 
-    if (mem_per_token > 0 && 32u*1024*1024 + mem_per_token*N*1.2 > buf_size) {
-        const size_t buf_size_new = 64u*1024*1024 + 1.2*(mem_per_token*N); // add 10% to account for ggml object overhead
+    if (mem_per_token > 0 && (mem_per_token*N*2 + 64u*1024*1024) > buf_size) {
+        const size_t buf_size_new = 320u*1024*1024 + 1.2*(mem_per_token*N); // add 10% to account for ggml object overhead
         //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
 
         // reallocate
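
The first hunk drops the `=true` default from the definition of `use_scratch`; presumably the default argument now lives only in the function's declaration, since C++ does not allow a default argument to be repeated on an out-of-line definition. The second hunk replaces the n_ctx-dependent scratch sizes and the `use_scratch`-guarded malloc calls with two fixed 512 MiB buffers allocated once through static initializers, and makes the eval-buffer growth check more conservative. A minimal standalone sketch of the resulting pattern follows; the function name `ensure_buffers`, its parameters, and the globals `buf`/`buf_size` are illustrative stand-ins, not the actual gptj_eval code.

#include <cstdio>
#include <cstdlib>

// Illustrative stand-ins for the eval buffer managed inside gptj_eval().
static size_t buf_size = 256u*1024*1024;
static void * buf      = malloc(buf_size);

void ensure_buffers(size_t mem_per_token, int N) {
    // Two fixed 512 MiB scratch buffers. The static initializers run only
    // on the first call, so each buffer is malloc'd exactly once and the
    // allocation no longer depends on use_scratch or n_ctx.
    static size_t scr0_size = 512u*1024*1024;
    static size_t scr1_size = 512u*1024*1024;
    static void * scr0 = malloc(scr0_size);
    static void * scr1 = malloc(scr1_size);
    (void)scr0; (void)scr1;

    // More conservative growth check: require headroom for 2*N tokens plus
    // 64 MiB, and when it trips grow to 320 MiB plus 1.2x the per-token
    // estimate (the same constants as the new lines in the hunk above).
    if (mem_per_token > 0 && (mem_per_token*N*2 + 64u*1024*1024) > buf_size) {
        const size_t buf_size_new = 320u*1024*1024 + 1.2*(mem_per_token*N);
        printf("reallocating buffer from %zu to %zu bytes\n", buf_size, buf_size_new);
        buf = realloc(buf, buf_size_new);
        buf_size = buf_size_new;
    }
}

int main() {
    // 4 MiB per token and 64 tokens needs 512 MiB + 64 MiB of headroom,
    // which exceeds the initial 256 MiB buffer and triggers the realloc path.
    ensure_buffers(4u*1024*1024, 64);
    return 0;
}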