mirror of https://github.com/LostRuins/koboldcpp.git, synced 2025-09-10 09:04:36 +00:00
integrated gpt2 support
This commit is contained in:
parent 52de932842
commit 14273fea7a
9 changed files with 926 additions and 30 deletions
gpttype_adapter.cpp (new file, 327 lines)
@@ -0,0 +1,327 @@
//This is Concedo's shitty adapter for adding python bindings for llama

//Considerations:
//Don't want to use pybind11 due to dependencies on MSVCC
//Make ZERO or MINIMAL changes to main.cpp - do not move its function declarations here!
//Leave main.cpp UNTOUCHED; we want to be able to update the repo and pull any changes automatically.
//No dynamic memory allocation! Set up structs with FIXED (known) shapes and sizes for ALL output fields
//Python will ALWAYS provide the memory, we just write to it.
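//[illustrative sketch, not in this commit] The fixed-shape pattern above in practice:
//the real load_model_inputs / generation_outputs structs live in model_adapter.h
//(not shown in this diff), so the field names here are hypothetical.
//
//  struct example_outputs
//  {
//      int status;          //plain scalar, fixed size
//      char text[16384];    //fixed-size buffer, filled with snprintf, never heap-allocated
//  };
//
//Python allocates such a struct via ctypes and passes it by reference, so this
//adapter only ever writes into caller-owned memory (see the snprintf calls below).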

#include <time.h>
#include "model_adapter.h"
#include "otherarch/otherarch.h"

//concat source files into one file for compilation purposes
#include "otherarch/utils.cpp"
#include "otherarch/gptj_v1.cpp"
#include "otherarch/gptj_v2.cpp"
#include "otherarch/gpt2_v1.cpp"

//return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
static FileFormat file_format = FileFormat::BADFORMAT;
static gpt_vocab vocab;
static gptj_model_v1 model_v1;
static gptj_model model_v2;
static gpt2_model model_gpt2;
static gpt_params params;
static int n_past = 0;
static int n_threads = 4;
static int n_batch = 8;
static std::string modelname;
static std::vector<gpt_vocab::id> last_n_tokens;
static std::vector<gpt_vocab::id> current_context_tokens;
static size_t mem_per_token = 0;
static std::vector<float> logits;

ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in_file_format)
{
    ggml_time_init();

    file_format = in_file_format;
    n_threads = params.n_threads = inputs.threads;
    n_batch = params.n_batch = inputs.batch_size;
    modelname = params.model = inputs.model_filename;

    if (file_format == FileFormat::GPT2)
    {
        ModelLoadResult res = gpt2_model_load(params.model, model_gpt2, vocab, file_format);
        if (res == ModelLoadResult::FAIL)
        {
            fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
            return res;
        }
        // determine the required inference memory per token:
        gpt2_eval(model_gpt2, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, file_format);
        return ModelLoadResult::SUCCESS;
    }
    else if (file_format == FileFormat::GPTJ1 || file_format == FileFormat::GPTJ2)
    {
        ModelLoadResult res = legacy_gptj_model_load(params.model, model_v1, vocab, file_format);
        if (res == ModelLoadResult::FAIL)
        {
            fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
            return res;
        }
        else if (res == ModelLoadResult::RETRY_LOAD)
        {
            printf("\nTensor Transposition Detected! Retrying GPT-J model loading...");
            return res;
        }
        // determine the required inference memory per token:
        legacy_gptj_eval(model_v1, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, file_format);
        return ModelLoadResult::SUCCESS;
    }
    else
    {
        ModelLoadResult loadresult = gptj_model_load(params.model, model_v2, vocab);
        if (loadresult == ModelLoadResult::FAIL)
        {
            fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
            return loadresult;
        }
        else if (loadresult == ModelLoadResult::RETRY_LOAD)
        {
            printf("\nTensor Transposition Detected! Retrying GPT-J model loading...");
            return loadresult;
        }

        // determine the required inference memory per token:
        gptj_eval(model_v2, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
        return ModelLoadResult::SUCCESS;
    }
}
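
//[illustrative sketch, not in this commit] Python's ctypes can only bind unmangled
//symbols, so an adapter like this is typically reached through a thin extern "C"
//layer; the names below are hypothetical (the real exported entry points are
//declared elsewhere in the repo).
//
//  extern "C"
//  {
//      bool example_load_model(const load_model_inputs inputs, int format)
//      {
//          return gpttype_load_model(inputs, (FileFormat)format) == ModelLoadResult::SUCCESS;
//      }
//  }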

generation_outputs gpttype_generate(const generation_inputs inputs, generation_outputs &output)
{
    params.prompt = inputs.prompt;
    params.seed = inputs.seed;
    params.n_predict = inputs.max_length;
    params.top_k = inputs.top_k;
    params.top_p = inputs.top_p;
    params.temp = inputs.temperature;
    params.repeat_last_n = inputs.rep_pen_range;
    params.repeat_penalty = inputs.rep_pen;
    params.n_batch = n_batch;
    params.n_threads = n_threads;

    if (params.repeat_last_n < 1)
    {
        params.repeat_last_n = 1;
    }
    if (params.top_k < 1)
    {
        params.top_k = 300; //to disable top_k we actually need to increase this value to a very high number
    }
    if (params.seed <= 0)
    {
        params.seed = time(NULL);
    }

    // tokenize the prompt
    std::vector<gpt_vocab::id> embd_inp = ::gpt_tokenize(vocab, params.prompt);

    //truncate to front of the prompt if it's too long
    int32_t nctx = 512;
    if (file_format == FileFormat::GPTJ1 || file_format == FileFormat::GPTJ2)
    {
        nctx = model_v1.hparams.n_ctx;
    }
    else if (file_format == FileFormat::GPTJ3)
    {
        nctx = model_v2.hparams.n_ctx;
    }
    else if (file_format == FileFormat::GPT2)
    {
        nctx = model_gpt2.hparams.n_ctx;
    }

    if (embd_inp.size() + params.n_predict > nctx)
    {
        int offset = embd_inp.size() - nctx + params.n_predict;
        embd_inp = std::vector<gpt_vocab::id>(embd_inp.begin() + offset, embd_inp.end());
    }
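    //[worked example, not in this commit] with nctx = 2048, n_predict = 100 and a
    //2500-token prompt: offset = 2500 - 2048 + 100 = 552, so the first 552 tokens
    //are dropped and the remaining 1948 leave exactly 100 slots free in the context
    //window for the tokens about to be generated.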

    //determine how much n_past we have to rewind from the current state
    std::vector<gpt_vocab::id> embd;

    int last_n_size = params.repeat_last_n;
    last_n_tokens.resize(last_n_size);

    std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
    n_past = 0;

    //fast forward the past based on identical tokens, stop once a divergence is noted
    int embd_inp_len = embd_inp.size();
    for (int i = 0; i < current_context_tokens.size(); ++i)
    {
        if (current_context_tokens[i] == embd_inp[i])
        {
            n_past += 1;
            last_n_tokens.push_back(current_context_tokens[i]);
        }
        else
        {
            break;
        }
        if ((i + 2) >= embd_inp_len)
        {
            break;
        }
    }

    last_n_tokens.erase(last_n_tokens.begin(), last_n_tokens.begin() + n_past);
    embd_inp.erase(embd_inp.begin(), embd_inp.begin() + n_past);
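    //[illustrative note, not in this commit] after this erase, the n_past tokens
    //shared with the previous request keep their cached state and only the divergent
    //suffix of embd_inp is re-evaluated; the (i + 2) guard above always leaves at
    //least one token in embd_inp, since eval needs a non-empty batch to produce
    //fresh logits for sampling.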

    //if using BLAS and prompt is big enough, switch to single thread and use a huge batch
    bool blasmode = false; //(embd_inp.size() >= 32 && ggml_cpu_has_blas());
    int original_batch = params.n_batch;
    int original_threads = params.n_threads;
    if (blasmode)
    {
        params.n_batch = 512;
        params.n_threads = 1;
    }
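    //[illustrative note, not in this commit] the rationale for the (currently
    //disabled) BLAS branch: a BLAS backend runs the big prompt matrix multiplies
    //with its own internal threading, so one caller thread avoids oversubscribing
    //cores while the huge batch feeds BLAS matrices large enough to be worthwhile.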

    current_context_tokens.resize(n_past);

    int remaining_tokens = params.n_predict;
    int input_consumed = 0;
    std::mt19937 rng(params.seed);
    std::string concat_output = "";

    bool startedsampling = false;

    timer_start();
    double time1 = 0, time2 = 0;
    unsigned int embd_inp_size = embd_inp.size();
    int32_t n_vocab = 0;
    if (file_format == FileFormat::GPTJ1 || file_format == FileFormat::GPTJ2)
    {
        n_vocab = model_v1.hparams.n_vocab;
    }
    else if (file_format == FileFormat::GPTJ3)
    {
        n_vocab = model_v2.hparams.n_vocab;
    }
    else if (file_format == FileFormat::GPT2)
    {
        n_vocab = model_gpt2.hparams.n_vocab;
    }
    else
    {
        printf("Bad format!");
    }

    printf("\n");
    while (remaining_tokens > 0)
    {
        gpt_vocab::id id = 0;
        // predict
        unsigned int embdsize = embd.size();
        if (embdsize > 0)
        {
            //print progress
            if (!startedsampling)
            {
                printf("\rProcessing Prompt%s (%d / %d tokens)", (blasmode ? " [BLAS]" : ""), input_consumed, embd_inp_size);
            }
            else
            {
                printf("\rGenerating (%d / %d tokens)", (1 + params.n_predict - remaining_tokens), params.n_predict);
            }

            bool evalres = false;

            //print_tok_vec(logits);
            if (file_format == FileFormat::GPT2)
            {
                evalres = gpt2_eval(model_gpt2, params.n_threads, n_past, embd, logits, mem_per_token, file_format);
            }
            else if (file_format == FileFormat::GPTJ1 || file_format == FileFormat::GPTJ2)
            {
                evalres = legacy_gptj_eval(model_v1, params.n_threads, n_past, embd, logits, mem_per_token, file_format);
            }
            else
            {
                evalres = gptj_eval(model_v2, params.n_threads, n_past, embd, logits, mem_per_token);
            }
            if (!evalres)
            {
                fprintf(stderr, "Failed to predict\n");
                snprintf(output.text, sizeof(output.text), "%s", "");
                output.status = 0;
                return output;
            }
        }

        n_past += embd.size();
        embd.clear();
        if ((int)embd_inp.size() <= input_consumed)
        {
            // out of user input, sample next token
            const float top_k = params.top_k;
            const float top_p = params.top_p;
            const float temp = params.temp;
            const float repeat_penalty = params.repeat_penalty;

            if (!startedsampling)
            {
                startedsampling = true;
                params.n_batch = original_batch;
                params.n_threads = original_threads;
                time1 = timer_check();
                timer_start();
                printf("\n");
            }

            {
                // clamp the logit of the eos token (50256) to at most zero, to discourage sampling it
                logits[50256] = (logits[50256] < 0 ? logits[50256] : 0);

                //gpt2 uses negative logits, so we can't just zero it outright

                id = gptj_sample_top_p_top_k(vocab, logits.data() + (logits.size() - n_vocab), last_n_tokens, repeat_penalty, top_k, top_p, temp, rng);
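                //[illustrative note, not in this commit] logits holds n_vocab scores
                //per evaluated token, so logits.data() + (logits.size() - n_vocab)
                //points at the final row: the distribution for the token that follows
                //the last one evaluated. The sampler conventionally penalizes tokens
                //in last_n_tokens by repeat_penalty, scales by 1/temp, keeps the top_k
                //candidates, trims to the smallest set whose cumulative probability
                //reaches top_p, and draws the next id from what remains.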

                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(id);
                current_context_tokens.push_back(id);
            }

            // add it to the context
            embd.push_back(id);

            // decrement remaining sampling budget
            --remaining_tokens;

            for (auto id : embd)
            {
                concat_output += vocab.id_to_token[id].c_str();
            }
        }
        else
        {
            // some user input remains from prompt or interaction, forward it to processing
            while ((int)embd_inp.size() > input_consumed)
            {
                embd.push_back(embd_inp[input_consumed]);
                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(embd_inp[input_consumed]);
                current_context_tokens.push_back(embd_inp[input_consumed]);
                ++input_consumed;
                if ((int)embd.size() >= params.n_batch)
                {
                    break;
                }
            }
        }
    }
    time2 = timer_check();
    printf("\nTime Taken - Processing:%.1fs, Generation:%.1fs, Total:%.1fs", time1, time2, (time1 + time2));
    fflush(stdout);
    output.status = 1;
    snprintf(output.text, sizeof(output.text), "%s", concat_output.c_str());
    return output;
}
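
//[illustrative sketch, not in this commit] end-to-end usage from the C++ side.
//The field names match the inputs.* reads above; the values are hypothetical.
//
//  load_model_inputs li;
//  li.threads = 4; li.batch_size = 8; li.model_filename = "models/gpt2.bin";
//  if (gpttype_load_model(li, FileFormat::GPT2) == ModelLoadResult::SUCCESS)
//  {
//      generation_inputs gi;
//      gi.prompt = "Hello"; gi.seed = -1; gi.max_length = 32;
//      gi.top_k = 40; gi.top_p = 0.9f; gi.temperature = 0.7f;
//      gi.rep_pen = 1.1f; gi.rep_pen_range = 64;
//      generation_outputs go;
//      gpttype_generate(gi, go); //on success, go.status == 1 and go.text holds the output
//  }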