Merge branch 'master' into concedo_experimental

# Conflicts:
#	.flake8
#	.github/workflows/python-lint.yml
#	flake.lock
#	ggml-cuda.cu
#	ggml-quants.c
#	llama.cpp
#	pocs/vdot/q8dot.cpp
#	pocs/vdot/vdot.cpp
#	tests/test-quantize-fns.cpp
#	tests/test-quantize-perf.cpp
commit 3cec37c2e0
Concedo, 2024-02-13 00:14:22 +08:00
35 changed files with 2814 additions and 1749 deletions

examples/server/server.cpp

@@ -37,6 +37,7 @@ struct server_params
     std::string hostname = "127.0.0.1";
     std::vector<std::string> api_keys;
     std::string public_path = "examples/server/public";
+    std::string chat_template = "chatml";
     int32_t port = 8080;
     int32_t read_timeout = 600;
     int32_t write_timeout = 600;
@@ -626,18 +627,36 @@ struct llama_server_context
             const int n_vocab = llama_n_vocab(model);
             for (const auto &el : *logit_bias)
             {
-                if (el.is_array() && el.size() == 2 && el[0].is_number_integer())
+                if (el.is_array() && el.size() == 2)
                 {
-                    llama_token tok = el[0].get<llama_token>();
-                    if (tok >= 0 && tok < n_vocab)
+                    float bias;
+                    if (el[1].is_number())
                     {
-                        if (el[1].is_number())
+                        bias = el[1].get<float>();
+                    }
+                    else if (el[1].is_boolean() && !el[1].get<bool>())
+                    {
+                        bias = -INFINITY;
+                    }
+                    else
+                    {
+                        continue;
+                    }
+
+                    if (el[0].is_number_integer())
+                    {
+                        llama_token tok = el[0].get<llama_token>();
+                        if (tok >= 0 && tok < n_vocab)
                         {
-                            slot->sparams.logit_bias[tok] = el[1].get<float>();
+                            slot->sparams.logit_bias[tok] = bias;
                         }
-                        else if (el[1].is_boolean() && !el[1].get<bool>())
+                    }
+                    else if (el[0].is_string())
+                    {
+                        auto toks = llama_tokenize(model, el[0].get<std::string>(), false);
+                        for (auto tok : toks)
                         {
-                            slot->sparams.logit_bias[tok] = -INFINITY;
+                            slot->sparams.logit_bias[tok] = bias;
                         }
                     }
                 }
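
Editor's note: after the refactor above, a logit_bias array may mix integer token ids and string keys, with the bias given either as a number or as false (a ban, mapped to -INFINITY). The standalone sketch below is not part of the commit; it mirrors that acceptance logic using nlohmann::json, with a placeholder tokenize() standing in for llama_tokenize() and a hard-coded n_vocab, both assumptions made only so the sketch compiles on its own.

// Standalone sketch (not part of the commit): shows the logit_bias shapes
// the new parsing accepts. tokenize() is a stand-in for llama_tokenize().
#include <cmath>
#include <cstdio>
#include <map>
#include <string>
#include <vector>
#include <nlohmann/json.hpp>

using json = nlohmann::json;
using llama_token = int32_t;

static std::vector<llama_token> tokenize(const std::string &text) {
    // placeholder: one fake id per character, just to keep the sketch runnable
    std::vector<llama_token> out;
    for (char c : text) out.push_back(static_cast<llama_token>(c));
    return out;
}

int main() {
    // a numeric bias, a boolean-false ban, and a string key that is tokenized first
    json logit_bias = json::parse(R"([[15043, 2.0], [50256, false], ["Hello", -1.5]])");

    const int n_vocab = 32000; // assumed vocab size for the range check
    std::map<llama_token, float> bias_map;

    for (const auto &el : logit_bias) {
        if (!(el.is_array() && el.size() == 2)) continue;

        float bias;
        if (el[1].is_number()) {
            bias = el[1].get<float>();
        } else if (el[1].is_boolean() && !el[1].get<bool>()) {
            bias = -INFINITY;   // "false" bans the token
        } else {
            continue;           // anything else is ignored
        }

        if (el[0].is_number_integer()) {
            llama_token tok = el[0].get<llama_token>();
            if (tok >= 0 && tok < n_vocab) bias_map[tok] = bias;
        } else if (el[0].is_string()) {
            for (llama_token tok : tokenize(el[0].get<std::string>())) bias_map[tok] = bias;
        }
    }

    for (const auto &kv : bias_map) printf("token %d -> bias %f\n", kv.first, kv.second);
    return 0;
}
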
@@ -1860,6 +1879,8 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
     printf(" types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
     printf(" -gan N, --grp-attn-n N set the group attention factor to extend context size through self-extend(default: 1=disabled), used together with group attention width `--grp-attn-w`");
     printf(" -gaw N, --grp-attn-w N set the group attention width to extend context size through self-extend(default: 512), used together with group attention factor `--grp-attn-n`");
+    printf(" --chat-template FORMAT_NAME");
+    printf(" set chat template, possible valus is: llama2, chatml (default %s)", sparams.chat_template.c_str());
     printf("\n");
 }
@@ -2291,6 +2312,21 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
             log_set_target(stdout);
             LOG_INFO("logging to file is disabled.", {});
         }
+        else if (arg == "--chat-template")
+        {
+            if (++i >= argc)
+            {
+                invalid_param = true;
+                break;
+            }
+            std::string value(argv[i]);
+            if (value != "chatml" && value != "llama2") {
+                fprintf(stderr, "error: chat template can be \"llama2\" or \"chatml\", but got: %s\n", value.c_str());
+                invalid_param = true;
+                break;
+            }
+            sparams.chat_template = value;
+        }
         else if (arg == "--override-kv")
         {
             if (++i >= argc) {
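
Editor's note: as an isolated illustration (not llama.cpp code, only a restatement of the hunk above), the new --chat-template handling consumes the next argv entry and rejects anything other than chatml or llama2. A minimal sketch:

// Standalone sketch of the consume-and-validate pattern added above.
#include <cstdio>
#include <string>

int main(int argc, char **argv) {
    std::string chat_template = "chatml"; // default, matching server_params
    bool invalid_param = false;

    for (int i = 1; i < argc; i++) {
        std::string arg = argv[i];
        if (arg == "--chat-template") {
            if (++i >= argc) {            // flag given without a value
                invalid_param = true;
                break;
            }
            std::string value(argv[i]);
            if (value != "chatml" && value != "llama2") {
                fprintf(stderr, "error: chat template can be \"llama2\" or \"chatml\", but got: %s\n", value.c_str());
                invalid_param = true;
                break;
            }
            chat_template = value;
        }
    }

    if (invalid_param) return 1;
    printf("chat template: %s\n", chat_template.c_str());
    return 0;
}

With the real server this corresponds to launching with --chat-template llama2; the chosen value is stored in sparams.chat_template and, per the next hunk, passed to oaicompat_completion_params_parse for the /v1/chat/completions endpoint.
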
@@ -2744,13 +2780,13 @@ int main(int argc, char **argv)
     // TODO: add mount point without "/v1" prefix -- how?
-    svr.Post("/v1/chat/completions", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
+    svr.Post("/v1/chat/completions", [&llama, &validate_api_key, &sparams](const httplib::Request &req, httplib::Response &res)
             {
                 res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
                 if (!validate_api_key(req, res)) {
                     return;
                 }
-                json data = oaicompat_completion_params_parse(json::parse(req.body));
+                json data = oaicompat_completion_params_parse(json::parse(req.body), sparams.chat_template);
                 const int task_id = llama.queue_tasks.get_new_id();
                 llama.queue_results.add_waiting_task_id(task_id);