Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-11 09:34:37 +00:00)
Merge branch 'upstream' into concedo_experimental
# Conflicts:
#	.github/workflows/build.yml
#	CMakeLists.txt
#	README.md
#	ci/run.sh
#	llama.cpp
#	models/ggml-vocab-llama-bpe.gguf.inp
#	models/ggml-vocab-llama-bpe.gguf.out
#	requirements.txt
#	scripts/compare-llama-bench.py
#	scripts/sync-ggml.last
#	tests/CMakeLists.txt
#	tests/test-backend-ops.cpp
#	tests/test-grammar-integration.cpp
#	tests/test-tokenizer-1-bpe.cpp
Commit: 2ee808a747
66 changed files with 3034 additions and 1821 deletions
```diff
@@ -48,7 +48,7 @@ page cache before using this. See https://github.com/ggerganov/llama.cpp/issues/
 - `--path`: Path from which to serve static files. Default: disabled
 - `--api-key`: Set an api key for request authorization. By default, the server responds to every request. With an api key set, the requests must have the Authorization header set with the api key as Bearer token. May be used multiple times to enable multiple valid keys.
 - `--api-key-file`: Path to file containing api keys delimited by new lines. If set, requests must include one of the keys for access. May be used in conjunction with `--api-key`s.
-- `--embedding`: Enable embedding extraction. Default: disabled
+- `--embeddings`: Enable embedding vector output and the OAI compatible endpoint /v1/embeddings. Physical batch size (`--ubatch-size`) must be carefully defined. Default: disabled
 - `-np N`, `--parallel N`: Set the number of slots for process requests. Default: `1`
 - `-cb`, `--cont-batching`: Enable continuous batching (a.k.a dynamic batching). Default: disabled
 - `-spf FNAME`, `--system-prompt-file FNAME` Set a file to load a system prompt (initial prompt of all slots). This is useful for chat applications. [See more](#change-system-prompt-on-runtime)
```
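The `--api-key` and `--embeddings` flags described above define the request flow: with a key configured, clients must send it in the Authorization header as a Bearer token, and embeddings are exposed on the OAI compatible `/v1/embeddings` endpoint. A minimal client-side sketch follows; the host/port, the key value, and the exact response shape are illustrative assumptions, not something stated in this diff.

```python
# Minimal sketch: query the server's OAI compatible embeddings endpoint.
# Assumes the server was started with `--embeddings --api-key sk-local-demo`
# and listens on localhost:8080; both values are illustrative assumptions.
import json
import urllib.request

payload = json.dumps({"input": "Hello, world"}).encode("utf-8")
req = urllib.request.Request(
    "http://localhost:8080/v1/embeddings",
    data=payload,
    headers={
        "Content-Type": "application/json",
        "Authorization": "Bearer sk-local-demo",  # required once --api-key is set
    },
)
with urllib.request.urlopen(req) as resp:
    body = json.load(resp)

# Assuming an OpenAI-style response shape for the embedding vector.
print(len(body["data"][0]["embedding"]))
```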
```diff
@@ -652,9 +652,6 @@ struct server_context {
     std::string system_prompt;
     std::vector<llama_token> system_tokens;
 
-    std::string name_user; // this should be the antiprompt
-    std::string name_assistant;
-
     // slots / clients
     std::vector<server_slot> slots;
     json default_generation_settings_for_props;
```
```diff
@@ -674,6 +671,8 @@ struct server_context {
             llama_free_model(model);
+            model = nullptr;
         }
 
+        llama_batch_free(batch);
     }
 
     bool load_model(const gpt_params & params_) {
```
```diff
@@ -1099,15 +1098,11 @@ struct server_context {
         system_need_update = false;
     }
 
-    void system_prompt_set(const json & sys_props) {
-        system_prompt  = sys_props.value("prompt", "");
-        name_user      = sys_props.value("anti_prompt", "");
-        name_assistant = sys_props.value("assistant_name", "");
+    bool system_prompt_set(const std::string & sys_prompt) {
+        system_prompt = sys_prompt;
 
         LOG_VERBOSE("system prompt process", {
             {"system_prompt",  system_prompt},
-            {"name_user",      name_user},
-            {"name_assistant", name_assistant},
         });
 
         // release all slots
```
```diff
@@ -1116,6 +1111,7 @@ struct server_context {
         }
 
         system_need_update = true;
+        return true;
     }
 
     bool process_token(completion_token_output & result, server_slot & slot) {
```
```diff
@@ -1535,7 +1531,8 @@ struct server_context {
                 }
 
                 if (task.data.contains("system_prompt")) {
-                    system_prompt_set(task.data.at("system_prompt"));
+                    std::string sys_prompt = json_value(task.data, "system_prompt", std::string());
+                    system_prompt_set(sys_prompt);
 
                     for (server_slot & slot : slots) {
                         slot.n_past = 0;
```
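The server.cpp hunks above change `system_prompt_set` to take a plain string: the old JSON object form with `prompt`, `anti_prompt`, and `assistant_name` keys is dropped, and a task's `system_prompt` field is now read as a string via `json_value`. A small sketch of the two payload shapes, using only the keys visible in the diff, is below; the surrounding request that would carry such a field is not shown here.

```python
# Illustrative only: shape of the "system_prompt" value before and after this change.
old_style = {
    "system_prompt": {                                # pre-change: a JSON object
        "prompt": "You are a helpful assistant.",
        "anti_prompt": "User:",
        "assistant_name": "Assistant:",
    },
}

new_style = {
    "system_prompt": "You are a helpful assistant.",  # post-change: a plain string
}

print(old_style["system_prompt"]["prompt"] == new_style["system_prompt"])  # True
```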
```diff
@@ -2271,10 +2268,10 @@ struct server_context {
 
             const size_t n_probs = std::min(cur_p.size, (size_t) slot.sparams.n_probs);
             if (n_probs > 0) {
-                const size_t n_considered = slot.ctx_sampling->n_considered;
+                const size_t n_valid = slot.ctx_sampling->n_valid;
 
                 // Make sure at least n_probs top tokens are at the front of the vector:
-                if (slot.sparams.temp == 0.0f && n_probs > n_considered) {
+                if (slot.sparams.temp == 0.0f && n_probs > n_valid) {
                     llama_sample_top_k(ctx, &cur_p, n_probs, 0);
                 }
 
```
```diff
@@ -2290,7 +2287,7 @@ struct server_context {
                 for (size_t i = 0; i < n_probs; ++i) {
                     result.probs.push_back({
                         cur_p.data[i].id,
-                        i >= n_considered ? 0.0f : cur_p.data[i].p // Tokens filtered out due to e.g. top_k have 0 probability.
+                        i >= n_valid ? 0.0f : cur_p.data[i].p // Tokens filtered out due to e.g. top_k have 0 probability.
                     });
                 }
             }
```
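The two hunks above only rename `n_considered` to `n_valid`; the reporting rule is unchanged: candidates past the first `n_valid` entries were filtered out by samplers such as top-k, so their probability is reported as 0. A short Python sketch of that rule on made-up candidate data:

```python
# Sketch of the probability-reporting rule above, on fabricated data.
# Each candidate is (token_id, p); entries at index >= n_valid report p = 0.0.
candidates = [(101, 0.52), (7, 0.31), (42, 0.12), (9, 0.05)]  # assumed sorted by p
n_valid = 2   # e.g. top_k = 2 left only two valid candidates
n_probs = 4   # how many entries the client asked for

probs = [
    {"id": tok_id, "p": 0.0 if i >= n_valid else p}
    for i, (tok_id, p) in enumerate(candidates[:n_probs])
]
print(probs)  # tokens 42 and 9 come back with probability 0.0
```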
```diff
@@ -2919,7 +2916,7 @@ int main(int argc, char ** argv) {
     server_params_parse(argc, argv, sparams, params);
 
     if (!sparams.system_prompt.empty()) {
-        ctx_server.system_prompt_set(json::parse(sparams.system_prompt));
+        ctx_server.system_prompt_set(sparams.system_prompt);
     }
 
     if (params.model_alias == "unknown") {
```
```diff
@@ -3408,8 +3405,7 @@ int main(int argc, char ** argv) {
     const auto handle_props = [&ctx_server](const httplib::Request & req, httplib::Response & res) {
         res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
         json data = {
-            { "user_name",                   ctx_server.name_user.c_str() },
-            { "assistant_name",              ctx_server.name_assistant.c_str() },
+            { "system_prompt",               ctx_server.system_prompt.c_str() },
             { "default_generation_settings", ctx_server.default_generation_settings_for_props },
             { "total_slots",                 ctx_server.params.n_parallel }
         };
```
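As reconstructed above, `/props` stops reporting `user_name` and `assistant_name` and instead exposes the current system prompt next to the default generation settings and the slot count. A minimal sketch of reading it; the host/port and the exact set of returned keys are assumptions:

```python
# Sketch: read the server's /props endpoint (localhost:8080 is an assumption).
import json
import urllib.request

with urllib.request.urlopen("http://localhost:8080/props") as resp:
    props = json.load(resp)

print(props.get("system_prompt"))
print(props.get("total_slots"))
print(props.get("default_generation_settings"))
```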
```diff
@@ -887,6 +887,7 @@ async def oai_chat_completions(user_prompt,
                                base_path,
                                async_client,
                                debug=False,
+                               temperature=None,
                                model=None,
                                n_predict=None,
                                enable_streaming=None,
```
```diff
@@ -913,7 +914,8 @@ async def oai_chat_completions(user_prompt,
         "model": model,
         "max_tokens": n_predict,
         "stream": enable_streaming,
-        "seed": seed
+        "temperature": temperature if temperature is not None else 0.0,
+        "seed": seed,
     }
     if response_format is not None:
         payload['response_format'] = response_format
```
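The test helper now always sends an explicit `temperature` (falling back to 0.0) alongside `seed`, so the chat-completion requests stay deterministic no matter what default the server applies. A sketch of the resulting payload construction, with example values and a hypothetical helper name:

```python
# Hypothetical helper mirroring how the test builds its request payload.
def build_payload(user_prompt, model=None, n_predict=None, enable_streaming=None,
                  seed=42, temperature=None):
    return {
        "messages": [{"role": "user", "content": user_prompt}],
        "model": model,
        "max_tokens": n_predict,
        "stream": enable_streaming,
        # explicit temperature keeps tests deterministic even if the server default changes
        "temperature": temperature if temperature is not None else 0.0,
        "seed": seed,
    }

print(build_payload("Say hello", model="test-model", n_predict=16, enable_streaming=False))
```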
```diff
@@ -978,7 +980,8 @@ async def oai_chat_completions(user_prompt,
                 max_tokens=n_predict,
                 stream=enable_streaming,
                 response_format=payload.get('response_format'),
-                seed=seed
+                seed=seed,
+                temperature=payload['temperature']
             )
         except openai.error.AuthenticationError as e:
             if expect_api_error is not None and expect_api_error:
```
```diff
@@ -371,7 +371,7 @@ static json oaicompat_completion_params_parse(
     llama_params["presence_penalty"] = json_value(body, "presence_penalty", 0.0);
     llama_params["seed"]             = json_value(body, "seed",             LLAMA_DEFAULT_SEED);
     llama_params["stream"]           = json_value(body, "stream",           false);
-    llama_params["temperature"]      = json_value(body, "temperature",      0.0);
+    llama_params["temperature"]      = json_value(body, "temperature",      1.0);
     llama_params["top_p"]            = json_value(body, "top_p",            1.0);
 
     // Apply chat template to the list of messages
```
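The last hunk only changes the fallback passed to `json_value`: the default is used when the request body omits `temperature`, so OAI compatible requests without an explicit value now default to 1.0 (the OpenAI API default) instead of 0.0. A tiny Python stand-in for that lookup-with-default behaviour, purely illustrative and not the C++ implementation:

```python
# Stand-in for json_value(body, key, default): use the client's value when present,
# otherwise fall back to the supplied default.
def json_value(body: dict, key: str, default):
    return body.get(key, default)

body_without_temp = {"messages": [], "top_p": 0.9}
body_with_temp    = {"messages": [], "temperature": 0.2}

print(json_value(body_without_temp, "temperature", 1.0))  # 1.0 -> new default applies
print(json_value(body_with_temp,    "temperature", 1.0))  # 0.2 -> client value wins
```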