Mirror of https://github.com/LostRuins/koboldcpp.git, synced 2025-09-15 03:19:41 +00:00
Merge branch 'upstream' into concedo_experimental
# Conflicts:
#	.github/workflows/build.yml
#	README.md
#	ci/run.sh
#	docs/build.md
#	examples/CMakeLists.txt
#	examples/parallel/parallel.cpp
#	ggml/CMakeLists.txt
#	ggml/src/CMakeLists.txt
#	scripts/server-bench.py
#	src/llama-kv-cache-unified.cpp
#	tests/test-backend-ops.cpp
#	tools/batched-bench/batched-bench.cpp
#	tools/server/README.md
commit bdff33e0de

47 changed files with 3128 additions and 509 deletions
@@ -127,7 +127,6 @@ struct slot_params {
     std::vector<std::string> response_fields;
     bool timings_per_token = false;
     bool post_sampling_probs = false;
-    bool ignore_eos = false;
 
     struct common_params_sampling sampling;
     struct common_params_speculative speculative;
@@ -441,7 +440,6 @@ struct server_task {
 
         {
             params.sampling.logit_bias.clear();
-            params.ignore_eos = json_value(data, "ignore_eos", false);
 
             const auto & logit_bias = data.find("logit_bias");
             if (logit_bias != data.end() && logit_bias->is_array()) {
@@ -472,6 +470,13 @@ struct server_task {
                     }
                 }
             }
+
+            params.sampling.ignore_eos = json_value(data, "ignore_eos", params_base.sampling.ignore_eos);
+            if (params.sampling.ignore_eos) {
+                params.sampling.logit_bias.insert(
+                        params.sampling.logit_bias.end(),
+                        defaults.sampling.logit_bias_eog.begin(), defaults.sampling.logit_bias_eog.end());
+            }
         }
 
         {
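Note: taken together, the hunks above move ignore_eos from slot_params into common_params_sampling: the server now reads the request flag into params.sampling.ignore_eos and, when it is set, splices the precomputed end-of-generation (EOG) biases from defaults.sampling.logit_bias_eog into the request's logit_bias list. Below is a minimal sketch of that flow; the field names and the insert call follow the diff, while the llama_logit_bias layout, the apply_ignore_eos helper, and the toy token ids are illustrative stand-ins, not part of the commit.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

using llama_token = std::int32_t;

// Stand-in for llama.cpp's (token, bias) pair.
struct llama_logit_bias {
    llama_token token;
    float       bias;
};

// Simplified stand-in for common_params_sampling: only the fields the diff touches.
struct sampling_params {
    bool ignore_eos = false;
    std::vector<llama_logit_bias> logit_bias;      // per-request biases applied at sampling time
    std::vector<llama_logit_bias> logit_bias_eog;  // one -INFINITY entry per end-of-generation token
};

// Mirrors the logic added at @@ -472,6 +470,13 @@: take the request flag,
// then splice the precomputed EOG biases into the request's bias list.
static void apply_ignore_eos(sampling_params & params, const sampling_params & defaults, bool requested) {
    params.ignore_eos = requested;
    if (params.ignore_eos) {
        params.logit_bias.insert(
                params.logit_bias.end(),
                defaults.logit_bias_eog.begin(), defaults.logit_bias_eog.end());
    }
}

int main() {
    sampling_params defaults;
    defaults.logit_bias_eog = {{2, -INFINITY}, {32000, -INFINITY}};  // hypothetical EOS/EOT ids

    sampling_params params;
    apply_ignore_eos(params, defaults, /*requested=*/true);

    // Both EOG tokens end up biased to -inf, so they can never be sampled.
    for (const auto & lb : params.logit_bias) {
        std::printf("token %d -> bias %f\n", lb.token, lb.bias);
    }
    return 0;
}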
@@ -1898,7 +1903,6 @@ struct server_context {
 
     bool clean_kv_cache = true;
     bool add_bos_token = true;
-    bool has_eos_token = false;
 
     int32_t n_ctx; // total context for all clients / slots
 
@@ -1957,7 +1961,6 @@ struct server_context {
         n_ctx = llama_n_ctx(ctx);
 
         add_bos_token = llama_vocab_get_add_bos(vocab);
-        has_eos_token = llama_vocab_eos(vocab) != LLAMA_TOKEN_NULL;
 
         if (!params_base.speculative.model.path.empty() || !params_base.speculative.model.hf_repo.empty()) {
             SRV_INF("loading draft model '%s'\n", params_base.speculative.model.path.c_str());
@@ -2217,10 +2220,6 @@ struct server_context {
             slot.params.n_predict = slot.n_predict;
         }
 
-        if (slot.params.ignore_eos && has_eos_token) {
-            slot.params.sampling.logit_bias.push_back({llama_vocab_eos(vocab), -INFINITY});
-        }
-
         {
             if (slot.smpl != nullptr) {
                 common_sampler_free(slot.smpl);
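Note: the server_context hunks remove the last uses of has_eos_token. A slot no longer pushes a single {llama_vocab_eos(vocab), -INFINITY} bias; instead it relies on the logit_bias_eog list prepared with the sampling defaults, which presumably covers every end-of-generation token (EOS, EOT, ...) rather than EOS alone. A hedged before/after sketch with a toy vocabulary follows; toy_vocab and its token ids are illustrative, not from the commit.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

using llama_token = std::int32_t;

struct llama_logit_bias {
    llama_token token;
    float       bias;
};

// Toy stand-in for llama_vocab with one EOS and one EOT token.
struct toy_vocab {
    llama_token eos = 2;
    llama_token eot = 32007;
};

// Old behavior (the block removed at @@ -2217,10 +2220,6 @@): bias only the EOS id.
std::vector<llama_logit_bias> bias_eos_only(const toy_vocab & vocab) {
    return {{vocab.eos, -INFINITY}};
}

// New behavior (sketch): a list built once from the vocab covering every
// end-of-generation token, matching what logit_bias_eog is assumed to hold.
std::vector<llama_logit_bias> bias_all_eog(const toy_vocab & vocab) {
    return {{vocab.eos, -INFINITY}, {vocab.eot, -INFINITY}};
}

int main() {
    toy_vocab vocab;
    std::printf("old: %zu token suppressed, new: %zu tokens suppressed\n",
                bias_eos_only(vocab).size(), bias_all_eog(vocab).size());
    return 0;
}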