Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-10 17:14:36 +00:00)
Merge branch 'upstream' into concedo_experimental
# Conflicts:
#	.github/workflows/build.yml
#	README.md
#	examples/CMakeLists.txt
#	examples/batched/batched.cpp
#	examples/gritlm/gritlm.cpp
#	examples/llama.android/llama/build.gradle.kts
#	examples/main/README.md
#	examples/retrieval/retrieval.cpp
#	examples/server/CMakeLists.txt
#	examples/server/README.md
#	ggml/CMakeLists.txt
#	ggml/src/ggml-cpu/CMakeLists.txt
#	ggml/src/ggml.c
#	scripts/compare-commits.sh
#	scripts/sync-ggml.last
#	tests/CMakeLists.txt
#	tests/test-backend-ops.cpp
#	tests/test-chat-template.cpp
#	tests/test-sampling.cpp
Commit ee486bad3e
59 changed files with 20531 additions and 13185 deletions
common/arg.cpp

```diff
@@ -120,29 +120,33 @@ std::string common_arg::to_string() {
 // utils
 //
 
-static void common_params_handle_model_default(common_params & params) {
-    if (!params.hf_repo.empty()) {
+static void common_params_handle_model_default(
+        std::string & model,
+        std::string & model_url,
+        std::string & hf_repo,
+        std::string & hf_file) {
+    if (!hf_repo.empty()) {
         // short-hand to avoid specifying --hf-file -> default it to --model
-        if (params.hf_file.empty()) {
-            if (params.model.empty()) {
+        if (hf_file.empty()) {
+            if (model.empty()) {
                 throw std::invalid_argument("error: --hf-repo requires either --hf-file or --model\n");
             }
-            params.hf_file = params.model;
-        } else if (params.model.empty()) {
+            hf_file = model;
+        } else if (model.empty()) {
             // this is to avoid different repo having same file name, or same file name in different subdirs
-            std::string filename = params.hf_repo + "_" + params.hf_file;
+            std::string filename = hf_repo + "_" + hf_file;
             // to make sure we don't have any slashes in the filename
             string_replace_all(filename, "/", "_");
-            params.model = fs_get_cache_file(filename);
+            model = fs_get_cache_file(filename);
         }
-    } else if (!params.model_url.empty()) {
-        if (params.model.empty()) {
-            auto f = string_split<std::string>(params.model_url, '#').front();
+    } else if (!model_url.empty()) {
+        if (model.empty()) {
+            auto f = string_split<std::string>(model_url, '#').front();
             f = string_split<std::string>(f, '?').front();
-            params.model = fs_get_cache_file(string_split<std::string>(f, '/').back());
+            model = fs_get_cache_file(string_split<std::string>(f, '/').back());
         }
-    } else if (params.model.empty()) {
-        params.model = DEFAULT_MODEL_PATH;
+    } else if (model.empty()) {
+        model = DEFAULT_MODEL_PATH;
     }
 }
```
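In this first hunk the helper drops its `common_params &` parameter in favor of four individual string references, so the same default-resolution order (HF repo, then model URL, then `DEFAULT_MODEL_PATH`) can be reused for the vocoder fields later in the diff. The cache-filename rule it encodes is easy to miss in the churn; below is a minimal standalone sketch of just that rule, with a stand-in `replace_all` for llama.cpp's `string_replace_all` and made-up repo/file names.

```cpp
// Standalone sketch of the cache-filename rule from the hunk above.
// replace_all stands in for llama.cpp's string_replace_all; the repo and
// file names are invented for illustration.
#include <iostream>
#include <string>

static void replace_all(std::string & s, const std::string & from, const std::string & to) {
    for (size_t pos = 0; (pos = s.find(from, pos)) != std::string::npos; pos += to.size()) {
        s.replace(pos, from.size(), to);
    }
}

int main() {
    std::string hf_repo = "someuser/some-model";     // --hf-repo
    std::string hf_file = "subdir/model-q8_0.gguf";  // --hf-file
    std::string filename = hf_repo + "_" + hf_file;  // join repo and file
    replace_all(filename, "/", "_");                 // no slashes in a cache entry
    std::cout << filename << "\n";                   // someuser_some-model_subdir_model-q8_0.gguf
}
```

Joining repo and file before flattening the slashes keeps two repos that ship the same file name, or the same file name in different subdirectories, from colliding in the cache.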
```diff
@@ -277,7 +281,9 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context
         throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n");
     }
 
-    common_params_handle_model_default(params);
+    // TODO: refactor model params in a common struct
+    common_params_handle_model_default(params.model, params.model_url, params.hf_repo, params.hf_file);
+    common_params_handle_model_default(params.vocoder.model, params.vocoder.model_url, params.vocoder.hf_repo, params.vocoder.hf_file);
 
     if (params.escape) {
         string_process_escapes(params.prompt);
```
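The `TODO` added here names the obvious follow-up: the main model and the vocoder carry the same four fields, so a shared struct would let the helper take one argument per model again. A hypothetical sketch of that bundle (not part of this diff; the field names mirror the references the helper now takes):

```cpp
// Hypothetical bundle the TODO points toward. Both params.{model,
// model_url,hf_repo,hf_file} and params.vocoder.* could collapse into it.
#include <string>

struct common_params_model {
    std::string model;     // local path (-m / --model)
    std::string model_url; // --model-url
    std::string hf_repo;   // -hfr / --hf-repo
    std::string hf_file;   // -hff / --hf-file
};

// The helper could then shrink back to a single parameter per model:
// static void common_params_handle_model_default(common_params_model & m);
```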
```diff
@@ -843,7 +849,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         }
     ).set_sparam());
     add_opt(common_arg(
-        {"--sampling-seq"}, "SEQUENCE",
+        {"--sampling-seq", "--sampler-seq"}, "SEQUENCE",
         string_format("simplified sequence for samplers that will be used (default: %s)", sampler_type_chars.c_str()),
         [](common_params & params, const std::string & value) {
            params.sampling.samplers = common_sampler_types_from_chars(value);
```
```diff
@@ -856,13 +862,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.sampling.ignore_eos = true;
         }
     ).set_sparam());
-    add_opt(common_arg(
-        {"--penalize-nl"},
-        string_format("penalize newline tokens (default: %s)", params.sampling.penalize_nl ? "true" : "false"),
-        [](common_params & params) {
-            params.sampling.penalize_nl = true;
-        }
-    ).set_sparam());
     add_opt(common_arg(
         {"--temp"}, "N",
         string_format("temperature (default: %.1f)", (double)params.sampling.temp),
```
```diff
@@ -917,6 +916,9 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         {"--repeat-last-n"}, "N",
         string_format("last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)", params.sampling.penalty_last_n),
         [](common_params & params, int value) {
+            if (value < -1) {
+                throw std::runtime_error(string_format("error: invalid repeat-last-n = %d\n", value));
+            }
             params.sampling.penalty_last_n = value;
             params.sampling.n_prev = std::max(params.sampling.n_prev, params.sampling.penalty_last_n);
         }
```
```diff
@@ -971,6 +973,9 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         {"--dry-penalty-last-n"}, "N",
         string_format("set DRY penalty for the last n tokens (default: %d, 0 = disable, -1 = context size)", params.sampling.dry_penalty_last_n),
         [](common_params & params, int value) {
+            if (value < -1) {
+                throw std::runtime_error(string_format("error: invalid dry-penalty-last-n = %d\n", value));
+            }
             params.sampling.dry_penalty_last_n = value;
         }
     ).set_sparam());
```
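The previous two hunks add the same guard to `--repeat-last-n` and `--dry-penalty-last-n`: `-1` means use the context size, `0` disables the window, and anything below `-1` now fails at argument parsing instead of leaking into sampler state. A distilled sketch of the shared check (the helper name `parse_last_n` is illustrative, not from the diff):

```cpp
// Illustrative distillation of the range check added for --repeat-last-n
// and --dry-penalty-last-n.
#include <stdexcept>
#include <string>

static int parse_last_n(int value, const std::string & flag) {
    if (value < -1) { // -1 = context size, 0 = disabled, >0 = window size
        throw std::runtime_error("error: invalid " + flag + " = " + std::to_string(value));
    }
    return value;
}

int main() {
    return parse_last_n(64, "--repeat-last-n") == 64 ? 0 : 1;
}
```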
```diff
@@ -1583,6 +1588,20 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.hf_file = value;
         }
     ).set_env("LLAMA_ARG_HF_FILE"));
+    add_opt(common_arg(
+        {"-hfrv", "--hf-repo-v"}, "REPO",
+        "Hugging Face model repository for the vocoder model (default: unused)",
+        [](common_params & params, const std::string & value) {
+            params.vocoder.hf_repo = value;
+        }
+    ).set_env("LLAMA_ARG_HF_REPO_V"));
+    add_opt(common_arg(
+        {"-hffv", "--hf-file-v"}, "FILE",
+        "Hugging Face model file for the vocoder model (default: unused)",
+        [](common_params & params, const std::string & value) {
+            params.vocoder.hf_file = value;
+        }
+    ).set_env("LLAMA_ARG_HF_FILE_V"));
     add_opt(common_arg(
         {"-hft", "--hf-token"}, "TOKEN",
         "Hugging Face access token (default: value from HF_TOKEN environment variable)",
```
```diff
@@ -2180,5 +2199,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         }
     ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODEL_DRAFT"));
 
+    add_opt(common_arg(
+        {"-mv", "--model-vocoder"}, "FNAME",
+        "vocoder model for audio generation (default: unused)",
+        [](common_params & params, const std::string & value) {
+            params.vocoder.model = value;
+        }
+    ).set_examples({LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_SERVER}));
+
     return ctx_arg;
 }
```
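All of the new vocoder options are gated with `set_examples`, so they only appear for the TTS and server examples, and the Hugging Face pair additionally registers env-var forms via `set_env`. A rough sketch of what such example-gating amounts to (illustrative stand-in types, not llama.cpp's actual `common_arg` internals):

```cpp
// Illustrative example-gating: an option with an empty set is available
// everywhere; otherwise only for the listed examples.
#include <set>
#include <utility>

enum llama_example { LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_TTS };

struct arg_sketch {
    std::set<llama_example> examples;

    arg_sketch & set_examples(std::set<llama_example> ex) {
        examples = std::move(ex);
        return *this;
    }
    bool available_for(llama_example ex) const {
        return examples.empty() || examples.count(ex) > 0;
    }
};

int main() {
    arg_sketch mv;
    mv.set_examples({LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_SERVER});
    return mv.available_for(LLAMA_EXAMPLE_TTS) ? 0 : 1; // visible for the TTS example
}
```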