Merge branch 'upstream' into concedo_experimental

# Conflicts:
#	README.md
#	examples/llama-bench/llama-bench.cpp
#	examples/llama.android/llama/src/main/cpp/llama-android.cpp
#	examples/llama.android/llama/src/main/java/android/llama/cpp/LLamaAndroid.kt
#	src/llama-vocab.cpp
#	tests/test-backend-ops.cpp
commit 96407502cd
Author: Concedo
Date:   2025-01-17 23:13:50 +08:00

43 changed files with 15434 additions and 435 deletions


@@ -664,7 +664,7 @@ struct llm_tokenizer_bpe_session {
 //            "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
 //            "Are you sure this is what you want?\n", __FUNCTION__);
 //    }
-//    if (vocab.get_add_bos() && output.size() >= 2 && *(output.end()-2) == vocab.token_eos()) {
+//    if (vocab.get_add_eos() && output.size() >= 2 && *(output.end()-2) == vocab.token_eos()) {
 //        LLAMA_LOG_WARN(
 //            "%s: Added a EOS token to the prompt as specified by the model but the prompt "
 //            "also ends with a EOS token. So now the final prompt ends with 2 EOS tokens. "
@@ -3537,4 +3537,4 @@ const std::unordered_map<std::string, llama_token> & llama_vocab::get_token_to_i
 const std::vector<llama_vocab::token_data> & llama_vocab::get_id_to_token() const
 {
     return pimpl->id_to_token;
-}
+}
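
The second hunk leaves the accessor itself untouched; with identical removed and added lines it most likely only normalizes the newline at end of file. For context, here is a minimal sketch of the pimpl-backed accessor pattern these lines close out; Vocab, Impl, and the main() usage are hypothetical reductions of llama_vocab and its private implementation, not the actual llama.cpp types.

#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

using llama_token = int32_t;

// Hypothetical reduction of llama_vocab: the token tables live behind a
// private implementation pointer, and const accessors hand out references
// to them without copying.
struct Vocab {
    struct token_data { std::string text; float score; };

    struct Impl {
        std::unordered_map<std::string, llama_token> token_to_id;
        std::vector<token_data>                      id_to_token;
    };
    std::unique_ptr<Impl> pimpl = std::make_unique<Impl>();

    const std::unordered_map<std::string, llama_token> & get_token_to_id() const {
        return pimpl->token_to_id;
    }
    const std::vector<token_data> & get_id_to_token() const {
        return pimpl->id_to_token;
    }
};

int main() {
    Vocab v;
    v.pimpl->token_to_id["<eos>"] = 2;
    v.pimpl->id_to_token = {{"<unk>", 0.0f}, {"<bos>", 0.0f}, {"<eos>", 0.0f}};

    const auto & ids = v.get_token_to_id(); // const reference, no copy
    return ids.count("<eos>") == 1 ? 0 : 1;
}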