Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-10 17:14:36 +00:00)
Commit 6a709be50a (parent 4bf675a83d): replace deprecated

4 changed files with 6 additions and 6 deletions
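Six lines change in total: one printf string tweak, and five replacements of the deprecated llama.cpp entry point `llama_new_context_with_model` with its successor `llama_init_from_model`, which takes the same arguments (the model loads already use the current `llama_model_load_from_file`). A minimal before/after sketch, assuming a llama.cpp recent enough to ship the replacement; the model path and context size below are illustrative, not from this commit:

    #include "llama.h"

    int main() {
        llama_backend_init();

        // Loading is unchanged by this commit: llama_model_load_from_file
        // is already the non-deprecated name.
        llama_model_params mparams = llama_model_default_params();
        llama_model * model = llama_model_load_from_file("model.gguf", mparams); // illustrative path
        if (model == NULL) { return 1; }

        llama_context_params cparams = llama_context_default_params();
        cparams.n_ctx = 4096; // illustrative

        // Before: llama_context * ctx = llama_new_context_with_model(model, cparams); // deprecated
        // After (same signature, used throughout this commit):
        llama_context * ctx = llama_init_from_model(model, cparams);
        if (ctx == NULL) { llama_model_free(model); return 1; }

        llama_free(ctx);
        llama_model_free(model);
        llama_backend_free();
        return 0;
    }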
@@ -188,7 +188,7 @@ extern "C"
     }
     else if(file_format==FileFormat::GGUF_GENERIC)
     {
-        printf("\n---\nIdentified as GGUF model: (ver %d)\nAttempting to Load...\n---\n", file_format);
+        printf("\n---\nIdentified as GGUF model.\nAttempting to Load...\n---\n", file_format);
     }
     else if(file_format==FileFormat::GGML || file_format==FileFormat::GGHF || file_format==FileFormat::GGJT || file_format==FileFormat::GGJT_2 || file_format==FileFormat::GGJT_3)
     {
@@ -583,7 +583,7 @@ static void speculative_decoding_setup(std::string spec_model_filename, const ll
     draft_ctx_params.type_v = base_ctx_params.type_v;

     llama_model * draftmodel = llama_model_load_from_file(spec_model_filename.c_str(), draft_model_params);
-    draft_ctx = llama_new_context_with_model(draftmodel, draft_ctx_params);
+    draft_ctx = llama_init_from_model(draftmodel, draft_ctx_params);
     if(draft_ctx == NULL)
     {
         printf("Error: failed to load speculative decoding draft model '%s'\n", spec_model_filename.c_str());
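In this hunk both steps can fail independently: `llama_model_load_from_file` returns NULL if the draft model file cannot be loaded, and `llama_init_from_model` returns NULL if context creation fails. A hypothetical helper (not code from the repository) illustrating the load-then-init-then-check flow, which also frees the model when only the second step fails:

    #include "llama.h"
    #include <cstdio>
    #include <string>

    // Hypothetical helper; names are not from koboldcpp.
    static llama_context * load_draft_ctx(const std::string & path,
                                          llama_model_params mparams,
                                          llama_context_params cparams)
    {
        llama_model * model = llama_model_load_from_file(path.c_str(), mparams);
        if (model == NULL) {
            printf("Error: failed to load speculative decoding draft model '%s'\n", path.c_str());
            return NULL;
        }
        llama_context * ctx = llama_init_from_model(model, cparams);
        if (ctx == NULL) {
            printf("Error: failed to create a context for draft model '%s'\n", path.c_str());
            llama_model_free(model); // don't leak the loaded weights
        }
        return ctx;
    }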
@@ -2227,7 +2227,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     llama_ctx_params.flash_attn = kcpp_data->flash_attn;
     llama_ctx_params.type_k = (inputs.quant_k>1?GGML_TYPE_Q4_0:(inputs.quant_k==1?GGML_TYPE_Q8_0:GGML_TYPE_F16));
     llama_ctx_params.type_v = (inputs.quant_v>1?GGML_TYPE_Q4_0:(inputs.quant_v==1?GGML_TYPE_Q8_0:GGML_TYPE_F16));
-    llama_ctx_v4 = llama_new_context_with_model(llamamodel, llama_ctx_params);
+    llama_ctx_v4 = llama_init_from_model(llamamodel, llama_ctx_params);

     if (llama_ctx_v4 == NULL)
     {
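The nested ternaries above encode a three-way mapping for the KV cache element types: a quantization level of 0 keeps F16, 1 selects Q8_0, and anything higher selects Q4_0. Written out as a hypothetical helper, for readability only:

    #include "ggml.h"

    // Equivalent to (q>1 ? GGML_TYPE_Q4_0 : (q==1 ? GGML_TYPE_Q8_0 : GGML_TYPE_F16)):
    // 0 -> F16 (unquantized cache), 1 -> Q8_0, 2 or more -> Q4_0.
    static ggml_type kv_cache_type(int quant_level)
    {
        if (quant_level > 1)  { return GGML_TYPE_Q4_0; }
        if (quant_level == 1) { return GGML_TYPE_Q8_0; }
        return GGML_TYPE_F16;
    }

    // Usage matching the hunk:
    //   llama_ctx_params.type_k = kv_cache_type(inputs.quant_k);
    //   llama_ctx_params.type_v = kv_cache_type(inputs.quant_v);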
@@ -142,7 +142,7 @@ bool embeddingstype_load_model(const embeddings_load_model_inputs inputs)
     ctx_params.n_threads_batch = nthreads;
     ctx_params.flash_attn = inputs.flash_attention;

-    embeddings_ctx = llama_new_context_with_model(embeddingsmodel, ctx_params);
+    embeddings_ctx = llama_init_from_model(embeddingsmodel, ctx_params);

     if (embeddings_ctx == nullptr) {
         printf("\nEmbeddings Model Load Error: Failed to initialize context!\n");
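Call sites like this one are straightforward to locate because llama.h marks the old function as deprecated, so every remaining use triggers a compiler warning. A simplified sketch of the mechanism; the real header wraps the declaration in a DEPRECATED(...) macro rather than spelling the attribute out, and the exact hint text may differ:

    // Simplified: what the deprecation amounts to on GCC/Clang.
    __attribute__((deprecated("use llama_init_from_model instead")))
    llama_context * llama_new_context_with_model(llama_model * model, llama_context_params params);

    // Each call now warns at compile time; building with
    // -Werror=deprecated-declarations promotes the warning to an error,
    // which makes it hard to miss a call site during a migration like this.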
@@ -542,7 +542,7 @@ bool ttstype_load_model(const tts_load_model_inputs inputs)
     tts_ctx_params.flash_attn = inputs.flash_attention;

     llama_model * ttcmodel = llama_model_load_from_file(modelfile_ttc.c_str(), tts_model_params);
-    ttc_ctx = llama_new_context_with_model(ttcmodel, tts_ctx_params);
+    ttc_ctx = llama_init_from_model(ttcmodel, tts_ctx_params);

     if (ttc_ctx == nullptr) {
         printf("\nTTS Load Error: Failed to initialize ttc context!\n");
@@ -552,7 +552,7 @@ bool ttstype_load_model(const tts_load_model_inputs inputs)
     llama_model * ctsmodel = llama_model_load_from_file(modelfile_cts.c_str(), tts_model_params);

     tts_ctx_params.embeddings = true; //this requires embeddings instead
-    cts_ctx = llama_new_context_with_model(ctsmodel, tts_ctx_params);
+    cts_ctx = llama_init_from_model(ctsmodel, tts_ctx_params);

     if (cts_ctx == nullptr) {
         printf("\nTTS Load Error: Failed to initialize cts context!\n");
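A detail worth noting across these last two hunks: both TTS contexts are created from the same `tts_ctx_params` struct, with `embeddings` flipped to true only before the second call. That works because `llama_init_from_model` takes `llama_context_params` by value, so each context snapshots the settings as they are at creation time. A condensed sketch of the pattern, assuming `ttcmodel` and `ctsmodel` are already loaded and with error handling elided:

    // Assumes: llama_model * ttcmodel, * ctsmodel already loaded
    // via llama_model_load_from_file.
    llama_context_params tts_ctx_params = llama_context_default_params();
    tts_ctx_params.flash_attn = true; // illustrative; the real code copies inputs.flash_attention

    // First context (token-to-codes model), created while embeddings == false.
    llama_context * ttc_ctx = llama_init_from_model(ttcmodel, tts_ctx_params);

    // The struct was passed by value above, so mutating it here only
    // affects contexts created afterwards.
    tts_ctx_params.embeddings = true; // the codes-to-speech model needs embedding output
    llama_context * cts_ctx = llama_init_from_model(ctsmodel, tts_ctx_params);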