Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-11 09:34:37 +00:00)
fixed compile error: GGML_BACKEND_TYPE_GPU (+1 squashed commits)
Squashed commits: [00ca282a] fixed compile error: LLAMA_SPLIT_MODE_ROW
parent 3ccaf8e09a
commit d47e13c892
2 changed files with 3 additions and 3 deletions
@@ -970,9 +970,9 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     model_params.main_gpu = cu_parseinfo_maindevice;
 
     #if defined(GGML_USE_CUBLAS)
-    model_params.split_mode = (inputs.use_rowsplit?llama_split_mode::LLAMA_SPLIT_ROW:llama_split_mode::LLAMA_SPLIT_LAYER);
+    model_params.split_mode = (inputs.use_rowsplit?llama_split_mode::LLAMA_SPLIT_MODE_ROW:llama_split_mode::LLAMA_SPLIT_MODE_LAYER);
     #else
-    model_params.split_mode = llama_split_mode::LLAMA_SPLIT_LAYER;
+    model_params.split_mode = llama_split_mode::LLAMA_SPLIT_MODE_LAYER;
     #endif
 
     llama_ctx_params.n_batch = kcpp_params->n_batch;
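For context: upstream llama.cpp renamed the llama_split_mode values (LLAMA_SPLIT_ROW to LLAMA_SPLIT_MODE_ROW, LLAMA_SPLIT_LAYER to LLAMA_SPLIT_MODE_LAYER), so the old names no longer compile. Below is a minimal sketch of the same selection against a current llama.h, not koboldcpp's actual loader; the use_rowsplit flag and main_gpu argument stand in for koboldcpp's own inputs struct.

#include "llama.h"

// Hedged sketch: build model params using the renamed split-mode constants.
static llama_model_params make_model_params(bool use_rowsplit, int main_gpu)
{
    llama_model_params p = llama_model_default_params();
    p.main_gpu = main_gpu;
#if defined(GGML_USE_CUBLAS)
    // Row split shards each tensor across GPUs; layer split assigns whole layers to GPUs.
    p.split_mode = use_rowsplit ? LLAMA_SPLIT_MODE_ROW : LLAMA_SPLIT_MODE_LAYER;
#else
    // Without cuBLAS, multi-GPU row splitting is unavailable; fall back to layer split.
    p.split_mode = LLAMA_SPLIT_MODE_LAYER;
#endif
    return p;
}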
@@ -2863,7 +2863,7 @@ struct llama_model_loader {
             bool shouldoffload = (layernum>=0 && clblast_offload_fallback_layers>layernum);
             if(shouldoffload)
             {
-                cur->backend = GGML_BACKEND_GPU;
+                cur->backend = GGML_BACKEND_TYPE_GPU;
                 ggml_cl_transform_tensor(cur->data, cur);
             }
         }
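The same kind of rename hit ggml's backend enum: GGML_BACKEND_GPU is now GGML_BACKEND_TYPE_GPU. Below is a minimal sketch of the CLBlast fallback path under that assumption, using the legacy ggml-opencl.h API that koboldcpp shipped at the time; the layernum and offload_layers arguments are hypothetical stand-ins for the loader's own bookkeeping, and this is not the actual llama_model_loader code.

#include "ggml.h"
#include "ggml-opencl.h"

// Hedged sketch: tag a tensor for the OpenCL device with the renamed backend
// constant, then upload it via the legacy CLBlast path.
static void maybe_offload_to_clblast(struct ggml_tensor * cur, int layernum, int offload_layers)
{
    bool shouldoffload = (layernum >= 0 && offload_layers > layernum);
    if (shouldoffload)
    {
        cur->backend = GGML_BACKEND_TYPE_GPU;      // was GGML_BACKEND_GPU before the rename
        ggml_cl_transform_tensor(cur->data, cur);  // copies the tensor data onto the OpenCL device
    }
}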