Mirror of https://github.com/Lizonghang/prima.cpp.git (synced 2025-09-06 21:59:03 +00:00)
add api llama_model_set_n_gpu_layers
commit 1e1ba5bb91 (parent 9279a2e3ff)
2 changed files with 9 additions and 0 deletions
@@ -1440,6 +1440,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
     params.n_gpu_layers  = n_gpu_layers[my_rank];
     cparams.n_gpu_layers = n_gpu_layers[my_rank];
     mparams.n_gpu_layers = n_gpu_layers[my_rank];
+    llama_model_set_n_gpu_layers(model, n_gpu_layers[my_rank]);
 
 #ifdef LLAMA_DEBUG
     device_print_props(dev_info_set, n_world, model, cparams);
@@ -20705,6 +20705,14 @@ uint32_t llama_model_n_layers(const struct llama_model * model) {
     return model->hparams.n_layer;
 }
 
+uint32_t llama_model_n_gpu_layers(const struct llama_model * model) {
+    return model->n_gpu_layers;
+}
+
+void llama_model_set_n_gpu_layers(struct llama_model * model, uint32_t value) {
+    model->n_gpu_layers = value;
+}
+
 uint64_t llama_model_n_params(const struct llama_model * model) {
     uint64_t nparams = 0;
     for (const auto & it : model->tensors_by_name) {
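For reference, a minimal usage sketch of the accessor pair added in this commit. It assumes the declarations of llama_model_n_gpu_layers / llama_model_set_n_gpu_layers are exposed in llama.h (the header is not part of this diff) and that the standard llama.cpp loading API (llama_model_default_params, llama_load_model_from_file, llama_free_model) is unchanged in this fork; the model path and layer count are placeholders.

#include "llama.h"
#include <cstdio>

int main() {
    const uint32_t n_gpu = 16;  // placeholder per-device layer count

    // Assumed standard llama.cpp loading API; "model.gguf" is a placeholder path.
    struct llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = n_gpu;  // used by the loader when offloading layers

    struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == nullptr) {
        return 1;
    }

    // New in this commit: keep the model object's own copy of the layer count
    // in sync (as the first hunk does with n_gpu_layers[my_rank]) and read it back.
    llama_model_set_n_gpu_layers(model, n_gpu);
    printf("n_gpu_layers = %u\n", llama_model_n_gpu_layers(model));

    llama_free_model(model);
    return 0;
}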