fix auto schedule logic: apply the scheduler-assigned GPU layer count only when -ngl is not set (n_gpu_layers == 0)

This commit is contained in:
Lizonghang 2025-01-29 13:13:37 +04:00
parent e7c6b830e6
commit 849b47ccd0
2 changed files with 3 additions and 1 deletion

View file

@ -1539,7 +1539,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
std::copy(std::begin(n_layer_window), std::end(n_layer_window), mparams.n_layer_window);
std::copy(std::begin(n_layer_window), std::end(n_layer_window), llama_context_n_layer_window(lctx));
if (params.n_gpu_layers > 0) {
if (params.n_gpu_layers == 0) { // if -ngl not set
params.n_gpu_layers = n_gpu_layers[my_rank];
cparams.n_gpu_layers = n_gpu_layers[my_rank];
mparams.n_gpu_layers = n_gpu_layers[my_rank];

View file

@ -3618,6 +3618,7 @@ void llama_profile_device(
dev_info->cpu_props.name = cpu_props.name;
dev_info->cpu_props.description = cpu_props.description;
#if defined(GGML_USE_METAL) || defined(GGML_USE_CUDA)
dev_info->gpu_props.name = gpu_props.name;
dev_info->gpu_props.description = gpu_props.description;
@ -3630,6 +3631,7 @@ void llama_profile_device(
dev_info->gpu_props.cuda_read_vram_bw = device_cuda_read_vram_bw();
dev_info->gpu_props.metal_mem_cpy_delay = device_metal_mem_copy(model);
dev_info->gpu_props.cuda_mem_cpy_delay = device_cuda_mem_copy(model);
#endif
if (is_dtype_exist(n_params, GGML_TYPE_F32)) {
dev_info->cpu_props.flops_f32_f32 = device_cpu_flops (model, GGML_TYPE_F32, GGML_TYPE_F32, n_threads);