diff --git a/common/common.cpp b/common/common.cpp
index 524c7f9b..16344f4d 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1539,7 +1539,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
     std::copy(std::begin(n_layer_window), std::end(n_layer_window), mparams.n_layer_window);
     std::copy(std::begin(n_layer_window), std::end(n_layer_window), llama_context_n_layer_window(lctx));
 
-    if (params.n_gpu_layers > 0) {
+    if (params.n_gpu_layers == 0) { // if -ngl not set
         params.n_gpu_layers  = n_gpu_layers[my_rank];
         cparams.n_gpu_layers = n_gpu_layers[my_rank];
         mparams.n_gpu_layers = n_gpu_layers[my_rank];
diff --git a/src/llama.cpp b/src/llama.cpp
index 6d5eff70..c937edaf 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -3618,6 +3618,7 @@ void llama_profile_device(
     dev_info->cpu_props.name        = cpu_props.name;
     dev_info->cpu_props.description = cpu_props.description;
 
+#if defined(GGML_USE_METAL) || defined(GGML_USE_CUDA)
     dev_info->gpu_props.name        = gpu_props.name;
     dev_info->gpu_props.description = gpu_props.description;
 
@@ -3630,6 +3631,7 @@ void llama_profile_device(
     dev_info->gpu_props.cuda_read_vram_bw   = device_cuda_read_vram_bw();
     dev_info->gpu_props.metal_mem_cpy_delay = device_metal_mem_copy(model);
     dev_info->gpu_props.cuda_mem_cpy_delay  = device_cuda_mem_copy(model);
+#endif
 
     if (is_dtype_exist(n_params, GGML_TYPE_F32)) {
         dev_info->cpu_props.flops_f32_f32 = device_cpu_flops (model, GGML_TYPE_F32, GGML_TYPE_F32, n_threads);