diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index f855fdce1..1e835b1e7 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -100,16 +100,16 @@ static ggml_cuda_device_info ggml_cuda_init() {
     GGML_ASSERT(info.device_count <= GGML_CUDA_MAX_DEVICES);
 
     int64_t total_vram = 0;
-#if defined(GGML_CUDA_FORCE_MMQ)
-    fprintf(stderr, "%s: GGML_CUDA_FORCE_MMQ: yes\n", __func__);
-#else
-    fprintf(stderr, "%s: GGML_CUDA_FORCE_MMQ: no\n", __func__);
-#endif
-#if defined(CUDA_USE_TENSOR_CORES)
-    fprintf(stderr, "%s: CUDA_USE_TENSOR_CORES: yes\n", __func__);
-#else
-    fprintf(stderr, "%s: CUDA_USE_TENSOR_CORES: no\n", __func__);
-#endif
+// #if defined(GGML_CUDA_FORCE_MMQ)
+//     fprintf(stderr, "%s: GGML_CUDA_FORCE_MMQ: yes\n", __func__);
+// #else
+//     fprintf(stderr, "%s: GGML_CUDA_FORCE_MMQ: no\n", __func__);
+// #endif
+// #if defined(CUDA_USE_TENSOR_CORES)
+//     fprintf(stderr, "%s: CUDA_USE_TENSOR_CORES: yes\n", __func__);
+// #else
+//     fprintf(stderr, "%s: CUDA_USE_TENSOR_CORES: no\n", __func__);
+// #endif
     fprintf(stderr, "%s: found %d " GGML_CUDA_NAME " devices:\n", __func__, info.device_count);
     for (int id = 0; id < info.device_count; ++id) {
         int device_vmm = 0;