mirror of
https://github.com/illian64/llm-translate.git
synced 2026-04-28 03:39:51 +00:00
* book translate * files processing * files processing * files processing * files processing --------- Co-authored-by: APodoinikov <APodoynikov@detmir.ru>
33 lines
848 B
Python
33 lines
848 B
Python
import logging
|
|
|
|
import torch
|
|
|
|
logger = logging.getLogger('uvicorn')


def cuda_info() -> None:
    """Log CUDA availability, device count, the current device index, and each GPU's name.

    Safe to call on CPU-only machines: the current-device query is skipped
    when CUDA is unavailable.
    """
    cuda_is_available = torch.cuda.is_available()
    device_count = torch.cuda.device_count()
    # torch.cuda.current_device() initializes CUDA and raises RuntimeError when
    # no CUDA device exists, so only query it when CUDA is actually available.
    current_device = torch.cuda.current_device() if cuda_is_available else None
    # Lazy %-style arguments defer formatting until the record is actually emitted.
    logger.info("CUDA info: is available=%s, device count=%s, current_device=%s. Devices:",
                cuda_is_available, device_count, current_device)
    for i in range(device_count):
        logger.info("GPU #%d: %s", i, torch.cuda.get_device_name(i))
|
|
|
|
|
|
def get_device(options: dict) -> str:
    """Return the torch device string selected by the options.

    :param options: app options; a truthy "cuda" key selects the GPU.
    :return: "cuda" when CUDA is enabled in the options, otherwise "cpu".
    """
    return "cuda" if options["cuda"] else "cpu"
|
|
|
|
|
|
def get_device_with_gpu_num(options: dict) -> str:
    """Return a torch device string that includes the GPU index.

    :param options: app options; "cuda" toggles GPU use and
        "cuda_device_index" selects which GPU (only read when "cuda" is truthy).
    :return: e.g. "cuda:0" when CUDA is enabled, otherwise "cpu".
    """
    # Guard clause: CPU path needs no device index lookup.
    if not options["cuda"]:
        return "cpu"
    return "cuda:{0}".format(options["cuda_device_index"])
|