fix-singleton

This commit is contained in:
Atream 2025-03-14 04:16:53 +00:00
parent 7f57769c23
commit 6f43bbe55f
4 changed files with 11 additions and 4 deletions

View file

@@ -56,7 +56,7 @@ def local_chat(
model_path: str | None = None,
optimize_config_path: str = None,
gguf_path: str | None = None,
max_new_tokens: int = 300,
max_new_tokens: int = 1000,
cpu_infer: int = Config().cpu_infer,
use_cuda_graph: bool = True,
prompt_file : str | None = None,

View file

@@ -26,6 +26,7 @@ import json
class DynamicScaledDotProductAttention:
remaining_length: int
cpu_infer = None
def __init__(
self,
@@ -180,7 +181,9 @@ class DynamicScaledDotProductAttention:
self.preselect_block_num = 0 # block_num before preselect
self.evict_tokens = 0
self.cpu_infer = CPUInfer(threads_num)
if DynamicScaledDotProductAttention.cpu_infer is None:
DynamicScaledDotProductAttention.cpu_infer = CPUInfer(threads_num)
self.cpu_infer = DynamicScaledDotProductAttention.cpu_infer
self.local_thread = CPUInferKVCache(
self.layer_num,
self.kv_head_num,

View file

@@ -120,7 +120,7 @@ class KExpertsCPU(KExpertsBase):
output_gpu_map:dict = {} # Manage output tensor buffer on different gpu
#stream_map:dict = {} # Manage cuda stream on different gpu
#gguf_loader:GGUFLoader = None
CPU_INFER = CPUInfer(Config().cpu_infer)
CPU_INFER = None
def __init__(
self,
key: str,
@@ -133,6 +133,8 @@ class KExpertsCPU(KExpertsBase):
**kwargs
):
super().__init__(key, gguf_loader, config, orig_module, device, **kwargs)
if KExpertsCPU.CPU_INFER is None:
KExpertsCPU.CPU_INFER = CPUInfer(Config().cpu_infer)
#if KExpertsCPU.gguf_loader is None:
# KExpertsCPU.gguf_loader = GGUFLoader("/mnt/data/model/DeepseekV3-q4km-gguf")
self.gguf_loader = gguf_loader

View file

@@ -360,7 +360,7 @@ class KLinearMarlin(KLinearBase):
self.workspace = None
class KLinearCPUInfer(KLinearBase):
CPU_INFER = CPUInfer(Config().cpu_infer)
CPU_INFER = None
def __init__(
self,
key: str,
@@ -374,6 +374,8 @@ class KLinearCPUInfer(KLinearBase):
**kwargs,
):
super().__init__(key, gguf_loader, config, orig_module, device, **kwargs)
if KLinearCPUInfer.CPU_INFER is None:
KLinearCPUInfer.CPU_INFER = CPUInfer(Config().cpu_infer)
self.has_bias = False
self.dtype = torch.get_default_dtype()
self.w = None