'''
Description  : Base class for injected modules; wraps an original nn.Module
               and proxies attribute access to it.
Author       : Boxin Zhang
Version      : 0.1.0
Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
'''

from typing import Any

from torch import nn, Tensor
from transformers.configuration_utils import PretrainedConfig

from ktransformers.util.custom_loader import GGUFLoader
import ktransformers.util.utils as utils


class BaseInjectedModule(nn.Module):
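    """Base class for injected modules.

    Wraps an existing ``nn.Module`` and proxies attribute access to it:
    reads that miss on the wrapper fall through to ``orig_module``, and
    writes to names the wrapper cannot already resolve are forwarded to
    ``orig_module``. Subclasses typically override ``forward`` (and
    ``load``) with optimized implementations while reusing the wrapped
    module's parameters and configuration.
    """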
    def __init__(self,
                 key: str,
                 gguf_loader: GGUFLoader,
                 config: PretrainedConfig,
                 orig_module: nn.Module,
                 prefill_device: str = "cuda",
                 generate_device: str = "cuda",
                 **kwargs):
        nn.Module.__init__(self)
        # Register orig_module through nn.Module.__setattr__ so it lands in
        # self._modules as a submodule instead of going through the overridden
        # __setattr__ below.
        nn.Module.__setattr__(self, "orig_module", orig_module)
        # Use object.__setattr__ for plain bookkeeping attributes so they are
        # stored on the wrapper itself rather than forwarded to orig_module.
        object.__setattr__(self, "key", key)
        object.__setattr__(self, "gguf_loader", gguf_loader)
        object.__setattr__(self, "config", config)
        object.__setattr__(self, "prefill_device", prefill_device)
        object.__setattr__(self, "generate_device", generate_device)
        object.__setattr__(self, "device", generate_device)

    def __getattr__(self, name: str) -> Any:
        # nn.Module.__getattr__ only searches _parameters, _buffers and
        # _modules; it never falls back to super().__getattribute__. But
        # nn.Module.__setattr__ *does* call super().__setattr__ for plain
        # attributes, so an attribute can be set on a module yet be invisible
        # to __getattr__. Such attributes are typically found by normal
        # attribute lookup (instance/class dict), which runs before
        # __getattr__ is ever invoked.
        # Example:
        #   import torch
        #   l = torch.nn.Linear(100, 200)
        #   l.out_features                 # 200
        #   l.__getattr__("out_features")  # AttributeError
        try:
            # Attribute stored on the wrapper itself.
            return object.__getattribute__(self, name)
        except AttributeError:
            if name == "orig_module":
                return nn.Module.__getattr__(self, "orig_module")
            try:
                # Parameter, buffer or submodule registered on orig_module.
                return nn.Module.__getattr__(self, "orig_module").__getattr__(name)
            except AttributeError:
                # Plain attribute of orig_module, invisible to
                # nn.Module.__getattr__ (see the note above).
                return super(nn.Module, nn.Module.__getattr__(self, "orig_module")).__getattribute__(name)

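    # Write routing at a glance (illustrative; "scale" is a hypothetical name
    # not present on a plain Linear):
    #   injected.device = "cpu"  -> stored on the wrapper ("device" resolves on it)
    #   injected.scale = 1.0     -> forwarded to orig_module (unknown to both)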
    def __setattr__(self, name: str, value: Tensor | nn.Module) -> None:
        if name == "orig_module":
            return nn.Module.__setattr__(self, "orig_module", value)
        elif hasattr(self, name):
            # Name already resolvable (on the wrapper or via the proxy):
            # set it directly on the wrapper.
            return object.__setattr__(self, name, value)
        # Unknown names are forwarded to the wrapped module.
        return nn.Module.__getattr__(self, "orig_module").__setattr__(name, value)

    def forward(self, *args, **kwargs):
        # Default behavior: delegate to the wrapped module's forward.
        return self.orig_module.forward(*args, **kwargs)

    def load(self):
        # Load weights for all registered children (including orig_module)
        # from the GGUF file, using this module's key as the tensor prefix.
        for name, child in self._modules.items():
            utils.load_weights(child, self.gguf_loader, self.key + ".")
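
# A minimal usage sketch (illustrative only; the loader/config setup is an
# assumption, not a verified ktransformers workflow):
#
#   import torch
#   linear = torch.nn.Linear(100, 200)
#   injected = BaseInjectedModule(
#       key="blk.0.ffn",          # hypothetical GGUF tensor-key prefix
#       gguf_loader=gguf_loader,  # a GGUFLoader constructed elsewhere
#       config=config,            # the model's PretrainedConfig
#       orig_module=linear,
#   )
#   injected.out_features          # 200, proxied from the wrapped Linear
#   injected.device                # "cuda", stored on the wrapper itself
#   injected(torch.randn(1, 100))  # __call__ -> forward -> linear.forward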