Mirror of https://github.com/kvcache-ai/ktransformers.git, synced 2026-05-05 15:40:13 +00:00
Refactor: restructure repository to focus on kt-kernel and KT-SFT modules (#1581)
* refactor: move legacy code to archive/ directory
  - Moved ktransformers, csrc, third_party, merge_tensors to archive/
  - Moved build scripts and configurations to archive/
  - Kept kt-kernel, KT-SFT, doc, and README files in root
  - Preserved complete git history for all moved files
* refactor: restructure repository to focus on kt-kernel and KT-SFT modules
* fix README
* fix README
* fix README
* fix README
* docs: add performance benchmarks to kt-kernel section
  Add comprehensive performance data for kt-kernel to match KT-SFT's presentation:
  - AMX kernel optimization: 21.3 TFLOPS (3.9× faster than PyTorch)
  - Prefill phase: up to 20× speedup vs baseline
  - Decode phase: up to 4× speedup
  - NUMA optimization: up to 63% throughput improvement
  - Multi-GPU (8×L20): 227.85 tokens/s total throughput with DeepSeek-R1 FP8
  Source: https://lmsys.org/blog/2025-10-22-KTransformers/
  This gives users concrete performance metrics for both core modules, making it easier to understand the capabilities of each component.
* refactor: improve kt-kernel performance data with specific hardware and models
  Replace generic performance descriptions with concrete benchmarks:
  - Specify exact hardware: 8×L20 GPU + Xeon Gold 6454S, single- and dual-socket Xeon + AMX
  - Include specific models: DeepSeek-R1-0528 (FP8), DeepSeek-V3 (671B)
  - Show detailed metrics: total throughput, output throughput, concurrency details
  - Match KT-SFT presentation style for consistency
  This gives users actionable performance data for evaluating hardware requirements and expected performance for their use cases.
* fix README
* docs: clean up performance table and improve formatting
* add pic for README
* refactor: simplify .gitmodules and back up legacy submodules
  - Remove 7 legacy submodules from root .gitmodules (archive/third_party/*)
  - Keep only 2 active submodules for kt-kernel (llama.cpp, pybind11)
  - Back up the complete .gitmodules to archive/.gitmodules
  - Add documentation in archive/README.md for researchers who need legacy submodules
  This reduces initial clone size by ~500 MB and avoids downloading unused dependencies (see the .gitmodules sketch after this message).
* refactor: move doc/ back to root directory
  Keep documentation in root for easier access and maintenance.
* refactor: consolidate all images to doc/assets/
  - Move kt-kernel/assets/heterogeneous_computing.png to doc/assets/
  - Remove KT-SFT/assets/ (images already in doc/assets/)
  - Update KT-SFT/README.md image references to ../doc/assets/
  - Eliminates ~7.9 MB of image duplication
  - Centralizes all documentation assets in one location
* fix pic path for README
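For context, a minimal sketch of what the simplified root .gitmodules could look like after this change. Only the two retained submodules (llama.cpp, pybind11) are confirmed by the commit message; the paths under kt-kernel and the exact upstream URLs are assumptions for illustration:

[submodule "kt-kernel/third_party/llama.cpp"]
	path = kt-kernel/third_party/llama.cpp
	url = https://github.com/ggerganov/llama.cpp.git
[submodule "kt-kernel/third_party/pybind11"]
	path = kt-kernel/third_party/pybind11
	url = https://github.com/pybind/pybind11.git

With only these two entries left, a fresh git clone --recurse-submodules fetches just the active kt-kernel dependencies, which is where the ~500 MB clone-size saving comes from.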
This commit is contained in:
parent 8729435d85 · commit 57d14d22bc
510 changed files with 711 additions and 334 deletions
archive/ktransformers/configs/config.yaml (new file, 73 additions)
@@ -0,0 +1,73 @@
log:
  dir: "logs"
  file: "lexllama.log"
  #log level: debug, info, warn, error, crit
  level: "debug"
  backup_count: -1

server:
  ip: 0.0.0.0
  port: 10002

db:
  type: "sqllite"
  database: "server.db"
  host: "./"
  pool_size: 10

user:
  secret_key: "981f1dd2a44e27d68759d0252a486568ed43480b4e616a26e3af3709c3a7ce73"
  algorithm: "HS256"

model:
  # type: transformers
  type: balance_serve
  # type: ktransformers

  name: DeepSeek-Coder-V2-Instruct
  path: deepseek-ai/DeepSeek-V2-Lite-Chat
  gguf_path: /mnt/data/models/Smallthinker-21B

  device: cuda:0
  cache_lens: 16384
  max_new_tokens: 500
web:
  mount: False
  open_cross_domain: True

ext:
  cpu_infer: 10

long_context:
  max_seq_len: 32000
  block_size: 128
  local_windows_len: 4096
  second_select_num: 32
  anchor_type: DYNAMIC
  kv_type: FP16
  dense_layer_num: 2
  anchor_num: 1
  preselect_block: True
  head_select_mode: SHARED
  preselect_block_count: 32
  layer_step: 1
  token_step:

local_chat:
  prompt_file: ""

async_server:
  sched_strategy: "FCFS"
  sched_port: 56441
  sched_metrics_port: 54321
  kvc2_metrics_port: 54391
  max_batch_size: 4 # decode count + prefill count, in one mini batch

attn:
  page_size: 256
  chunk_size: 256
kvc2:
  gpu_only: true
  utilization_percentage: 1.0
  cpu_memory_size_GB: 500
  disk_path: /home/wjh/kvc