import os
import sys
import time
from typing import Literal, Optional, Tuple

import pytest
import torch
import torch.nn.functional as F

# Make the local ktransformers checkout importable before importing from the package below.
sys.path.insert(0, "/home/azure/ktransformers")
print(sys.path)

from ktransformers.ktransformers_ext.triton.fp8gemm import fp8_gemm, act_quant, weight_dequant
from safetensors import safe_open

world_size = 1
rank = 0
block_size = 128
gemm_impl: Literal["bf16", "fp8"] = "bf16"

# `fp8_gemm`, `act_quant`, and `weight_dequant` are imported from ktransformers above.

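# A small illustration (an addition, not part of the original script) of the
# shapes block-wise activation quantization is expected to produce: `act_quant`
# is assumed to return an FP8 tensor with the same shape as its input plus one
# scale per `block_size`-wide group along the last dimension.
def _describe_act_quant(x: torch.Tensor) -> None:
    x_fp8, x_scale = act_quant(x, block_size)
    print(f"input:     {tuple(x.shape)} ({x.dtype})")
    print(f"quantized: {tuple(x_fp8.shape)} ({x_fp8.dtype})")
    print(f"scales:    {tuple(x_scale.shape)} ({x_scale.dtype})")
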
def test_fp8_gemm_vs_torch_matmul():
    # Test case 1: create random matrices of shape (M, K) and (N, K).
    M, K, N = 64, 128, 256  # Matrix dimensions
    x = torch.randn(M, K, dtype=torch.bfloat16, device='cuda')
    weight = torch.randn(N, K, dtype=torch.bfloat16, device='cuda')

    # Apply act_quant (block-wise FP8 quantization) to both matrices.
    x_quantized, scale_x = act_quant(x, block_size)
    weight_quantized, scale_w = act_quant(weight, block_size)

    # Make the quantized tensors and their scales contiguous.
    x_quantized = x_quantized.contiguous()
    weight_quantized = weight_quantized.contiguous()
    scale_x = scale_x.contiguous()
    scale_w = scale_w.contiguous()

    # Perform fp8_gemm using the quantized tensors.
    result_fp8_gemm = fp8_gemm(x_quantized, scale_x, weight_quantized, scale_w)

    # Perform torch.matmul using the original floating-point tensors.
    result_torch_matmul = torch.matmul(x, weight.T)
    print(f'result_torch_matmul: {result_torch_matmul.shape}')
    print(f'result_fp8_gemm: {result_fp8_gemm.shape}')

    print(f"result_fp8_gemm:\n {result_fp8_gemm}")
    print(f"result_torch_matmul:\n {result_torch_matmul}")

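# The test above only prints both results side by side. Below is a minimal
# sketch (an addition, not part of the original test) of how the agreement
# between the FP8 GEMM and the bf16 matmul could be quantified numerically.
def _relative_error(approx: torch.Tensor, reference: torch.Tensor) -> float:
    # Compute the metric in float32 so the comparison itself is not limited by bf16 precision.
    diff = (approx.to(torch.float32) - reference.to(torch.float32)).abs()
    return (diff.sum() / reference.to(torch.float32).abs().sum()).item()
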
def test_fp8_gemm_vs_torch_matmul_load():
    file_path = "/mnt/data/model/DeepSeek-V3/model-00001-of-000163.safetensors"
    with safe_open(file_path, framework="pt", device=0) as f:
        weight = f.get_tensor("model.layers.0.mlp.down_proj.weight")
        scale = f.get_tensor("model.layers.0.mlp.down_proj.weight_scale_inv")

    # Dequantize the FP8 weight to get a floating-point reference.
    weight_dequantized = weight_dequant(weight, scale)
    print(f"weight_dequantized: {weight_dequantized.shape}")
    N, K = weight_dequantized.shape
    M = 64
    x = torch.randn(2, M, K, dtype=torch.bfloat16, device='cuda')
    x_quantized, scale_x = act_quant(x, block_size)

    # Test case 1: quantized x matmul with the still-quantized weight.
    result_fp8_gemm = fp8_gemm(x_quantized, scale_x, weight, scale)
    print(f"result_fp8_gemm:\n {result_fp8_gemm}")
    print(f"dtype {result_fp8_gemm.dtype}")

    # Perform torch.matmul using the dequantized floating-point weight.
    result_torch_matmul = torch.matmul(x, weight_dequantized.to(torch.bfloat16).T)
    print(f"result_torch_matmul:\n {result_torch_matmul}")

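# For reference, a rough pure-PyTorch sketch of block-wise weight
# dequantization (an addition, not the ktransformers implementation): each
# `block x block` tile of the FP8 weight is assumed to be multiplied by its
# corresponding entry of the stored scale tensor. The Triton `weight_dequant`
# kernel imported above is the authoritative version.
def _blockwise_dequant_reference(weight_fp8: torch.Tensor, scale: torch.Tensor,
                                 block: int = 128) -> torch.Tensor:
    out = weight_fp8.to(torch.float32)
    for i in range(scale.shape[0]):
        for j in range(scale.shape[1]):
            out[i * block:(i + 1) * block, j * block:(j + 1) * block] *= scale[i, j]
    return out
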
def test_fp8_gemm_tflops():
    file_path = "/mnt/data/model/DeepSeek-V3/model-00001-of-000163.safetensors"
    with safe_open(file_path, framework="pt", device=0) as f:
        weight = f.get_tensor("model.layers.0.mlp.down_proj.weight")
        scale = f.get_tensor("model.layers.0.mlp.down_proj.weight_scale_inv")

    # Dequantize once to recover the weight's (N, K) shape for the FLOP count.
    weight_dequantized = weight_dequant(weight, scale)
    print(f"weight_dequantized: {weight_dequantized.shape}")
    N, K = weight_dequantized.shape
    M = 6400
    x = torch.randn(2, M, K, dtype=torch.bfloat16, device='cuda')
    # x_quantized, scale_x = act_quant(x, block_size)

    # Time `num_iters` quantize + fp8_gemm passes.
    # NOTE: the FLOP count below covers a single (M, K) x (K, N) GEMM per
    # iteration; the leading batch dimension of x is not included.
    num_iters = 10
    flops_per_gemm = 2 * M * N * K
    total_flops = num_iters * flops_per_gemm

    # Warm-up runs so kernel compilation and caching stay out of the timed loop.
    x_quantized, scale_x = act_quant(x, block_size)
    result_fp8_gemm = fp8_gemm(x_quantized, scale_x, weight, scale)
    x_quantized, scale_x = act_quant(x, block_size)
    result_fp8_gemm = fp8_gemm(x_quantized, scale_x, weight, scale)

    torch.cuda.synchronize()
    t0 = time.time()
    for _ in range(num_iters):
        x_quantized, scale_x = act_quant(x, block_size)
        result_fp8_gemm = fp8_gemm(x_quantized, scale_x, weight, scale)
    torch.cuda.synchronize()
    t1 = time.time()

    total_time = t1 - t0
    tflops = total_flops / total_time / 1e12
    print(f"total_time: {total_time}")
    print(f"tflops: {tflops}")

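# An alternative timing sketch using CUDA events (an addition, not in the
# original test). Events measure elapsed time on the GPU stream rather than
# host wall-clock time; the act_quant/fp8_gemm call pattern mirrors the loop
# in `test_fp8_gemm_tflops` above.
def _time_fp8_gemm_with_events(x: torch.Tensor, weight: torch.Tensor,
                               scale: torch.Tensor, num_iters: int = 10) -> float:
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for _ in range(num_iters):
        x_fp8, x_scale = act_quant(x, block_size)
        fp8_gemm(x_fp8, x_scale, weight, scale)
    end.record()
    torch.cuda.synchronize()
    # elapsed_time returns milliseconds; convert to seconds.
    return start.elapsed_time(end) / 1000.0
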
if __name__ == "__main__":
|
|
test_fp8_gemm_vs_torch_matmul()
|
|
test_fp8_gemm_vs_torch_matmul_load()
|
|
test_fp8_gemm_tplops()
|
|
|