1) Linear and MLP operators support qlen>1; 2) All operators now share a single memory buffer; 3) Refactor CPUInfer submit/sync logic.

This commit is contained in:
chenht2022 2024-08-08 09:04:36 +00:00
parent 442e13bc97
commit c1cc7d2cd2
21 changed files with 749 additions and 731 deletions

View file

@ -6,7 +6,7 @@ Author : chenht2022
Date : 2024-07-25 10:32:05
Version : 1.0.0
LastEditors : chenht2022
LastEditTime : 2024-07-25 10:34:03
LastEditTime : 2024-08-06 10:37:28
Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
'''
import os, sys
@ -15,20 +15,30 @@ sys.path.append(os.path.dirname(__file__) + '/../build')
import cpuinfer_ext
import torch
# NOTE(review): this file is a scrape of a commit-diff page, not runnable
# Python. This span shows BOTH sides of the change: the old configuration
# lines (removed) followed by the new ones (added). Indentation that
# belonged under the `with` statement was lost in extraction.
with torch.inference_mode(mode=True):
# --- old configuration (pre-change side of the diff) ---
hidden_size = 5120
intermediate_size = 3072
stride = 32
gate_type = 1 # ggml_type::GGML_TYPE_F16
up_type = 1 # ggml_type::GGML_TYPE_F16
down_type = 1 # ggml_type::GGML_TYPE_F16
hidden_type = 1 # ggml_type::GGML_TYPE_F16
layer_num = 10
CPUInfer = cpuinfer_ext.CPUInfer(48)
validation_iter = 100
warm_up_iter = 1000
test_iter = 10000
# --- new configuration (post-change side of the diff) ---
hidden_size = 5120
intermediate_size = 3072
stride = 32
# new in this commit — presumably the max grouped sequence length for the
# shared memory buffer mentioned in the commit message; confirm against
# the MLPConfig C++ signature.
group_max_len = 1024
gate_type = 1 # ggml_type::GGML_TYPE_F16
up_type = 1 # ggml_type::GGML_TYPE_F16
down_type = 1 # ggml_type::GGML_TYPE_F16
hidden_type = 1 # ggml_type::GGML_TYPE_F16
# new: tokens per forward call (the commit adds qlen>1 support)
qlen = 30
layer_num = 10
CPUInfer = cpuinfer_ext.CPUInfer(48)
validation_iter = 100
def act_fn(x):
    """SiLU-style activation: x / (1 + exp(-x)), i.e. x * sigmoid(x)."""
    negative_exp = torch.exp(-x)
    return x / (1.0 + negative_exp)

def mlp_torch(input, gate_proj, up_proj, down_proj):
    """Pure-torch reference MLP forward, used to validate cpuinfer output.

    input:      (qlen, hidden_size)
    gate_proj:  (intermediate_size, hidden_size)
    up_proj:    (intermediate_size, hidden_size)
    down_proj:  (hidden_size, intermediate_size)
    returns:    (qlen, hidden_size)
    """
    gate_out = torch.mm(input, gate_proj.t())
    up_out = torch.mm(input, up_proj.t())
    hidden = act_fn(gate_out) * up_out
    projected = torch.mm(hidden, down_proj.t())
    return projected
# Build layer_num layers of random fp16 weights and wrap each in a
# cpuinfer_ext MLP object, keeping the torch tensors alive (the config
# only receives raw data pointers).
with torch.inference_mode(mode=True):
mlps = []
gate_projs = []
up_projs = []
@ -37,7 +47,7 @@ with torch.inference_mode(mode=True):
# Weights are generated on CUDA then copied to CPU contiguous memory —
# presumably for faster random init; confirm.
gate_proj = torch.randn((intermediate_size, hidden_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous()
up_proj = torch.randn((intermediate_size, hidden_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous()
down_proj = torch.randn((hidden_size, intermediate_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous()
# old call (pre-change side of the diff):
config = cpuinfer_ext.mlp.MLPConfig(hidden_size, intermediate_size, stride, gate_proj.data_ptr(), up_proj.data_ptr(), down_proj.data_ptr(), gate_type, up_type, down_type, hidden_type)
# new call (this commit inserts group_max_len after stride):
config = cpuinfer_ext.mlp.MLPConfig(hidden_size, intermediate_size, stride, group_max_len, gate_proj.data_ptr(), up_proj.data_ptr(), down_proj.data_ptr(), gate_type, up_type, down_type, hidden_type)
mlp = cpuinfer_ext.mlp.MLP(config)
gate_projs.append(gate_proj)
up_projs.append(up_proj)
@ -47,52 +57,26 @@ with torch.inference_mode(mode=True):
# validation
# Compare the cpuinfer_ext MLP against the pure-torch reference for
# validation_iter random inputs, cycling through the layers.
for i in range(validation_iter):
mlp = mlps[i % layer_num]
# old buffers (pre-change): single-token, shape (1, hidden_size)
input = torch.randn((1, hidden_size), dtype=torch.float16).contiguous()
output = torch.empty((1, hidden_size), dtype=torch.float16).contiguous()
# new buffers (this commit): qlen tokens per call
input = torch.randn((qlen, hidden_size), dtype=torch.float16).contiguous()
output = torch.empty((qlen, hidden_size), dtype=torch.float16).contiguous()
# keep values small so fp16 matmuls stay well-conditioned
input = input / 100
# old submit API: pass the bound method plus raw pointers
CPUInfer.submit(mlp.forward, input.data_ptr(), output.data_ptr())
# new submit API: mlp.forward(...) builds a task object that is submitted
CPUInfer.submit(
mlp.forward(
qlen,
input.data_ptr(),
output.data_ptr()
)
)
CPUInfer.sync()
# print('cpuinfer output', output)
# old inline reference (removed; superseded by the top-level act_fn/mlp_torch)
def act_fn(x):
return x / (1.0 + torch.exp(-x))
gate_proj = gate_projs[i%layer_num]
up_proj = up_projs[i%layer_num]
down_proj = down_projs[i%layer_num]
gate_buf = torch.mm(input, gate_proj.t())
up_buf = torch.mm(input, up_proj.t())
intermediate = act_fn(gate_buf) * up_buf
t_output = torch.mm(intermediate, down_proj.t())
# new: single call to the shared reference helper
t_output = mlp_torch(input, gate_proj, up_proj, down_proj)
# print('torch output', t_output)
# relative mean absolute error between kernel and reference
diff = torch.mean(torch.abs(output - t_output)) / torch.mean(torch.abs(t_output))
print('diff = ', diff)
# NOTE(review): bare assert is stripped under `python -O`
assert(diff < 0.001)
# NOTE(review): everything below appears to be the removed (old) side of
# the diff — the warm-up and timing benchmark deleted by this commit.
# warm up
for i in range(warm_up_iter):
mlp = mlps[i % layer_num]
input = torch.randn((1, hidden_size), dtype=torch.float16).contiguous()
output = torch.empty((1, hidden_size), dtype=torch.float16).contiguous()
input = input / 100
CPUInfer.submit(mlp.forward, input.data_ptr(), output.data_ptr())
CPUInfer.sync()
# test
# Time submit+sync round-trips only (tensor setup excluded from timing).
# NOTE(review): `time` is not imported in the visible lines — presumably
# imported in an elided hunk.
total_time = 0
for i in range(test_iter):
mlp = mlps[i % layer_num]
input = torch.randn((1, hidden_size), dtype=torch.float16).contiguous()
output = torch.empty((1, hidden_size), dtype=torch.float16).contiguous()
input = input / 100
start = time.time()
CPUInfer.submit(mlp.forward, input.data_ptr(), output.data_ptr())
CPUInfer.sync()
end = time.time()
total_time += end - start
print('Time: ', total_time)
print('Iteration: ', test_iter)
print('Time per iteration: ', total_time / test_iter)
# Bandwidth model: 3 weight matrices of hidden_size x intermediate_size
# read per forward, times 2 — presumably bytes per fp16 element; confirm.
print('Bandwidth: ', hidden_size * intermediate_size * 3 * 2 * test_iter / total_time / 1024 / 1024 / 1024, 'GB/s')
print("All tasks completed.")