1) Linear and MLP operators support qlen>1; 2) All operators now share a single memory buffer; 3) Refactor CPUInfer submit/sync logic.
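For reference, the Linear operator's calling convention changes as sketched below (a minimal example distilled from the updated test in this diff; it assumes cpuinfer_ext has been built under ../build and reuses the test script's sizes and thread count):

import os, sys
sys.path.append(os.path.dirname(__file__) + '/../build')
import cpuinfer_ext
import torch

input_size, output_size, stride = 16384, 5120, 32
group_max_len = 1024           # new LinearConfig argument added by this commit
qlen = 30                      # query length; operators now support qlen > 1
proj_type = hidden_type = 1    # ggml_type::GGML_TYPE_F16
CPUInfer = cpuinfer_ext.CPUInfer(48)

proj = torch.randn((output_size, input_size), dtype=torch.float16).contiguous()
config = cpuinfer_ext.linear.LinearConfig(
    input_size, output_size, stride, group_max_len,
    proj.data_ptr(), proj_type, hidden_type)
linear = cpuinfer_ext.linear.Linear(config)

input = (torch.randn((qlen, input_size), dtype=torch.float16) / 100).contiguous()
output = torch.empty((qlen, output_size), dtype=torch.float16).contiguous()

# Old API: CPUInfer.submit(linear.forward, input.data_ptr(), output.data_ptr())
# New API: the operator call itself builds the task, now carrying qlen.
CPUInfer.submit(linear.forward(qlen, input.data_ptr(), output.data_ptr()))
CPUInfer.sync()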

chenht2022 2024-08-08 09:04:36 +00:00
parent 442e13bc97
commit c1cc7d2cd2
21 changed files with 749 additions and 731 deletions

@@ -6,7 +6,7 @@ Author : chenht2022
 Date : 2024-07-25 10:32:05
 Version : 1.0.0
 LastEditors : chenht2022
-LastEditTime : 2024-07-25 10:34:00
+LastEditTime : 2024-08-06 10:36:59
 Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
 '''
 import os, sys
@@ -15,23 +15,23 @@ sys.path.append(os.path.dirname(__file__) + '/../build')
 import cpuinfer_ext
 import torch
-with torch.inference_mode(mode=True):
-    input_size = 16384
-    output_size = 5120
-    stride = 32
-    proj_type = 1 # ggml_type::GGML_TYPE_F16
-    hidden_type = 1 # ggml_type::GGML_TYPE_F16
-    layer_num = 10
-    CPUInfer = cpuinfer_ext.CPUInfer(48)
-    validation_iter = 100
-    warm_up_iter = 1000
-    test_iter = 10000
+input_size = 16384
+output_size = 5120
+stride = 32
+group_max_len = 1024
+proj_type = 1 # ggml_type::GGML_TYPE_F16
+hidden_type = 1 # ggml_type::GGML_TYPE_F16
+qlen = 30
+layer_num = 10
+CPUInfer = cpuinfer_ext.CPUInfer(48)
+validation_iter = 100
+with torch.inference_mode(mode=True):
     linears = []
     projs = []
     for _ in range(layer_num):
         proj = torch.randn((output_size, input_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous()
-        config = cpuinfer_ext.linear.LinearConfig(input_size, output_size, stride, proj.data_ptr(), proj_type, hidden_type)
+        config = cpuinfer_ext.linear.LinearConfig(input_size, output_size, stride, group_max_len, proj.data_ptr(), proj_type, hidden_type)
         linear = cpuinfer_ext.linear.Linear(config)
         projs.append(proj)
         linears.append(linear)
@@ -39,11 +39,17 @@ with torch.inference_mode(mode=True):
     # validation
     for i in range(validation_iter):
         linear = linears[i % layer_num]
-        input = torch.randn((1, input_size), dtype=torch.float16).contiguous()
-        output = torch.empty((1, output_size), dtype=torch.float16).contiguous()
+        input = torch.randn((qlen, input_size), dtype=torch.float16).contiguous()
+        output = torch.empty((qlen, output_size), dtype=torch.float16).contiguous()
         input = input / 100
-        CPUInfer.submit(linear.forward, input.data_ptr(), output.data_ptr())
+        CPUInfer.submit(
+            linear.forward(
+                qlen,
+                input.data_ptr(),
+                output.data_ptr()
+            )
+        )
         CPUInfer.sync()
         # print('cpuinfer output', output)
@@ -54,30 +60,3 @@ with torch.inference_mode(mode=True):
         diff = torch.mean(torch.abs(output - t_output)) / torch.mean(torch.abs(t_output))
         print('diff = ', diff)
         assert(diff < 0.001)
-    # warm up
-    for i in range(warm_up_iter):
-        linear = linears[i % layer_num]
-        input = torch.randn((1, input_size), dtype=torch.float16).contiguous()
-        output = torch.empty((1, output_size), dtype=torch.float16).contiguous()
-        input = input / 100
-        CPUInfer.submit(linear.forward, input.data_ptr(), output.data_ptr())
-        CPUInfer.sync()
-    # test
-    total_time = 0
-    for i in range(test_iter):
-        linear = linears[i % layer_num]
-        input = torch.randn((1, input_size), dtype=torch.float16).contiguous()
-        output = torch.empty((1, output_size), dtype=torch.float16).contiguous()
-        input = input / 100
-        start = time.perf_counter()
-        CPUInfer.submit(linear.forward, input.data_ptr(), output.data_ptr())
-        CPUInfer.sync()
-        end = time.perf_counter()
-        total_time += end - start
-    print('Time: ', total_time)
-    print('Iteration: ', test_iter)
-    print('Time per iteration: ', total_time / test_iter)
-    print('Bandwidth: ', input_size * output_size * 2 * test_iter / total_time / 1000 / 1000 / 1000, 'GB/s')
-    print("All tasks completed.")