mirror of
https://github.com/kvcache-ai/ktransformers.git
synced 2025-09-09 13:55:27 +00:00
merge main; Add torch q8 linear
This commit is contained in:
parent
6c4ed59175
commit
ed8437413b
27 changed files with 1561 additions and 114 deletions
@@ -17,7 +17,10 @@ import logging
 logger = logging.getLogger("dynamic_attention")
 sys.path.append(os.path.dirname(__file__) + "/../ktransformers_ext/cpu_backend")
 from ktransformers.operators.cpuinfer import CPUInfer, CPUInferKVCache
-from flash_attn import flash_attn_func, flash_attn_with_kvcache
+try:
+    from flash_attn import flash_attn_func, flash_attn_with_kvcache
+except:
+    print("falsh attn not found")
 
 
 import math
|
Loading…
Add table
Add a link
Reference in a new issue