Mirror of https://github.com/kvcache-ai/ktransformers.git, synced 2026-04-28 03:39:48 +00:00
fix flash_attn whl path
commit a8d159771e
parent b4ad815ef0

1 changed file with 2 additions and 1 deletion
.github/workflows/install.yml (vendored): 3 lines changed
@@ -40,7 +40,8 @@ jobs:
           conda activate ktransformers-dev
           pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu126
           pip3 install packaging ninja cpufeature numpy
-          pip install ~/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiTRUE-cp311-cp311-linux_x86_64.whl
+          wget
+          pip install https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.2cxx11abiTRUE-cp311-cp311-linux_x86_64.whl
       - name: Install KTransformers
         run: |
           source /home/qujing3/anaconda3/etc/profile.d/conda.sh
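For context, the wheel filename in the new URL encodes the build it was compiled for: cu12 targets CUDA 12.x, torch2.2 means it was built against torch 2.2, cxx11abiTRUE selects the C++11 ABI, and cp311-cp311 pins CPython 3.11 on linux_x86_64. Below is a minimal sketch of the resulting install step, assuming the step layout implied by the hunk context; the step name and indentation are illustrative, not taken from the full workflow file:

      - name: Install dependencies
        run: |
          source /home/qujing3/anaconda3/etc/profile.d/conda.sh
          conda activate ktransformers-dev
          pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu126
          pip3 install packaging ninja cpufeature numpy
          # Prebuilt flash-attn wheel: CUDA 12 / torch 2.2 / C++11 ABI / CPython 3.11
          pip install https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.2cxx11abiTRUE-cp311-cp311-linux_x86_64.whl

Installing a pinned release wheel keeps CI off the flash-attn source build, which can spend a long time compiling CUDA kernels; the trade-off is that the version tags baked into the URL must be kept in sync with the torch install above whenever either is bumped.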