diff --git a/csrc/cutlass b/csrc/cutlass
index 34fd98056..3a8f57a3c 160000
--- a/csrc/cutlass
+++ b/csrc/cutlass
@@ -1 +1 @@
-Subproject commit 34fd98056b69fbf7f0929b3f734bb5f00642e2c9
+Subproject commit 3a8f57a3c89cfff7aa686e95f13d9ad850f61898
diff --git a/flash_attn/__init__.py b/flash_attn/__init__.py
index 7d9184383..7a51e3df6 100644
--- a/flash_attn/__init__.py
+++ b/flash_attn/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.1.2"
+__version__ = "2.1.2.post1"
 
 from flash_attn.flash_attn_interface import (
     flash_attn_func,
diff --git a/training/Dockerfile b/training/Dockerfile
index 9e8e273a1..0a8d36684 100644
--- a/training/Dockerfile
+++ b/training/Dockerfile
@@ -85,11 +85,11 @@ RUN pip install transformers==4.25.1 datasets==2.8.0 pytorch-lightning==1.8.6 tr
 RUN pip install git+https://github.com/mlcommons/logging.git@2.1.0
 
 # Install FlashAttention
-RUN pip install flash-attn==2.1.2
+RUN pip install flash-attn==2.1.2.post1
 
 # Install CUDA extensions for cross-entropy, fused dense, layer norm
 RUN git clone https://github.com/HazyResearch/flash-attention \
-    && cd flash-attention && git checkout v2.1.2 \
+    && cd flash-attention && git checkout v2.1.2.post1 \
     && cd csrc/fused_softmax && pip install . && cd ../../ \
     && cd csrc/rotary && pip install . && cd ../../ \
     && cd csrc/xentropy && pip install . && cd ../../ \