Skip to content

Commit f173cff

Browse files
refactor(attention): remove duplicate exception
Co-authored-by: Richard Rutmann <97447451+rrutmann@users.noreply.github.com>
1 parent 6d7e502 commit f173cff

1 file changed

Lines changed: 1 addition & 5 deletions

File tree

src/modalities/models/gpt2/gpt2_model.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -532,12 +532,8 @@ def prepare_inter_document_masking(
532532
device=device,
533533
)
534534
return self._get_unpad_data_for_concatenated_sequences(concatenated_lengths)
535-
if self.attention_impl == AttentionImplementation.PYTORCH_FLASH:
536-
raise NotImplementedError(
537-
"Inter-document masking is not supported for `pytorch_flash`. " "Use `manual` or `dao_flash`."
538-
)
539535
raise NotImplementedError(
540-
f"Attention implementation {self.attention_impl} is not supported for inter-document masking."
536+
f"Attention implementation {self.attention_impl} is not supported for inter-document masking. Use `manual` or `dao_flash`."
541537
)
542538

543539
@staticmethod

0 commit comments

Comments (0)