1 parent 6d7e502 · commit f173cff
1 file changed
src/modalities/models/gpt2/gpt2_model.py
@@ -532,12 +532,8 @@ def prepare_inter_document_masking(
                 device=device,
             )
             return self._get_unpad_data_for_concatenated_sequences(concatenated_lengths)
-        if self.attention_impl == AttentionImplementation.PYTORCH_FLASH:
-            raise NotImplementedError(
-                "Inter-document masking is not supported for `pytorch_flash`. " "Use `manual` or `dao_flash`."
-            )
         raise NotImplementedError(
-            f"Attention implementation {self.attention_impl} is not supported for inter-document masking."
+            f"Attention implementation {self.attention_impl} is not supported for inter-document masking. Use `manual` or `dao_flash`."
         )
 
     @staticmethod
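For context, here is a minimal sketch of the error path this commit produces: the dedicated `pytorch_flash` branch is removed, and every unsupported attention implementation falls through to a single `NotImplementedError` that names the supported alternatives. Only `AttentionImplementation`, `attention_impl`, the `manual`/`dao_flash`/`pytorch_flash` names, and the message text come from the diff; the enum values and class structure below are assumptions for illustration.

```python
# Hypothetical sketch of the consolidated error path after this commit;
# not the repository's actual class layout.
from enum import Enum


class AttentionImplementation(Enum):
    MANUAL = "manual"
    DAO_FLASH = "dao_flash"
    PYTORCH_FLASH = "pytorch_flash"


class Model:
    def __init__(self, attention_impl: AttentionImplementation) -> None:
        self.attention_impl = attention_impl

    def prepare_inter_document_masking(self) -> None:
        if self.attention_impl in (
            AttentionImplementation.MANUAL,
            AttentionImplementation.DAO_FLASH,
        ):
            return  # supported: unpad data would be computed here
        # pytorch_flash no longer has its own branch; every unsupported
        # implementation reaches this single, unified message.
        raise NotImplementedError(
            f"Attention implementation {self.attention_impl} is not supported for "
            "inter-document masking. Use `manual` or `dao_flash`."
        )


# Example: requesting inter-document masking with pytorch_flash now raises
# the consolidated NotImplementedError with the suggested alternatives.
try:
    Model(AttentionImplementation.PYTORCH_FLASH).prepare_inter_document_masking()
except NotImplementedError as err:
    print(err)
```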