Skip to content

Commit 9a25145

Browse files
Support VLM finetuning
1 parent 158ae5a commit 9a25145

2 files changed

Lines changed: 3 additions & 2 deletions

File tree

src/together/resources/finetune.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -256,7 +256,7 @@ def create_finetune_request(
256256

257257
if model_limits.supports_vision:
258258
multimodal_params = FinetuneMultimodalParams(train_vision=train_vision)
259-
elif train_vision:
259+
elif not model_limits.supports_vision and train_vision:
260260
raise ValueError(
261261
f"Vision encoder training is not supported for the non-multimodal model `{model}`"
262262
)

src/together/utils/files.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -271,6 +271,7 @@ def _check_message_content(
271271
272272
Args:
273273
message: The message to check.
274+
role: The role of the message.
274275
idx: Line number in the file.
275276
276277
Returns:
@@ -421,7 +422,7 @@ def validate_messages(
421422
elif messages_are_multimodal != is_multimodal:
422423
# Due to the format limitation, we cannot mix multimodal and text only messages in the same sample.
423424
raise InvalidFileFormatError(
424-
"Messages in the conversation must be either all in multimodal or all intext only format.",
425+
"Messages in the conversation must be either all in multimodal or all in text-only format.",
425426
line_number=idx + 1,
426427
error_source="key_value",
427428
)

0 commit comments

Comments (0)