@@ -370,7 +370,10 @@ paths:
370370 properties :
371371 training_file :
372372 type : string
373- description : File-ID of a file uploaded to the Together API
373+ description : File-ID of a training file uploaded to the Together API
374+ validation_file :
375+ type : string
376+ description : File-ID of a validation file uploaded to the Together API
374377 model :
375378 type : string
376379 description : Name of the base model to run fine-tune job on
@@ -382,6 +385,10 @@ paths:
382385 type : integer
383386 default : 1
384387 description : Number of checkpoints to save during fine-tuning
388+ n_evals :
389+ type : integer
390+ default : 0
391+ description : Number of evaluations to be run on a given validation set during training
385392 batch_size :
386393 type : integer
387394 default : 32
@@ -397,6 +404,26 @@ paths:
397404 wandb_api_key :
398405 type : string
399406 description : API key for Weights & Biases integration
407+ lora :
408+ type : boolean
409+ description : Whether to enable LoRA training. If not provided, full fine-tuning will be applied.
410+ lora_r :
411+ type : integer
412+ default : 8
413+ description : Rank for LoRA adapter weights
414+ lora_alpha :
415+ type : integer
416+ default : 8
417+ description : The alpha value for LoRA adapter training.
418+ lora_dropout :
419+ type : number
420+ format : float
421+ default : 0.0
422+ description : The dropout probability for LoRA layers.
423+ lora_trainable_modules :
424+ type : string
425+ default : ' all-linear'
426+ description : A list of LoRA trainable modules, separated by commas
400427 responses :
401428 ' 200 ' :
402429 description : Fine-tuning job initiated successfully
@@ -1656,9 +1683,9 @@ components:
16561683 type : string
16571684 model_output_path :
16581685 type : string
1659- TrainingFileNumLines :
1686+ trainingfile_numlines :
16601687 type : integer
1661- TrainingFileSize :
1688+ trainingfile_size :
16621689 type : integer
16631690 created_at :
16641691 type : string
@@ -1668,6 +1695,8 @@ components:
16681695 type : integer
16691696 n_checkpoints :
16701697 type : integer
1698+ n_evals :
1699+ type : integer
16711700 batch_size :
16721701 type : integer
16731702 learning_rate :
@@ -1681,7 +1710,9 @@ components:
16811710 lora_alpha :
16821711 type : integer
16831712 lora_dropout :
1684- type : integer
1713+ type : number
1714+ lora_trainable_modules :
1715+ type : string
16851716 status :
16861717 $ref : ' #/components/schemas/FinetuneJobStatus'
16871718 job_id :
0 commit comments