@@ -76,6 +76,8 @@ def create_finetune_request(
     rpo_alpha: float | None = None,
     simpo_gamma: float | None = None,
     from_checkpoint: str | None = None,
+    from_hf_model: str | None = None,
+    hf_api_token: str | None = None,
 ) -> FinetuneRequest:
     if model is not None and from_checkpoint is not None:
         raise ValueError(
@@ -262,6 +264,8 @@ def create_finetune_request(
         wandb_name=wandb_name,
         training_method=training_method_cls,
         from_checkpoint=from_checkpoint,
+        from_hf_model=from_hf_model,
+        hf_api_token=hf_api_token,
     )

     return finetune_request
@@ -341,6 +345,8 @@ def create(
         rpo_alpha: float | None = None,
         simpo_gamma: float | None = None,
         from_checkpoint: str | None = None,
+        from_hf_model: str | None = None,
+        hf_api_token: str | None = None,
     ) -> FinetuneResponse:
         """
         Method to initiate a fine-tuning job
@@ -397,6 +403,10 @@ def create(
             from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
                 The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
                 The step value is optional, without it the final checkpoint will be used.
+            from_hf_model (str, optional): Model name from the Hugging Face Hub that will be used to initialize the trained model.
+                The model config is not validated; any model supported by Transformers should work, but the batch size
+                limits are not checked. Defaults to None.
+            hf_api_token (str, optional): API key for the Hugging Face Hub. Defaults to None.

         Returns:
             FinetuneResponse: Object containing information about fine-tuning job.
@@ -450,6 +460,8 @@ def create(
             rpo_alpha=rpo_alpha,
             simpo_gamma=simpo_gamma,
             from_checkpoint=from_checkpoint,
+            from_hf_model=from_hf_model,
+            hf_api_token=hf_api_token,
         )

         if verbose:
@@ -762,6 +774,8 @@ async def create(
         rpo_alpha: float | None = None,
         simpo_gamma: float | None = None,
         from_checkpoint: str | None = None,
+        from_hf_model: str | None = None,
+        hf_api_token: str | None = None,
     ) -> FinetuneResponse:
         """
         Async method to initiate a fine-tuning job
@@ -818,6 +832,10 @@ async def create(
             from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
                 The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
                 The step value is optional, without it the final checkpoint will be used.
+            from_hf_model (str, optional): Model name from the Hugging Face Hub that will be used to initialize the trained model.
+                The model config is not validated; any model supported by Transformers should work, but the batch size
+                limits are not checked. Defaults to None.
+            hf_api_token (str, optional): API key for the Hugging Face Hub. Defaults to None.

         Returns:
             FinetuneResponse: Object containing information about fine-tuning job.
@@ -871,6 +889,8 @@ async def create(
             rpo_alpha=rpo_alpha,
             simpo_gamma=simpo_gamma,
             from_checkpoint=from_checkpoint,
+            from_hf_model=from_hf_model,
+            hf_api_token=hf_api_token,
         )

         if verbose:
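For context, a minimal usage sketch of the two parameters added in this diff. It is not part of the change itself: it assumes the `together` client exposes `fine_tuning.create` as defined in this file, that a training file has already been uploaded, and that the file ID, Hub model name, and environment variable names are placeholders.

```python
# Sketch: start a fine-tuning job initialized from a Hugging Face Hub model,
# using the from_hf_model / hf_api_token parameters introduced in this diff.
# The training-file ID and the Hub repo name below are placeholders.
import os

from together import Together

client = Together(api_key=os.environ["TOGETHER_API_KEY"])

job = client.fine_tuning.create(
    training_file="file-placeholder-id",       # ID of an already-uploaded training file
    from_hf_model="my-org/my-custom-model",    # Hub model used to initialize training
    hf_api_token=os.environ.get("HF_TOKEN"),   # only needed for private or gated Hub repos
    n_epochs=1,
    verbose=True,
)

print(job.id, job.status)
```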