@@ -76,7 +76,6 @@ def create_finetune_request(
7676 rpo_alpha : float | None = None ,
7777 simpo_gamma : float | None = None ,
7878 from_checkpoint : str | None = None ,
79- from_hf_model : str | None = None ,
8079 hf_api_token : str | None = None ,
8180) -> FinetuneRequest :
8281 if model is not None and from_checkpoint is not None :
@@ -264,7 +263,6 @@ def create_finetune_request(
264263 wandb_name = wandb_name ,
265264 training_method = training_method_cls ,
266265 from_checkpoint = from_checkpoint ,
267- from_hf_model = from_hf_model ,
268266 hf_api_token = hf_api_token ,
269267 )
270268
@@ -345,7 +343,6 @@ def create(
345343 rpo_alpha : float | None = None ,
346344 simpo_gamma : float | None = None ,
347345 from_checkpoint : str | None = None ,
348- from_hf_model : str | None = None ,
349346 hf_api_token : str | None = None ,
350347 ) -> FinetuneResponse :
351348 """
@@ -403,9 +400,6 @@ def create(
403400 from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
404401 The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
405402 The step value is optional, without it the final checkpoint will be used.
406- from_hf_model (str, optional): Model name from the Hugging Face Hub that will be used to initialize the trained model.
407- The model config is not validated; any model supported by Transformers should work, but the batch size
408- limits are not checked. Defaults to None.
409403 hf_api_token (str, optional): API key for the Hugging Face Hub. Defaults to None.
410404
411405 Returns:
@@ -460,7 +454,6 @@ def create(
460454 rpo_alpha = rpo_alpha ,
461455 simpo_gamma = simpo_gamma ,
462456 from_checkpoint = from_checkpoint ,
463- from_hf_model = from_hf_model ,
464457 hf_api_token = hf_api_token ,
465458 )
466459
@@ -774,7 +767,6 @@ async def create(
774767 rpo_alpha : float | None = None ,
775768 simpo_gamma : float | None = None ,
776769 from_checkpoint : str | None = None ,
777- from_hf_model : str | None = None ,
778770 hf_api_token : str | None = None ,
779771 ) -> FinetuneResponse :
780772 """
@@ -832,9 +824,6 @@ async def create(
832824 from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
833825 The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
834826 The step value is optional, without it the final checkpoint will be used.
835- from_hf_model (str, optional): Model name from the Hugging Face Hub that will be used to initialize the trained model.
836- The model config is not validated; any model supported by Transformers should work, but the batch size
837- limits are not checked. Defaults to None.
838827 hf_api_token (str, optional): API key for the Hugging Face Hub. Defaults to None.
839828
840829 Returns:
0 commit comments