@@ -19,11 +19,6 @@ def __init__(self, embedding_directory=None, tokenizer_data={}):
 
 class Gemma2_2BModel(sd1_clip.SDClipModel):
     def __init__(self, device="cpu", layer="hidden", layer_idx=-2, dtype=None, attention_mask=True, model_options={}):
-        llama_scaled_fp8 = model_options.get("llama_scaled_fp8", None)
-        if llama_scaled_fp8 is not None:
-            model_options = model_options.copy()
-            model_options["scaled_fp8"] = llama_scaled_fp8
-
         super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"start": 2, "pad": 0}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Gemma2_2B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
 
 
@@ -35,10 +30,10 @@ def __init__(self, device="cpu", dtype=None, model_options={}):
 def te(dtype_llama=None, llama_scaled_fp8=None):
     class LuminaTEModel_(LuminaModel):
         def __init__(self, device="cpu", dtype=None, model_options={}):
-            if llama_scaled_fp8 is not None and "llama_scaled_fp8" not in model_options:
+            if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options:
                 model_options = model_options.copy()
-                model_options["llama_scaled_fp8"] = llama_scaled_fp8
-            if dtype_llama is not None:
-                dtype = dtype_llama
+                model_options["scaled_fp8"] = llama_scaled_fp8
+            if dtype_llama is not None:
+                dtype = dtype_llama
             super().__init__(device=device, dtype=dtype, model_options=model_options)
     return LuminaTEModel_
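
For context, a minimal runnable sketch of the option flow after this change. The `build_model_options` helper and the string stand-in for the fp8 descriptor are illustrative assumptions, not part of the codebase; the point is that `te()` now writes the generic `scaled_fp8` key (presumably consumed downstream by `SDClipModel`), so `Gemma2_2BModel` no longer needs its own shim translating `llama_scaled_fp8` into `scaled_fp8`.

```python
# Illustrative sketch only: build_model_options and the "fp8_e4m3fn" string
# are placeholders, not real ComfyUI APIs or values.
def te(dtype_llama=None, llama_scaled_fp8=None):
    def build_model_options(model_options={}):
        # te() now sets the generic "scaled_fp8" key directly,
        # instead of a "llama_scaled_fp8" alias that the model
        # class had to copy over itself.
        if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options:
            model_options = model_options.copy()
            model_options["scaled_fp8"] = llama_scaled_fp8
        return model_options
    return build_model_options

opts = te(llama_scaled_fp8="fp8_e4m3fn")(model_options={})
print(opts)  # {'scaled_fp8': 'fp8_e4m3fn'}
```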