
Commit 9ead17a

Merge branch 'comfyanonymous:master' into master
2 parents b9d909d + f067ad1 commit 9ead17a

4 files changed: 37 additions & 10 deletions


comfy/cli_args.py

Lines changed: 2 additions & 0 deletions
@@ -92,6 +92,8 @@ class LatentPreviewMethod(enum.Enum):
 
 parser.add_argument("--preview-method", type=LatentPreviewMethod, default=LatentPreviewMethod.NoPreviews, help="Default preview method for sampler nodes.", action=EnumAction)
 
+parser.add_argument("--preview-size", type=int, default=512, help="Sets the maximum preview size for sampler nodes.")
+
 cache_group = parser.add_mutually_exclusive_group()
 cache_group.add_argument("--cache-classic", action="store_true", help="Use the old style (aggressive) caching.")
 cache_group.add_argument("--cache-lru", type=int, default=0, help="Use LRU caching with a maximum of N node results cached. May use more RAM/VRAM.")

comfy/float.py

Lines changed: 7 additions & 3 deletions
@@ -41,9 +41,8 @@ def manual_stochastic_round_to_float8(x, dtype, generator=None):
         (2.0 ** (exponent - EXPONENT_BIAS)) * (1.0 + abs_x),
         (2.0 ** (-EXPONENT_BIAS + 1)) * abs_x
     )
-    del abs_x
 
-    return sign.to(dtype=dtype)
+    return sign
 
 
 
@@ -57,6 +56,11 @@ def stochastic_rounding(value, dtype, seed=0):
     if dtype == torch.float8_e4m3fn or dtype == torch.float8_e5m2:
         generator = torch.Generator(device=value.device)
         generator.manual_seed(seed)
-        return manual_stochastic_round_to_float8(value, dtype, generator=generator)
+        output = torch.empty_like(value, dtype=dtype)
+        num_slices = max(1, (value.numel() / (4096 * 4096)))
+        slice_size = max(1, round(value.shape[0] / num_slices))
+        for i in range(0, value.shape[0], slice_size):
+            output[i:i+slice_size].copy_(manual_stochastic_round_to_float8(value[i:i+slice_size], dtype, generator=generator))
+        return output
 
     return value.to(dtype=dtype)
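Two changes work together here: manual_stochastic_round_to_float8 no longer casts its result, and stochastic_rounding now writes row-slices into a preallocated float8 tensor via copy_ (which performs the cast), so the float32 intermediates only ever cover roughly 4096*4096 elements at a time instead of the whole weight. A self-contained sketch of the same chunking pattern, with chunked_apply and the stand-in rounding function invented for illustration (the slice arithmetic matches the diff):

import torch

def chunked_apply(value, round_fn, out_dtype):
    # Preallocate the result in the target dtype; copy_ performs the cast.
    output = torch.empty_like(value, dtype=out_dtype)
    # Aim for roughly 4096*4096 elements per slice, as in the diff.
    num_slices = max(1, (value.numel() / (4096 * 4096)))
    slice_size = max(1, round(value.shape[0] / num_slices))
    for i in range(0, value.shape[0], slice_size):
        output[i:i+slice_size].copy_(round_fn(value[i:i+slice_size]))
    return output

x = torch.randn(8192, 4096)                       # 32M elements -> 2 slices of 4096 rows
y = chunked_apply(x, lambda t: t, torch.float16)  # stand-in for the float8 rounding kernel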

comfy/lora.py

Lines changed: 27 additions & 6 deletions
@@ -324,6 +324,7 @@ def model_lora_keys_unet(model, key_map={}):
                 to = diffusers_keys[k]
                 key_map["transformer.{}".format(k[:-len(".weight")])] = to #simpletrainer and probably regular diffusers flux lora format
                 key_map["lycoris_{}".format(k[:-len(".weight")].replace(".", "_"))] = to #simpletrainer lycoris
+                key_map["lora_transformer_{}".format(k[:-len(".weight")].replace(".", "_"))] = to #onetrainer
 
     return key_map

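The added line applies the same dot-to-underscore mangling as the lycoris mapping, just with a lora_transformer_ prefix, which matches how OneTrainer names its Flux LoRA keys. A quick check of the string transform with a hypothetical diffusers-style key (the key itself is invented for illustration):

k = "single_transformer_blocks.0.attn.to_q.weight"  # hypothetical key
print("lora_transformer_{}".format(k[:-len(".weight")].replace(".", "_")))
# -> lora_transformer_single_transformer_blocks_0_attn_to_q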
@@ -527,20 +528,40 @@ def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32):
             except Exception as e:
                 logging.error("ERROR {} {} {}".format(patch_type, key, e))
         elif patch_type == "glora":
-            if v[4] is not None:
-                alpha = v[4] / v[0].shape[0]
-            else:
-                alpha = 1.0
-
             dora_scale = v[5]
 
+            old_glora = False
+            if v[3].shape[1] == v[2].shape[0] == v[0].shape[0] == v[1].shape[1]:
+                rank = v[0].shape[0]
+                old_glora = True
+
+            if v[3].shape[0] == v[2].shape[1] == v[0].shape[1] == v[1].shape[0]:
+                if old_glora and v[1].shape[0] == weight.shape[0] and weight.shape[0] == weight.shape[1]:
+                    pass
+                else:
+                    old_glora = False
+                    rank = v[1].shape[0]
+
             a1 = comfy.model_management.cast_to_device(v[0].flatten(start_dim=1), weight.device, intermediate_dtype)
             a2 = comfy.model_management.cast_to_device(v[1].flatten(start_dim=1), weight.device, intermediate_dtype)
             b1 = comfy.model_management.cast_to_device(v[2].flatten(start_dim=1), weight.device, intermediate_dtype)
             b2 = comfy.model_management.cast_to_device(v[3].flatten(start_dim=1), weight.device, intermediate_dtype)
 
+            if v[4] is not None:
+                alpha = v[4] / rank
+            else:
+                alpha = 1.0
+
             try:
-                lora_diff = (torch.mm(b2, b1) + torch.mm(torch.mm(weight.flatten(start_dim=1).to(dtype=intermediate_dtype), a2), a1)).reshape(weight.shape)
+                if old_glora:
+                    lora_diff = (torch.mm(b2, b1) + torch.mm(torch.mm(weight.flatten(start_dim=1).to(dtype=intermediate_dtype), a2), a1)).reshape(weight.shape) #old lycoris glora
+                else:
+                    if weight.dim() > 2:
+                        lora_diff = torch.einsum("o i ..., i j -> o j ...", torch.einsum("o i ..., i j -> o j ...", weight.to(dtype=intermediate_dtype), a1), a2).reshape(weight.shape)
+                    else:
+                        lora_diff = torch.mm(torch.mm(weight.to(dtype=intermediate_dtype), a1), a2).reshape(weight.shape)
+                    lora_diff += torch.mm(b1, b2).reshape(weight.shape)
+
                 if dora_scale is not None:
                     weight = function(weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype))
                 else:
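The rewritten glora branch infers the rank from tensor shapes before computing alpha, because the two layouts transpose the four factors: in the old LyCORIS layout the rank sits on v[0].shape[0] and the diff is b2 @ b1 + (W @ a2) @ a1, while in the newer layout it sits on v[1].shape[0] and the diff is (W @ a1) @ a2 + b1 @ b2 (with an einsum path for conv weights whose dim > 2). A toy run of the two shape probes on fabricated dimensions (all sizes invented for illustration):

import torch

out_dim, in_dim, rank = 16, 12, 4

# Newer GLoRA layout: a1 (in, rank), a2 (rank, in), b1 (out, rank), b2 (rank, in).
v = (torch.randn(in_dim, rank),    # v[0] = a1
     torch.randn(rank, in_dim),    # v[1] = a2
     torch.randn(out_dim, rank),   # v[2] = b1
     torch.randn(rank, in_dim))    # v[3] = b2

old_glora = v[3].shape[1] == v[2].shape[0] == v[0].shape[0] == v[1].shape[1]
new_glora = v[3].shape[0] == v[2].shape[1] == v[0].shape[1] == v[1].shape[0]
print(old_glora, new_glora)  # False True -> rank = v[1].shape[0] == 4

The extra square-weight check in the diff handles the ambiguous case where both probes pass, keeping the old layout only when the weight is square and its output dimension matches v[1].shape[0].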

latent_preview.py

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 import comfy.utils
 import logging
 
-MAX_PREVIEW_RESOLUTION = 512
+MAX_PREVIEW_RESOLUTION = args.preview_size
 
 def preview_to_image(latent_image):
     latents_ubyte = (((latent_image + 1.0) / 2.0).clamp(0, 1) # change scale from -1..1 to 0..1
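Since MAX_PREVIEW_RESOLUTION is evaluated at import time, the flag takes effect for the whole session; latent_preview.py already pulls args from comfy.cli_args for the preview method, so the name resolves. The cap is the usual fit-within-a-square bound on the longer side; a hedged sketch of that arithmetic with PIL (fit_to_cap is an invented helper, not ComfyUI's code):

from PIL import Image

def fit_to_cap(img, cap):
    # Downscale so the longer side is at most `cap`, preserving aspect ratio.
    scale = min(1.0, cap / max(img.width, img.height))
    if scale < 1.0:
        img = img.resize((round(img.width * scale), round(img.height * scale)), Image.LANCZOS)
    return img

print(fit_to_cap(Image.new("RGB", (1024, 768)), 512).size)  # (512, 384)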

Comments (0)