Skip to content

Commit 12b10bf

Browse files
authored
Merge branch 'comfyanonymous:master' into master
2 parents be1aed6 + dafbe32 commit 12b10bf

7 files changed

Lines changed: 78 additions & 7 deletions

File tree

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,2 @@
1+
.\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build --fast
2+
pause

.github/workflows/windows_release_nightly_pytorch.yml

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -67,6 +67,7 @@ jobs:
6767
mkdir update
6868
cp -r ComfyUI/.ci/update_windows/* ./update/
6969
cp -r ComfyUI/.ci/windows_base_files/* ./
70+
cp -r ComfyUI/.ci/windows_nightly_base_files/* ./
7071
7172
echo "call update_comfyui.bat nopause
7273
..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cu${{ inputs.cu }} -r ../ComfyUI/requirements.txt pygit2

comfy/model_management.py

Lines changed: 3 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -987,16 +987,13 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True, ma
987987
if props.major < 6:
988988
return False
989989

990-
fp16_works = False
991-
#FP16 is confirmed working on a 1080 (GP104) but it's a bit slower than FP32 so it should only be enabled
992-
#when the model doesn't actually fit on the card
993-
#TODO: actually test if GP106 and others have the same type of behavior
990+
#FP16 is confirmed working on a 1080 (GP104) and on latest pytorch actually seems faster than fp32
994991
nvidia_10_series = ["1080", "1070", "titan x", "p3000", "p3200", "p4000", "p4200", "p5000", "p5200", "p6000", "1060", "1050", "p40", "p100", "p6", "p4"]
995992
for x in nvidia_10_series:
996993
if x in props.name.lower():
997-
fp16_works = True
994+
return True
998995

999-
if fp16_works or manual_cast:
996+
if manual_cast:
1000997
free_model_memory = maximum_vram_for_weights(device)
1001998
if (not prioritize_performance) or model_params * 4 > free_model_memory:
1002999
return True

execution.py

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -47,7 +47,8 @@ def get(self, node_id):
4747
self.is_changed[node_id] = node["is_changed"]
4848
return self.is_changed[node_id]
4949

50-
input_data_all, _ = get_input_data(node["inputs"], class_def, node_id, self.outputs_cache)
50+
# Intentionally do not use cached outputs here. We only want constants in IS_CHANGED
51+
input_data_all, _ = get_input_data(node["inputs"], class_def, node_id, None)
5152
try:
5253
is_changed = _map_node_over_list(class_def, input_data_all, "IS_CHANGED")
5354
node["is_changed"] = [None if isinstance(x, ExecutionBlocker) else x for x in is_changed]

tests/inference/test_execution.py

Lines changed: 19 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -459,3 +459,22 @@ def test_output_reuse(self, client: ComfyClient, builder: GraphBuilder):
459459
assert len(images1) == 1, "Should have 1 image"
460460
assert len(images2) == 1, "Should have 1 image"
461461

462+
463+
# This tests that only constant outputs are used in the call to `IS_CHANGED`
464+
def test_is_changed_with_outputs(self, client: ComfyClient, builder: GraphBuilder):
465+
g = builder
466+
input1 = g.node("StubConstantImage", value=0.5, height=512, width=512, batch_size=1)
467+
test_node = g.node("TestIsChangedWithConstants", image=input1.out(0), value=0.5)
468+
469+
output = g.node("PreviewImage", images=test_node.out(0))
470+
471+
result = client.run(g)
472+
images = result.get_images(output)
473+
assert len(images) == 1, "Should have 1 image"
474+
assert numpy.array(images[0]).min() == 63 and numpy.array(images[0]).max() == 63, "Image should have value 0.25"
475+
476+
result = client.run(g)
477+
images = result.get_images(output)
478+
assert len(images) == 1, "Should have 1 image"
479+
assert numpy.array(images[0]).min() == 63 and numpy.array(images[0]).max() == 63, "Image should have value 0.25"
480+
assert not result.did_run(test_node), "The execution should have been cached"

tests/inference/testing_nodes/testing-pack/specific_tests.py

Lines changed: 27 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -95,6 +95,31 @@ def IS_CHANGED(cls, should_change=False, *args, **kwargs):
9595
else:
9696
return False
9797

98+
class TestIsChangedWithConstants:
    """Test node that scales an image by a constant factor.

    Its IS_CHANGED hook mirrors the computation so the executor can tell
    whether a re-execution would produce a different result.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "value": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "custom_is_changed"

    CATEGORY = "Testing/Nodes"

    def custom_is_changed(self, image, value):
        """Return a one-tuple containing `image` scaled by `value`."""
        scaled = image * value
        return (scaled,)

    @classmethod
    def IS_CHANGED(cls, image, value):
        """Fingerprint the inputs.

        `image` arrives as None when it is wired from another node's output
        (only constants are forwarded to IS_CHANGED); in that case the
        constant `value` alone identifies the node's state.
        """
        return value if image is None else image.mean().item() * value
122+
98123
class TestCustomValidation1:
99124
@classmethod
100125
def INPUT_TYPES(cls):
@@ -312,6 +337,7 @@ def mixed_expansion_returns(self, input1):
312337
"TestLazyMixImages": TestLazyMixImages,
313338
"TestVariadicAverage": TestVariadicAverage,
314339
"TestCustomIsChanged": TestCustomIsChanged,
340+
"TestIsChangedWithConstants": TestIsChangedWithConstants,
315341
"TestCustomValidation1": TestCustomValidation1,
316342
"TestCustomValidation2": TestCustomValidation2,
317343
"TestCustomValidation3": TestCustomValidation3,
@@ -325,6 +351,7 @@ def mixed_expansion_returns(self, input1):
325351
"TestLazyMixImages": "Lazy Mix Images",
326352
"TestVariadicAverage": "Variadic Average",
327353
"TestCustomIsChanged": "Custom IsChanged",
354+
"TestIsChangedWithConstants": "IsChanged With Constants",
328355
"TestCustomValidation1": "Custom Validation 1",
329356
"TestCustomValidation2": "Custom Validation 2",
330357
"TestCustomValidation3": "Custom Validation 3",

tests/inference/testing_nodes/testing-pack/stubs.py

Lines changed: 24 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -28,6 +28,28 @@ def stub_image(self, content, height, width, batch_size):
2828
elif content == "NOISE":
2929
return (torch.rand(batch_size, height, width, 3),)
3030

31+
class StubConstantImage:
    """Test stub emitting a batch of images filled with a single constant value."""

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "value": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                "height": ("INT", {"default": 512, "min": 1, "max": 1024 ** 3, "step": 1}),
                "width": ("INT", {"default": 512, "min": 1, "max": 4096 ** 3, "step": 1}),
                "batch_size": ("INT", {"default": 1, "min": 1, "max": 1024 ** 3, "step": 1}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "stub_constant_image"

    CATEGORY = "Testing/Stub Nodes"

    def stub_constant_image(self, value, height, width, batch_size):
        """Return a one-tuple with a (batch, height, width, 3) float tensor of `value`."""
        # float(value) keeps the fill a Python float so torch.full infers float32,
        # matching the dtype of torch.ones(...) * value.
        shape = (batch_size, height, width, 3)
        return (torch.full(shape, float(value)),)
52+
3153
class StubMask:
3254
def __init__(self):
3355
pass
@@ -93,12 +115,14 @@ def stub_float(self, value):
93115

94116
TEST_STUB_NODE_CLASS_MAPPINGS = {
95117
"StubImage": StubImage,
118+
"StubConstantImage": StubConstantImage,
96119
"StubMask": StubMask,
97120
"StubInt": StubInt,
98121
"StubFloat": StubFloat,
99122
}
100123
TEST_STUB_NODE_DISPLAY_NAME_MAPPINGS = {
101124
"StubImage": "Stub Image",
125+
"StubConstantImage": "Stub Constant Image",
102126
"StubMask": "Stub Mask",
103127
"StubInt": "Stub Int",
104128
"StubFloat": "Stub Float",

0 commit comments

Comments
 (0)