Release on 09.11.24
29 tests/README.md Normal file
@@ -0,0 +1,29 @@
# Automated Testing

## Running tests locally

Additional requirements for running tests:
```
pip install pytest
pip install websocket-client==1.6.1
pip install opencv-python==4.6.0.66
pip install scikit-image==0.21.0
```
Run inference tests:
```
pytest tests/inference
```

## Quality regression test
Compares images in two directories to ensure they are the same

1) Run an inference test to save a directory of "ground truth" images
```
pytest tests/inference --output_dir tests/inference/baseline
```
2) Make code edits

3) Run inference and quality comparison tests
```
pytest
```
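By default, the comparison reads ground-truth images from `tests/inference/baseline` and generated images from `tests/inference/samples`. A usage sketch (based on the options defined in `tests/compare/conftest.py`) for pointing the comparison at other directories:
```
pytest tests/compare --baseline_dir tests/inference/baseline --test_dir tests/inference/samples
```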
0 tests/__init__.py Normal file
41 tests/compare/conftest.py Normal file
@@ -0,0 +1,41 @@
import os
import pytest

# Command line arguments for pytest
def pytest_addoption(parser):
    parser.addoption('--baseline_dir', action="store", default='tests/inference/baseline', help='Directory for ground-truth images')
    parser.addoption('--test_dir', action="store", default='tests/inference/samples', help='Directory for images to test')
    parser.addoption('--metrics_file', action="store", default='tests/metrics.md', help='Output file for metrics')
    parser.addoption('--img_output_dir', action="store", default='tests/compare/samples', help='Output directory for diff metric images')

# This initializes args at the beginning of the test session
@pytest.fixture(scope="session", autouse=True)
def args_pytest(pytestconfig):
    args = {}
    args['baseline_dir'] = pytestconfig.getoption('baseline_dir')
    args['test_dir'] = pytestconfig.getoption('test_dir')
    args['metrics_file'] = pytestconfig.getoption('metrics_file')
    args['img_output_dir'] = pytestconfig.getoption('img_output_dir')

    # Initialize metrics file
    with open(args['metrics_file'], 'a') as f:
        # if file is empty, write header
        if os.stat(args['metrics_file']).st_size == 0:
            f.write("| date | run | file | status | value | \n")
            f.write("| --- | --- | --- | --- | --- | \n")

    return args


def gather_file_basenames(directory: str):
    files = []
    for file in os.listdir(directory):
        if file.endswith(".png"):
            files.append(file)
    return files

# Creates the list of baseline file names to use as a fixture
def pytest_generate_tests(metafunc):
    if "baseline_fname" in metafunc.fixturenames:
        baseline_fnames = gather_file_basenames(metafunc.config.getoption("baseline_dir"))
        metafunc.parametrize("baseline_fname", baseline_fnames)
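# Illustrative sketch (not part of this commit): a test that declares a
# "baseline_fname" argument is parametrized by pytest_generate_tests above,
# once per .png file found in --baseline_dir, e.g.:
#
#   def test_baseline_file_exists(args_pytest, baseline_fname):
#       assert os.path.exists(os.path.join(args_pytest['baseline_dir'], baseline_fname))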
195 tests/compare/test_quality.py Normal file
@@ -0,0 +1,195 @@
import datetime
import numpy as np
import os
from PIL import Image
import pytest
from pytest import fixture
from typing import Tuple, List

from cv2 import imread, cvtColor, COLOR_BGR2RGB
from skimage.metrics import structural_similarity as ssim


"""
This test suite compares images in 2 directories by file name
The directories are specified by the command line arguments --baseline_dir and --test_dir
"""
# ssim: Structural Similarity Index
# Returns a tuple of (ssim, diff_image)
def ssim_score(img0: np.ndarray, img1: np.ndarray) -> Tuple[float, np.ndarray]:
    score, diff = ssim(img0, img1, channel_axis=-1, full=True)
    # rescale the difference image to 0-255 range
    diff = (diff * 255).astype("uint8")
    return score, diff

# Metrics must return a tuple of (score, diff_image)
METRICS = {"ssim": ssim_score}
METRICS_PASS_THRESHOLD = {"ssim": 0.95}


class TestCompareImageMetrics:
    @fixture(scope="class")
    def test_file_names(self, args_pytest):
        test_dir = args_pytest['test_dir']
        fnames = self.gather_file_basenames(test_dir)
        yield fnames
        del fnames

    @fixture(scope="class", autouse=True)
    def teardown(self, args_pytest):
        yield
        # Runs after all tests are complete
        # Aggregate output files into a grid of images
        baseline_dir = args_pytest['baseline_dir']
        test_dir = args_pytest['test_dir']
        img_output_dir = args_pytest['img_output_dir']
        metrics_file = args_pytest['metrics_file']

        grid_dir = os.path.join(img_output_dir, "grid")
        os.makedirs(grid_dir, exist_ok=True)

        for metric_dir in METRICS.keys():
            metric_path = os.path.join(img_output_dir, metric_dir)
            for file in os.listdir(metric_path):
                if file.endswith(".png"):
                    score = self.lookup_score_from_fname(file, metrics_file)
                    image_file_list = []
                    image_file_list.append([
                        os.path.join(baseline_dir, file),
                        os.path.join(test_dir, file),
                        os.path.join(metric_path, file)
                    ])
                    # Create grid
                    image_list = [[Image.open(file) for file in files] for files in image_file_list]
                    grid = self.image_grid(image_list)
                    grid.save(os.path.join(grid_dir, f"{metric_dir}_{score:.3f}_{file}"))

    # Tests run for each baseline file name
    @fixture()
    def fname(self, baseline_fname):
        yield baseline_fname
        del baseline_fname

    def test_directories_not_empty(self, args_pytest):
        baseline_dir = args_pytest['baseline_dir']
        test_dir = args_pytest['test_dir']
        assert len(os.listdir(baseline_dir)) != 0, f"Baseline directory {baseline_dir} is empty"
        assert len(os.listdir(test_dir)) != 0, f"Test directory {test_dir} is empty"

    def test_dir_has_all_matching_metadata(self, fname, test_file_names, args_pytest):
        # Check that all files in baseline_dir have a file in test_dir with matching metadata
        baseline_file_path = os.path.join(args_pytest['baseline_dir'], fname)
        file_paths = [os.path.join(args_pytest['test_dir'], f) for f in test_file_names]
        file_match = self.find_file_match(baseline_file_path, file_paths)
        assert file_match is not None, f"Could not find a file in {args_pytest['test_dir']} with matching metadata to {baseline_file_path}"

    # For a baseline image file, finds the corresponding file name in test_dir and
    # compares the images using the metrics in METRICS
    @pytest.mark.parametrize("metric", METRICS.keys())
    def test_pipeline_compare(
        self,
        args_pytest,
        fname,
        test_file_names,
        metric,
    ):
        baseline_dir = args_pytest['baseline_dir']
        test_dir = args_pytest['test_dir']
        metrics_output_file = args_pytest['metrics_file']
        img_output_dir = args_pytest['img_output_dir']

        baseline_file_path = os.path.join(baseline_dir, fname)

        # Find file match
        file_paths = [os.path.join(test_dir, f) for f in test_file_names]
        test_file = self.find_file_match(baseline_file_path, file_paths)

        # Run metrics
        sample_baseline = self.read_img(baseline_file_path)
        sample_secondary = self.read_img(test_file)

        score, metric_img = METRICS[metric](sample_baseline, sample_secondary)
        metric_status = score > METRICS_PASS_THRESHOLD[metric]

        # Save metric values
        with open(metrics_output_file, 'a') as f:
            run_info = os.path.splitext(fname)[0]
            metric_status_str = "PASS ✅" if metric_status else "FAIL ❌"
            date_str = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            f.write(f"| {date_str} | {run_info} | {metric} | {metric_status_str} | {score} | \n")

        # Save metric image
        metric_img_dir = os.path.join(img_output_dir, metric)
        os.makedirs(metric_img_dir, exist_ok=True)
        output_filename = f'{fname}'
        Image.fromarray(metric_img).save(os.path.join(metric_img_dir, output_filename))

        assert score > METRICS_PASS_THRESHOLD[metric]

    def read_img(self, filename: str) -> np.ndarray:
        cvImg = imread(filename)
        cvImg = cvtColor(cvImg, COLOR_BGR2RGB)
        return cvImg

    def image_grid(self, img_list: list[list[Image.Image]]):
        # imgs is a 2D list of images
        # Assumes the input images are a rectangular grid of equal sized images
        rows = len(img_list)
        cols = len(img_list[0])

        w, h = img_list[0][0].size
        grid = Image.new('RGB', size=(cols*w, rows*h))

        for i, row in enumerate(img_list):
            for j, img in enumerate(row):
                grid.paste(img, box=(j*w, i*h))
        return grid

    def lookup_score_from_fname(self,
                                fname: str,
                                metrics_output_file: str
                                ) -> float:
        fname_basestr = os.path.splitext(fname)[0]
        with open(metrics_output_file, 'r') as f:
            for line in f:
                if fname_basestr in line:
                    score = float(line.split('|')[5])
                    return score
        raise ValueError(f"Could not find score for {fname} in {metrics_output_file}")

    def gather_file_basenames(self, directory: str):
        files = []
        for file in os.listdir(directory):
            if file.endswith(".png"):
                files.append(file)
        return files

    def read_file_prompt(self, fname: str) -> str:
        # Read prompt from image file metadata
        img = Image.open(fname)
        img.load()
        return img.info['prompt']

    def find_file_match(self, baseline_file: str, file_paths: List[str]):
        # Find a file in file_paths with matching metadata to baseline_file
        baseline_prompt = self.read_file_prompt(baseline_file)

        # Do not match empty prompts
        if baseline_prompt is None or baseline_prompt == "":
            return None

        # Find file match
        # Reorder test_file_names so that the file with matching name is first
        # This is an optimization because matching file names are more likely
        # to have matching metadata if they were generated with the same script
        basename = os.path.basename(baseline_file)
        file_path_basenames = [os.path.basename(f) for f in file_paths]
        if basename in file_path_basenames:
            match_index = file_path_basenames.index(basename)
            file_paths.insert(0, file_paths.pop(match_index))

        for f in file_paths:
            test_file_prompt = self.read_file_prompt(f)
            if baseline_prompt == test_file_prompt:
                return f
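# Standalone usage sketch (illustrative; the image paths are hypothetical): the
# module-level ssim_score helper can be exercised directly on two images loaded
# with read_img:
#
#   tc = TestCompareImageMetrics()
#   img0 = tc.read_img("tests/inference/baseline/run_00001_.png")
#   img1 = tc.read_img("tests/inference/samples/run_00001_.png")
#   score, diff = ssim_score(img0, img1)  # score in [-1, 1], diff is a uint8 image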
36 tests/conftest.py Normal file
@@ -0,0 +1,36 @@
import os
import pytest

# Command line arguments for pytest
def pytest_addoption(parser):
    parser.addoption('--output_dir', action="store", default='tests/inference/samples', help='Output directory for generated images')
    parser.addoption("--listen", type=str, default="127.0.0.1", metavar="IP", nargs="?", const="0.0.0.0", help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)")
    parser.addoption("--port", type=int, default=8188, help="Set the listen port.")

# This initializes args at the beginning of the test session
@pytest.fixture(scope="session", autouse=True)
def args_pytest(pytestconfig):
    args = {}
    args['output_dir'] = pytestconfig.getoption('output_dir')
    args['listen'] = pytestconfig.getoption('listen')
    args['port'] = pytestconfig.getoption('port')

    os.makedirs(args['output_dir'], exist_ok=True)

    return args

def pytest_collection_modifyitems(items):
    # Modifies items so tests run in the correct order

    LAST_TESTS = ['test_quality']

    # Move the last items to the end
    last_items = []
    for test_name in LAST_TESTS:
        for item in items.copy():
            print(item.module.__name__, item)
            if item.module.__name__ == test_name:
                last_items.append(item)
                items.remove(item)

    items.extend(last_items)
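# Usage sketch (assuming the defaults above): run the inference tests against a
# server on a non-default port and collect outputs into the baseline directory:
#
#   pytest tests/inference --port 8189 --output_dir tests/inference/baseline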
0 tests/inference/__init__.py Normal file
4 tests/inference/extra_model_paths.yaml Normal file
@@ -0,0 +1,4 @@
# Config for testing nodes
testing:
    custom_nodes: tests/inference/testing_nodes
144 tests/inference/graphs/default_graph_sdxl1_0.json Normal file
@@ -0,0 +1,144 @@
{
    "4": {
        "inputs": {
            "ckpt_name": "sd_xl_base_1.0.safetensors"
        },
        "class_type": "CheckpointLoaderSimple"
    },
    "5": {
        "inputs": {
            "width": 1024,
            "height": 1024,
            "batch_size": 1
        },
        "class_type": "EmptyLatentImage"
    },
    "6": {
        "inputs": {
            "text": "a photo of a cat",
            "clip": [
                "4",
                1
            ]
        },
        "class_type": "CLIPTextEncode"
    },
    "10": {
        "inputs": {
            "add_noise": "enable",
            "noise_seed": 42,
            "steps": 20,
            "cfg": 7.5,
            "sampler_name": "euler",
            "scheduler": "normal",
            "start_at_step": 0,
            "end_at_step": 32,
            "return_with_leftover_noise": "enable",
            "model": [
                "4",
                0
            ],
            "positive": [
                "6",
                0
            ],
            "negative": [
                "15",
                0
            ],
            "latent_image": [
                "5",
                0
            ]
        },
        "class_type": "KSamplerAdvanced"
    },
    "12": {
        "inputs": {
            "samples": [
                "14",
                0
            ],
            "vae": [
                "4",
                2
            ]
        },
        "class_type": "VAEDecode"
    },
    "13": {
        "inputs": {
            "filename_prefix": "test_inference",
            "images": [
                "12",
                0
            ]
        },
        "class_type": "SaveImage"
    },
    "14": {
        "inputs": {
            "add_noise": "disable",
            "noise_seed": 42,
            "steps": 20,
            "cfg": 7.5,
            "sampler_name": "euler",
            "scheduler": "normal",
            "start_at_step": 32,
            "end_at_step": 10000,
            "return_with_leftover_noise": "disable",
            "model": [
                "16",
                0
            ],
            "positive": [
                "17",
                0
            ],
            "negative": [
                "20",
                0
            ],
            "latent_image": [
                "10",
                0
            ]
        },
        "class_type": "KSamplerAdvanced"
    },
    "15": {
        "inputs": {
            "conditioning": [
                "6",
                0
            ]
        },
        "class_type": "ConditioningZeroOut"
    },
    "16": {
        "inputs": {
            "ckpt_name": "sd_xl_refiner_1.0.safetensors"
        },
        "class_type": "CheckpointLoaderSimple"
    },
    "17": {
        "inputs": {
            "text": "a photo of a cat",
            "clip": [
                "16",
                1
            ]
        },
        "class_type": "CLIPTextEncode"
    },
    "20": {
        "inputs": {
            "text": "",
            "clip": [
                "16",
                1
            ]
        },
        "class_type": "CLIPTextEncode"
    }
}
524 tests/inference/test_execution.py Normal file
@@ -0,0 +1,524 @@
from io import BytesIO
import numpy
from PIL import Image
import pytest
from pytest import fixture
import time
import torch
from typing import Union, Dict
import json
import subprocess
import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
import uuid
import urllib.request
import urllib.parse
import urllib.error
from comfy_execution.graph_utils import GraphBuilder, Node

class RunResult:
    def __init__(self, prompt_id: str):
        self.outputs: Dict[str,Dict] = {}
        self.runs: Dict[str,bool] = {}
        self.prompt_id: str = prompt_id

    def get_output(self, node: Node):
        return self.outputs.get(node.id, None)

    def did_run(self, node: Node):
        return self.runs.get(node.id, False)

    def get_images(self, node: Node):
        output = self.get_output(node)
        if output is None:
            return []
        return output.get('image_objects', [])

    def get_prompt_id(self):
        return self.prompt_id

class ComfyClient:
    def __init__(self):
        self.test_name = ""

    def connect(self,
                listen: str = '127.0.0.1',
                port: Union[str, int] = 8188,
                client_id: str = str(uuid.uuid4())
                ):
        self.client_id = client_id
        self.server_address = f"{listen}:{port}"
        ws = websocket.WebSocket()
        ws.connect("ws://{}/ws?clientId={}".format(self.server_address, self.client_id))
        self.ws = ws

    def queue_prompt(self, prompt):
        p = {"prompt": prompt, "client_id": self.client_id}
        data = json.dumps(p).encode('utf-8')
        req = urllib.request.Request("http://{}/prompt".format(self.server_address), data=data)
        return json.loads(urllib.request.urlopen(req).read())

    def get_image(self, filename, subfolder, folder_type):
        data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
        url_values = urllib.parse.urlencode(data)
        with urllib.request.urlopen("http://{}/view?{}".format(self.server_address, url_values)) as response:
            return response.read()

    def get_history(self, prompt_id):
        with urllib.request.urlopen("http://{}/history/{}".format(self.server_address, prompt_id)) as response:
            return json.loads(response.read())

    def set_test_name(self, name):
        self.test_name = name

    def run(self, graph):
        prompt = graph.finalize()
        for node in graph.nodes.values():
            if node.class_type == 'SaveImage':
                node.inputs['filename_prefix'] = self.test_name

        prompt_id = self.queue_prompt(prompt)['prompt_id']
        result = RunResult(prompt_id)
        while True:
            out = self.ws.recv()
            if isinstance(out, str):
                message = json.loads(out)
                if message['type'] == 'executing':
                    data = message['data']
                    if data['prompt_id'] != prompt_id:
                        continue
                    if data['node'] is None:
                        break
                    result.runs[data['node']] = True
                elif message['type'] == 'execution_error':
                    raise Exception(message['data'])
                elif message['type'] == 'execution_cached':
                    pass # Probably want to store this off for testing

        history = self.get_history(prompt_id)[prompt_id]
        for node_id in history['outputs']:
            node_output = history['outputs'][node_id]
            result.outputs[node_id] = node_output
            images_output = []
            if 'images' in node_output:
                for image in node_output['images']:
                    image_data = self.get_image(image['filename'], image['subfolder'], image['type'])
                    image_obj = Image.open(BytesIO(image_data))
                    images_output.append(image_obj)
                node_output['image_objects'] = images_output

        return result

#
# Loop through these variables
#
@pytest.mark.execution
class TestExecution:
    #
    # Initialize server and client
    #
    @fixture(scope="class", autouse=True, params=[
        # (use_lru, lru_size)
        (False, 0),
        (True, 0),
        (True, 100),
    ])
    def _server(self, args_pytest, request):
        # Start server
        pargs = [
            'python', 'main.py',
            '--output-directory', args_pytest["output_dir"],
            '--listen', args_pytest["listen"],
            '--port', str(args_pytest["port"]),
            '--extra-model-paths-config', 'tests/inference/extra_model_paths.yaml',
        ]
        use_lru, lru_size = request.param
        if use_lru:
            pargs += ['--cache-lru', str(lru_size)]
        print("Running server with args:", pargs)
        p = subprocess.Popen(pargs)
        yield
        p.kill()
        torch.cuda.empty_cache()

    def start_client(self, listen: str, port: int):
        # Start client
        comfy_client = ComfyClient()
        # Connect to server (with retries)
        n_tries = 5
        for i in range(n_tries):
            time.sleep(4)
            try:
                comfy_client.connect(listen=listen, port=port)
            except ConnectionRefusedError as e:
                print(e)
                print(f"({i+1}/{n_tries}) Retrying...")
            else:
                break
        return comfy_client

    @fixture(scope="class", autouse=True)
    def shared_client(self, args_pytest, _server):
        client = self.start_client(args_pytest["listen"], args_pytest["port"])
        yield client
        del client
        torch.cuda.empty_cache()

    @fixture
    def client(self, shared_client, request):
        shared_client.set_test_name(f"execution[{request.node.name}]")
        yield shared_client

    @fixture
    def builder(self, request):
        yield GraphBuilder(prefix=request.node.name)

    def test_lazy_input(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
        input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
        mask = g.node("StubMask", value=0.0, height=512, width=512, batch_size=1)

        lazy_mix = g.node("TestLazyMixImages", image1=input1.out(0), image2=input2.out(0), mask=mask.out(0))
        output = g.node("SaveImage", images=lazy_mix.out(0))
        result = client.run(g)

        result_image = result.get_images(output)[0]
        assert numpy.array(result_image).any() == 0, "Image should be black"
        assert result.did_run(input1)
        assert not result.did_run(input2)
        assert result.did_run(mask)
        assert result.did_run(lazy_mix)

    def test_full_cache(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
        input2 = g.node("StubImage", content="NOISE", height=512, width=512, batch_size=1)
        mask = g.node("StubMask", value=0.5, height=512, width=512, batch_size=1)

        lazy_mix = g.node("TestLazyMixImages", image1=input1.out(0), image2=input2.out(0), mask=mask.out(0))
        g.node("SaveImage", images=lazy_mix.out(0))

        client.run(g)
        result2 = client.run(g)
        for node_id, node in g.nodes.items():
            assert not result2.did_run(node), f"Node {node_id} ran, but should have been cached"

    def test_partial_cache(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
        input2 = g.node("StubImage", content="NOISE", height=512, width=512, batch_size=1)
        mask = g.node("StubMask", value=0.5, height=512, width=512, batch_size=1)

        lazy_mix = g.node("TestLazyMixImages", image1=input1.out(0), image2=input2.out(0), mask=mask.out(0))
        g.node("SaveImage", images=lazy_mix.out(0))

        client.run(g)
        mask.inputs['value'] = 0.4
        result2 = client.run(g)
        assert not result2.did_run(input1), "Input1 should have been cached"
        assert not result2.did_run(input2), "Input2 should have been cached"

    def test_error(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
        # Different size of the two images
        input2 = g.node("StubImage", content="NOISE", height=256, width=256, batch_size=1)
        mask = g.node("StubMask", value=0.5, height=512, width=512, batch_size=1)

        lazy_mix = g.node("TestLazyMixImages", image1=input1.out(0), image2=input2.out(0), mask=mask.out(0))
        g.node("SaveImage", images=lazy_mix.out(0))

        try:
            client.run(g)
            assert False, "Should have raised an error"
        except Exception as e:
            assert 'prompt_id' in e.args[0], f"Did not get back a proper error message: {e}"

    @pytest.mark.parametrize("test_value, expect_error", [
        (5, True),
        ("foo", True),
        (5.0, False),
    ])
    def test_validation_error_literal(self, test_value, expect_error, client: ComfyClient, builder: GraphBuilder):
        g = builder
        validation1 = g.node("TestCustomValidation1", input1=test_value, input2=3.0)
        g.node("SaveImage", images=validation1.out(0))

        if expect_error:
            with pytest.raises(urllib.error.HTTPError):
                client.run(g)
        else:
            client.run(g)

    @pytest.mark.parametrize("test_type, test_value", [
        ("StubInt", 5),
        ("StubFloat", 5.0)
    ])
    def test_validation_error_edge1(self, test_type, test_value, client: ComfyClient, builder: GraphBuilder):
        g = builder
        stub = g.node(test_type, value=test_value)
        validation1 = g.node("TestCustomValidation1", input1=stub.out(0), input2=3.0)
        g.node("SaveImage", images=validation1.out(0))

        with pytest.raises(urllib.error.HTTPError):
            client.run(g)

    @pytest.mark.parametrize("test_type, test_value, expect_error", [
        ("StubInt", 5, True),
        ("StubFloat", 5.0, False)
    ])
    def test_validation_error_edge2(self, test_type, test_value, expect_error, client: ComfyClient, builder: GraphBuilder):
        g = builder
        stub = g.node(test_type, value=test_value)
        validation2 = g.node("TestCustomValidation2", input1=stub.out(0), input2=3.0)
        g.node("SaveImage", images=validation2.out(0))

        if expect_error:
            with pytest.raises(urllib.error.HTTPError):
                client.run(g)
        else:
            client.run(g)

    @pytest.mark.parametrize("test_type, test_value, expect_error", [
        ("StubInt", 5, True),
        ("StubFloat", 5.0, False)
    ])
    def test_validation_error_edge3(self, test_type, test_value, expect_error, client: ComfyClient, builder: GraphBuilder):
        g = builder
        stub = g.node(test_type, value=test_value)
        validation3 = g.node("TestCustomValidation3", input1=stub.out(0), input2=3.0)
        g.node("SaveImage", images=validation3.out(0))

        if expect_error:
            with pytest.raises(urllib.error.HTTPError):
                client.run(g)
        else:
            client.run(g)

    @pytest.mark.parametrize("test_type, test_value, expect_error", [
        ("StubInt", 5, True),
        ("StubFloat", 5.0, False)
    ])
    def test_validation_error_edge4(self, test_type, test_value, expect_error, client: ComfyClient, builder: GraphBuilder):
        g = builder
        stub = g.node(test_type, value=test_value)
        validation4 = g.node("TestCustomValidation4", input1=stub.out(0), input2=3.0)
        g.node("SaveImage", images=validation4.out(0))

        if expect_error:
            with pytest.raises(urllib.error.HTTPError):
                client.run(g)
        else:
            client.run(g)

    @pytest.mark.parametrize("test_value1, test_value2, expect_error", [
        (0.0, 0.5, False),
        (0.0, 5.0, False),
        (0.0, 7.0, True)
    ])
    def test_validation_error_kwargs(self, test_value1, test_value2, expect_error, client: ComfyClient, builder: GraphBuilder):
        g = builder
        validation5 = g.node("TestCustomValidation5", input1=test_value1, input2=test_value2)
        g.node("SaveImage", images=validation5.out(0))

        if expect_error:
            with pytest.raises(urllib.error.HTTPError):
                client.run(g)
        else:
            client.run(g)

    def test_cycle_error(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
        input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
        mask = g.node("StubMask", value=0.5, height=512, width=512, batch_size=1)

        lazy_mix1 = g.node("TestLazyMixImages", image1=input1.out(0), mask=mask.out(0))
        lazy_mix2 = g.node("TestLazyMixImages", image1=lazy_mix1.out(0), image2=input2.out(0), mask=mask.out(0))
        g.node("SaveImage", images=lazy_mix2.out(0))

        # When the cycle exists on initial submission, it should raise a validation error
        with pytest.raises(urllib.error.HTTPError):
            client.run(g)

    def test_dynamic_cycle_error(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
        input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
        generator = g.node("TestDynamicDependencyCycle", input1=input1.out(0), input2=input2.out(0))
        g.node("SaveImage", images=generator.out(0))

        # When the cycle is in a graph that is generated dynamically, it should raise a runtime error
        try:
            client.run(g)
            assert False, "Should have raised an error"
        except Exception as e:
            assert 'prompt_id' in e.args[0], f"Did not get back a proper error message: {e}"
            assert e.args[0]['node_id'] == generator.id, "Error should have been on the generator node"

    def test_missing_node_error(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
        input2 = g.node("StubImage", id="removeme", content="WHITE", height=512, width=512, batch_size=1)
        input3 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
        mask = g.node("StubMask", value=0.5, height=512, width=512, batch_size=1)
        mix1 = g.node("TestLazyMixImages", image1=input1.out(0), image2=input2.out(0), mask=mask.out(0))
        mix2 = g.node("TestLazyMixImages", image1=input1.out(0), image2=input3.out(0), mask=mask.out(0))
        # We have multiple outputs. The first is invalid, but the second is valid
        g.node("SaveImage", images=mix1.out(0))
        g.node("SaveImage", images=mix2.out(0))
        g.remove_node("removeme")

        client.run(g)

        # Add back in the missing node to make sure the error doesn't break the server
        input2 = g.node("StubImage", id="removeme", content="WHITE", height=512, width=512, batch_size=1)
        client.run(g)

    def test_custom_is_changed(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        # Creating the nodes in this specific order previously caused a bug
        save = g.node("SaveImage")
        is_changed = g.node("TestCustomIsChanged", should_change=False)
        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)

        save.set_input('images', is_changed.out(0))
        is_changed.set_input('image', input1.out(0))

        result1 = client.run(g)
        result2 = client.run(g)
        is_changed.set_input('should_change', True)
        result3 = client.run(g)
        result4 = client.run(g)
        assert result1.did_run(is_changed), "is_changed should have been run"
        assert not result2.did_run(is_changed), "is_changed should have been cached"
        assert result3.did_run(is_changed), "is_changed should have been re-run"
        assert result4.did_run(is_changed), "is_changed should not have been cached"

    def test_undeclared_inputs(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
        input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
        input3 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
        input4 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
        average = g.node("TestVariadicAverage", input1=input1.out(0), input2=input2.out(0), input3=input3.out(0), input4=input4.out(0))
        output = g.node("SaveImage", images=average.out(0))

        result = client.run(g)
        result_image = result.get_images(output)[0]
        expected = 255 // 4
        assert numpy.array(result_image).min() == expected and numpy.array(result_image).max() == expected, "Image should be grey"

    def test_for_loop(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        iterations = 4
        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
        input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
        is_changed = g.node("TestCustomIsChanged", should_change=True, image=input2.out(0))
        for_open = g.node("TestForLoopOpen", remaining=iterations, initial_value1=is_changed.out(0))
        average = g.node("TestVariadicAverage", input1=input1.out(0), input2=for_open.out(2))
        for_close = g.node("TestForLoopClose", flow_control=for_open.out(0), initial_value1=average.out(0))
        output = g.node("SaveImage", images=for_close.out(0))

        for iterations in range(1, 5):
            for_open.set_input('remaining', iterations)
            result = client.run(g)
            result_image = result.get_images(output)[0]
            expected = 255 // (2 ** iterations)
            assert numpy.array(result_image).min() == expected and numpy.array(result_image).max() == expected, "Image should be grey"
            assert result.did_run(is_changed)

    def test_mixed_expansion_returns(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        val_list = g.node("TestMakeListNode", value1=0.1, value2=0.2, value3=0.3)
        mixed = g.node("TestMixedExpansionReturns", input1=val_list.out(0))
        output_dynamic = g.node("SaveImage", images=mixed.out(0))
        output_literal = g.node("SaveImage", images=mixed.out(1))

        result = client.run(g)
        images_dynamic = result.get_images(output_dynamic)
        assert len(images_dynamic) == 3, "Should have 3 images"
        assert numpy.array(images_dynamic[0]).min() == 25 and numpy.array(images_dynamic[0]).max() == 25, "First image should be 0.1"
        assert numpy.array(images_dynamic[1]).min() == 51 and numpy.array(images_dynamic[1]).max() == 51, "Second image should be 0.2"
        assert numpy.array(images_dynamic[2]).min() == 76 and numpy.array(images_dynamic[2]).max() == 76, "Third image should be 0.3"

        images_literal = result.get_images(output_literal)
        assert len(images_literal) == 3, "Should have 3 images"
        for i in range(3):
            assert numpy.array(images_literal[i]).min() == 255 and numpy.array(images_literal[i]).max() == 255, "All images should be white"

    def test_mixed_lazy_results(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        val_list = g.node("TestMakeListNode", value1=0.0, value2=0.5, value3=1.0)
        mask = g.node("StubMask", value=val_list.out(0), height=512, width=512, batch_size=1)
        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
        input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
        mix = g.node("TestLazyMixImages", image1=input1.out(0), image2=input2.out(0), mask=mask.out(0))
        rebatch = g.node("RebatchImages", images=mix.out(0), batch_size=3)
        output = g.node("SaveImage", images=rebatch.out(0))

        result = client.run(g)
        images = result.get_images(output)
        assert len(images) == 3, "Should have 3 images"
        assert numpy.array(images[0]).min() == 0 and numpy.array(images[0]).max() == 0, "First image should be 0.0"
        assert numpy.array(images[1]).min() == 127 and numpy.array(images[1]).max() == 127, "Second image should be 0.5"
        assert numpy.array(images[2]).min() == 255 and numpy.array(images[2]).max() == 255, "Third image should be 1.0"

    def test_output_reuse(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)

        output1 = g.node("SaveImage", images=input1.out(0))
        output2 = g.node("SaveImage", images=input1.out(0))

        result = client.run(g)
        images1 = result.get_images(output1)
        images2 = result.get_images(output2)
        assert len(images1) == 1, "Should have 1 image"
        assert len(images2) == 1, "Should have 1 image"


    # This tests that only constant outputs are used in the call to `IS_CHANGED`
    def test_is_changed_with_outputs(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        input1 = g.node("StubConstantImage", value=0.5, height=512, width=512, batch_size=1)
        test_node = g.node("TestIsChangedWithConstants", image=input1.out(0), value=0.5)

        output = g.node("PreviewImage", images=test_node.out(0))

        result = client.run(g)
        images = result.get_images(output)
        assert len(images) == 1, "Should have 1 image"
        assert numpy.array(images[0]).min() == 63 and numpy.array(images[0]).max() == 63, "Image should have value 0.25"

        result = client.run(g)
        images = result.get_images(output)
        assert len(images) == 1, "Should have 1 image"
        assert numpy.array(images[0]).min() == 63 and numpy.array(images[0]).max() == 63, "Image should have value 0.25"
        assert not result.did_run(test_node), "The execution should have been cached"

    # This tests that nodes with OUTPUT_IS_LIST function correctly when they receive an ExecutionBlocker
    # as input. We also test that when that list (containing an ExecutionBlocker) is passed to a node,
    # only that one entry in the list is blocked.
    def test_execution_block_list_output(self, client: ComfyClient, builder: GraphBuilder):
        g = builder
        image1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
        image2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
        image3 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
        image_list = g.node("TestMakeListNode", value1=image1.out(0), value2=image2.out(0), value3=image3.out(0))
        int1 = g.node("StubInt", value=1)
        int2 = g.node("StubInt", value=2)
        int3 = g.node("StubInt", value=3)
        int_list = g.node("TestMakeListNode", value1=int1.out(0), value2=int2.out(0), value3=int3.out(0))
        compare = g.node("TestIntConditions", a=int_list.out(0), b=2, operation="==")
        blocker = g.node("TestExecutionBlocker", input=image_list.out(0), block=compare.out(0), verbose=False)

        list_output = g.node("TestMakeListNode", value1=blocker.out(0))
        output = g.node("PreviewImage", images=list_output.out(0))

        result = client.run(g)
        assert result.did_run(output), "The execution should have run"
        images = result.get_images(output)
        assert len(images) == 2, "Should have 2 images"
        assert numpy.array(images[0]).min() == 0 and numpy.array(images[0]).max() == 0, "First image should be black"
        assert numpy.array(images[1]).min() == 0 and numpy.array(images[1]).max() == 0, "Second image should also be black"
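# Minimal usage sketch (illustrative; assumes a ComfyUI server is already running
# on 127.0.0.1:8188 with the testing nodes loaded):
#
#   client = ComfyClient()
#   client.connect()
#   g = GraphBuilder()
#   image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
#   save = g.node("SaveImage", images=image.out(0))
#   result = client.run(g)
#   assert result.did_run(image) and len(result.get_images(save)) == 1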
238 tests/inference/test_inference.py Normal file
@@ -0,0 +1,238 @@
from copy import deepcopy
from io import BytesIO
import numpy
import os
from PIL import Image
import pytest
from pytest import fixture
import time
import torch
from typing import Union
import json
import subprocess
import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
import uuid
import urllib.request
import urllib.parse


from comfy.samplers import KSampler

"""
These tests generate and save images through a range of parameters
"""

class ComfyGraph:
    def __init__(self,
                 graph: dict,
                 sampler_nodes: list[str],
                 ):
        self.graph = graph
        self.sampler_nodes = sampler_nodes

    def set_prompt(self, prompt, negative_prompt=None):
        # Sets the prompt for the sampler nodes (eg. base and refiner)
        for node in self.sampler_nodes:
            prompt_node = self.graph[node]['inputs']['positive'][0]
            self.graph[prompt_node]['inputs']['text'] = prompt
            if negative_prompt:
                negative_prompt_node = self.graph[node]['inputs']['negative'][0]
                self.graph[negative_prompt_node]['inputs']['text'] = negative_prompt

    def set_sampler_name(self, sampler_name: str):
        # Sets the sampler name for the sampler nodes (eg. base and refiner)
        for node in self.sampler_nodes:
            self.graph[node]['inputs']['sampler_name'] = sampler_name

    def set_scheduler(self, scheduler: str):
        # Sets the scheduler for the sampler nodes (eg. base and refiner)
        for node in self.sampler_nodes:
            self.graph[node]['inputs']['scheduler'] = scheduler

    def set_filename_prefix(self, prefix: str):
        # Sets the filename prefix for the save nodes
        for node in self.graph:
            if self.graph[node]['class_type'] == 'SaveImage':
                self.graph[node]['inputs']['filename_prefix'] = prefix


class ComfyClient:
    # From examples/websockets_api_example.py

    def connect(self,
                listen: str = '127.0.0.1',
                port: Union[str, int] = 8188,
                client_id: str = str(uuid.uuid4())
                ):
        self.client_id = client_id
        self.server_address = f"{listen}:{port}"
        ws = websocket.WebSocket()
        ws.connect("ws://{}/ws?clientId={}".format(self.server_address, self.client_id))
        self.ws = ws

    def queue_prompt(self, prompt):
        p = {"prompt": prompt, "client_id": self.client_id}
        data = json.dumps(p).encode('utf-8')
        req = urllib.request.Request("http://{}/prompt".format(self.server_address), data=data)
        return json.loads(urllib.request.urlopen(req).read())

    def get_image(self, filename, subfolder, folder_type):
        data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
        url_values = urllib.parse.urlencode(data)
        with urllib.request.urlopen("http://{}/view?{}".format(self.server_address, url_values)) as response:
            return response.read()

    def get_history(self, prompt_id):
        with urllib.request.urlopen("http://{}/history/{}".format(self.server_address, prompt_id)) as response:
            return json.loads(response.read())

    def get_images(self, graph, save=True):
        prompt = graph
        if not save:
            # Replace save nodes with preview nodes
            prompt_str = json.dumps(prompt)
            prompt_str = prompt_str.replace('SaveImage', 'PreviewImage')
            prompt = json.loads(prompt_str)

        prompt_id = self.queue_prompt(prompt)['prompt_id']
        output_images = {}
        while True:
            out = self.ws.recv()
            if isinstance(out, str):
                message = json.loads(out)
                if message['type'] == 'executing':
                    data = message['data']
                    if data['node'] is None and data['prompt_id'] == prompt_id:
                        break #Execution is done
            else:
                continue #previews are binary data

        history = self.get_history(prompt_id)[prompt_id]
        for node_id in history['outputs']:
            node_output = history['outputs'][node_id]
            images_output = []
            if 'images' in node_output:
                for image in node_output['images']:
                    image_data = self.get_image(image['filename'], image['subfolder'], image['type'])
                    images_output.append(image_data)
            output_images[node_id] = images_output

        return output_images

#
# Initialize graphs
#
default_graph_file = 'tests/inference/graphs/default_graph_sdxl1_0.json'
with open(default_graph_file, 'r') as file:
    default_graph = json.loads(file.read())
DEFAULT_COMFY_GRAPH = ComfyGraph(graph=default_graph, sampler_nodes=['10','14'])
DEFAULT_COMFY_GRAPH_ID = os.path.splitext(os.path.basename(default_graph_file))[0]

#
# Loop through these variables
#
comfy_graph_list = [DEFAULT_COMFY_GRAPH]
comfy_graph_ids = [DEFAULT_COMFY_GRAPH_ID]
prompt_list = [
    'a painting of a cat',
]

sampler_list = KSampler.SAMPLERS
scheduler_list = KSampler.SCHEDULERS

@pytest.mark.inference
@pytest.mark.parametrize("sampler", sampler_list)
@pytest.mark.parametrize("scheduler", scheduler_list)
@pytest.mark.parametrize("prompt", prompt_list)
class TestInference:
    #
    # Initialize server and client
    #
    @fixture(scope="class", autouse=True)
    def _server(self, args_pytest):
        # Start server
        p = subprocess.Popen([
            'python', 'main.py',
            '--output-directory', args_pytest["output_dir"],
            '--listen', args_pytest["listen"],
            '--port', str(args_pytest["port"]),
        ])
        yield
        p.kill()
        torch.cuda.empty_cache()

    def start_client(self, listen: str, port: int):
        # Start client
        comfy_client = ComfyClient()
        # Connect to server (with retries)
        n_tries = 5
        for i in range(n_tries):
            time.sleep(4)
            try:
                comfy_client.connect(listen=listen, port=port)
            except ConnectionRefusedError as e:
                print(e)
                print(f"({i+1}/{n_tries}) Retrying...")
            else:
                break
        return comfy_client

    #
    # Client and graph fixtures with server warmup
    #
    # Returns a "_client_graph", which is a client-graph pair corresponding to an initialized server
    # The "graph" is the default graph
    @fixture(scope="class", params=comfy_graph_list, ids=comfy_graph_ids, autouse=True)
    def _client_graph(self, request, args_pytest, _server) -> (ComfyClient, ComfyGraph):
        comfy_graph = request.param

        # Start client
        comfy_client = self.start_client(args_pytest["listen"], args_pytest["port"])

        # Warm up pipeline
        comfy_client.get_images(graph=comfy_graph.graph, save=False)

        yield comfy_client, comfy_graph
        del comfy_client
        del comfy_graph
        torch.cuda.empty_cache()

    @fixture
    def client(self, _client_graph):
        client = _client_graph[0]
        yield client

    @fixture
    def comfy_graph(self, _client_graph):
        # avoid mutating the graph
        graph = deepcopy(_client_graph[1])
        yield graph

    def test_comfy(
        self,
        client,
        comfy_graph,
        sampler,
        scheduler,
        prompt,
        request
    ):
        test_info = request.node.name
        comfy_graph.set_filename_prefix(test_info)
        # Settings for comfy graph
        comfy_graph.set_sampler_name(sampler)
        comfy_graph.set_scheduler(scheduler)
        comfy_graph.set_prompt(prompt)

        # Generate
        images = client.get_images(comfy_graph.graph)

        assert len(images) != 0, "No images generated"
        # assert all images are not blank
        for images_output in images.values():
            for image_data in images_output:
                pil_image = Image.open(BytesIO(image_data))
                assert numpy.array(pil_image).any() != 0, "Image is blank"
23 tests/inference/testing_nodes/testing-pack/__init__.py Normal file
@@ -0,0 +1,23 @@
from .specific_tests import TEST_NODE_CLASS_MAPPINGS, TEST_NODE_DISPLAY_NAME_MAPPINGS
from .flow_control import FLOW_CONTROL_NODE_CLASS_MAPPINGS, FLOW_CONTROL_NODE_DISPLAY_NAME_MAPPINGS
from .util import UTILITY_NODE_CLASS_MAPPINGS, UTILITY_NODE_DISPLAY_NAME_MAPPINGS
from .conditions import CONDITION_NODE_CLASS_MAPPINGS, CONDITION_NODE_DISPLAY_NAME_MAPPINGS
from .stubs import TEST_STUB_NODE_CLASS_MAPPINGS, TEST_STUB_NODE_DISPLAY_NAME_MAPPINGS

# NODE_CLASS_MAPPINGS = GENERAL_NODE_CLASS_MAPPINGS.update(COMPONENT_NODE_CLASS_MAPPINGS)
# NODE_DISPLAY_NAME_MAPPINGS = GENERAL_NODE_DISPLAY_NAME_MAPPINGS.update(COMPONENT_NODE_DISPLAY_NAME_MAPPINGS)

NODE_CLASS_MAPPINGS = {}
NODE_CLASS_MAPPINGS.update(TEST_NODE_CLASS_MAPPINGS)
NODE_CLASS_MAPPINGS.update(FLOW_CONTROL_NODE_CLASS_MAPPINGS)
NODE_CLASS_MAPPINGS.update(UTILITY_NODE_CLASS_MAPPINGS)
NODE_CLASS_MAPPINGS.update(CONDITION_NODE_CLASS_MAPPINGS)
NODE_CLASS_MAPPINGS.update(TEST_STUB_NODE_CLASS_MAPPINGS)

NODE_DISPLAY_NAME_MAPPINGS = {}
NODE_DISPLAY_NAME_MAPPINGS.update(TEST_NODE_DISPLAY_NAME_MAPPINGS)
NODE_DISPLAY_NAME_MAPPINGS.update(FLOW_CONTROL_NODE_DISPLAY_NAME_MAPPINGS)
NODE_DISPLAY_NAME_MAPPINGS.update(UTILITY_NODE_DISPLAY_NAME_MAPPINGS)
NODE_DISPLAY_NAME_MAPPINGS.update(CONDITION_NODE_DISPLAY_NAME_MAPPINGS)
NODE_DISPLAY_NAME_MAPPINGS.update(TEST_STUB_NODE_DISPLAY_NAME_MAPPINGS)
194 tests/inference/testing_nodes/testing-pack/conditions.py Normal file
@@ -0,0 +1,194 @@
import re
import torch

class TestIntConditions:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "a": ("INT", {"default": 0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 1}),
                "b": ("INT", {"default": 0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 1}),
                "operation": (["==", "!=", "<", ">", "<=", ">="],),
            },
        }

    RETURN_TYPES = ("BOOLEAN",)
    FUNCTION = "int_condition"

    CATEGORY = "Testing/Logic"

    def int_condition(self, a, b, operation):
        if operation == "==":
            return (a == b,)
        elif operation == "!=":
            return (a != b,)
        elif operation == "<":
            return (a < b,)
        elif operation == ">":
            return (a > b,)
        elif operation == "<=":
            return (a <= b,)
        elif operation == ">=":
            return (a >= b,)


class TestFloatConditions:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "a": ("FLOAT", {"default": 0, "min": -999999999999.0, "max": 999999999999.0, "step": 1}),
                "b": ("FLOAT", {"default": 0, "min": -999999999999.0, "max": 999999999999.0, "step": 1}),
                "operation": (["==", "!=", "<", ">", "<=", ">="],),
            },
        }

    RETURN_TYPES = ("BOOLEAN",)
    FUNCTION = "float_condition"

    CATEGORY = "Testing/Logic"

    def float_condition(self, a, b, operation):
        if operation == "==":
            return (a == b,)
        elif operation == "!=":
            return (a != b,)
        elif operation == "<":
            return (a < b,)
        elif operation == ">":
            return (a > b,)
        elif operation == "<=":
            return (a <= b,)
        elif operation == ">=":
            return (a >= b,)

class TestStringConditions:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "a": ("STRING", {"multiline": False}),
                "b": ("STRING", {"multiline": False}),
                "operation": (["a == b", "a != b", "a IN b", "a MATCH REGEX(b)", "a BEGINSWITH b", "a ENDSWITH b"],),
                "case_sensitive": ("BOOLEAN", {"default": True}),
            },
        }

    RETURN_TYPES = ("BOOLEAN",)
    FUNCTION = "string_condition"

    CATEGORY = "Testing/Logic"

    def string_condition(self, a, b, operation, case_sensitive):
        if not case_sensitive:
            a = a.lower()
            b = b.lower()

        if operation == "a == b":
            return (a == b,)
        elif operation == "a != b":
            return (a != b,)
        elif operation == "a IN b":
            return (a in b,)
        elif operation == "a MATCH REGEX(b)":
            try:
                return (re.match(b, a) is not None,)
            except:
                return (False,)
        elif operation == "a BEGINSWITH b":
            return (a.startswith(b),)
        elif operation == "a ENDSWITH b":
            return (a.endswith(b),)

class TestToBoolNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "value": ("*",),
            },
            "optional": {
                "invert": ("BOOLEAN", {"default": False}),
            },
        }

    RETURN_TYPES = ("BOOLEAN",)
    FUNCTION = "to_bool"

    CATEGORY = "Testing/Logic"

    def to_bool(self, value, invert = False):
        if isinstance(value, torch.Tensor):
            if value.max().item() == 0 and value.min().item() == 0:
                result = False
            else:
                result = True
        else:
            try:
                result = bool(value)
            except:
                # Can't convert it? Well then it's something or other. I dunno, I'm not a Python programmer.
                result = True

        if invert:
            result = not result

        return (result,)

class TestBoolOperationNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "a": ("BOOLEAN",),
                "b": ("BOOLEAN",),
                "op": (["a AND b", "a OR b", "a XOR b", "NOT a"],),
            },
        }

    RETURN_TYPES = ("BOOLEAN",)
    FUNCTION = "bool_operation"

    CATEGORY = "Testing/Logic"

    def bool_operation(self, a, b, op):
        if op == "a AND b":
            return (a and b,)
        elif op == "a OR b":
            return (a or b,)
        elif op == "a XOR b":
            return (a ^ b,)
        elif op == "NOT a":
            return (not a,)


CONDITION_NODE_CLASS_MAPPINGS = {
    "TestIntConditions": TestIntConditions,
    "TestFloatConditions": TestFloatConditions,
    "TestStringConditions": TestStringConditions,
    "TestToBoolNode": TestToBoolNode,
    "TestBoolOperationNode": TestBoolOperationNode,
}

CONDITION_NODE_DISPLAY_NAME_MAPPINGS = {
    "TestIntConditions": "Int Condition",
    "TestFloatConditions": "Float Condition",
    "TestStringConditions": "String Condition",
    "TestToBoolNode": "To Bool",
    "TestBoolOperationNode": "Bool Operation",
}
173 tests/inference/testing_nodes/testing-pack/flow_control.py Normal file
@@ -0,0 +1,173 @@
|
||||
from comfy_execution.graph_utils import GraphBuilder, is_link
|
||||
from comfy_execution.graph import ExecutionBlocker
|
||||
from .tools import VariantSupport
|
||||
|
||||
NUM_FLOW_SOCKETS = 5
|
||||
@VariantSupport()
|
||||
class TestWhileLoopOpen:
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
inputs = {
|
||||
"required": {
|
||||
"condition": ("BOOLEAN", {"default": True}),
|
||||
},
|
||||
"optional": {
|
||||
},
|
||||
}
|
||||
for i in range(NUM_FLOW_SOCKETS):
|
||||
inputs["optional"][f"initial_value{i}"] = ("*",)
|
||||
return inputs
|
||||
|
||||
RETURN_TYPES = tuple(["FLOW_CONTROL"] + ["*"] * NUM_FLOW_SOCKETS)
|
||||
RETURN_NAMES = tuple(["FLOW_CONTROL"] + [f"value{i}" for i in range(NUM_FLOW_SOCKETS)])
|
||||
FUNCTION = "while_loop_open"
|
||||
|
||||
CATEGORY = "Testing/Flow"
|
||||
|
||||
def while_loop_open(self, condition, **kwargs):
|
||||
values = []
|
||||
for i in range(NUM_FLOW_SOCKETS):
|
||||
values.append(kwargs.get(f"initial_value{i}", None))
|
||||
return tuple(["stub"] + values)


@VariantSupport()
class TestWhileLoopClose:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        inputs = {
            "required": {
                "flow_control": ("FLOW_CONTROL", {"rawLink": True}),
                "condition": ("BOOLEAN", {"forceInput": True}),
            },
            "optional": {
            },
            "hidden": {
                "dynprompt": "DYNPROMPT",
                "unique_id": "UNIQUE_ID",
            }
        }
        for i in range(NUM_FLOW_SOCKETS):
            inputs["optional"][f"initial_value{i}"] = ("*",)
        return inputs

    RETURN_TYPES = tuple(["*"] * NUM_FLOW_SOCKETS)
    RETURN_NAMES = tuple([f"value{i}" for i in range(NUM_FLOW_SOCKETS)])
    FUNCTION = "while_loop_close"

    CATEGORY = "Testing/Flow"

    def explore_dependencies(self, node_id, dynprompt, upstream):
        node_info = dynprompt.get_node(node_id)
        if "inputs" not in node_info:
            return
        for k, v in node_info["inputs"].items():
            if is_link(v):
                parent_id = v[0]
                if parent_id not in upstream:
                    upstream[parent_id] = []
                    self.explore_dependencies(parent_id, dynprompt, upstream)
                upstream[parent_id].append(node_id)

    def collect_contained(self, node_id, upstream, contained):
        if node_id not in upstream:
            return
        for child_id in upstream[node_id]:
            if child_id not in contained:
                contained[child_id] = True
                self.collect_contained(child_id, upstream, contained)

    def while_loop_close(self, flow_control, condition, dynprompt=None, unique_id=None, **kwargs):
        assert dynprompt is not None
        if not condition:
            # We're done with the loop
            values = []
            for i in range(NUM_FLOW_SOCKETS):
                values.append(kwargs.get(f"initial_value{i}", None))
            return tuple(values)

        # We want to loop
        upstream = {}
        # Get the list of all nodes between the open and close nodes
        self.explore_dependencies(unique_id, dynprompt, upstream)

        contained = {}
        open_node = flow_control[0]
        self.collect_contained(open_node, upstream, contained)
        contained[unique_id] = True
        contained[open_node] = True

        # We'll use the default prefix, but to avoid having node names grow exponentially in size,
        # we'll use "Recurse" for the name of the recursively-generated copy of this node.
        graph = GraphBuilder()
        for node_id in contained:
            original_node = dynprompt.get_node(node_id)
            node = graph.node(original_node["class_type"], "Recurse" if node_id == unique_id else node_id)
            node.set_override_display_id(node_id)
        for node_id in contained:
            original_node = dynprompt.get_node(node_id)
            node = graph.lookup_node("Recurse" if node_id == unique_id else node_id)
            assert node is not None
            for k, v in original_node["inputs"].items():
                if is_link(v) and v[0] in contained:
                    parent = graph.lookup_node(v[0])
                    assert parent is not None
                    node.set_input(k, parent.out(v[1]))
                else:
                    node.set_input(k, v)
        new_open = graph.lookup_node(open_node)
        assert new_open is not None
        for i in range(NUM_FLOW_SOCKETS):
            key = f"initial_value{i}"
            new_open.set_input(key, kwargs.get(key, None))
        my_clone = graph.lookup_node("Recurse")
        assert my_clone is not None
        result = tuple(my_clone.out(i) for i in range(NUM_FLOW_SOCKETS))
        return {
            "result": result,
            "expand": graph.finalize(),
        }
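

# Editor's note (not part of the node pack): what one pass through the
# expansion above produces. Given a graph Open -> Work -> Close, returning
# {"result": ..., "expand": graph.finalize()} replaces this Close node with a
# fresh copy of the loop body plus a "Recurse" copy of Close itself, so the
# executor unrolls exactly one iteration per expansion:
#
#   before expansion:  Open -> Work -> Close
#   after expansion:   Open -> Work -> Open' -> Work' -> Recurse(Close)
#
# Node ids other than "Recurse" are kept stable, and set_override_display_id
# keeps progress reporting pointed at the original node ids.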


@VariantSupport()
class TestExecutionBlockerNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        inputs = {
            "required": {
                "input": ("*",),
                "block": ("BOOLEAN",),
                "verbose": ("BOOLEAN", {"default": False}),
            },
        }
        return inputs

    RETURN_TYPES = ("*",)
    RETURN_NAMES = ("output",)
    FUNCTION = "execution_blocker"

    CATEGORY = "Testing/Flow"

    def execution_blocker(self, input, block, verbose):
        if block:
            return (ExecutionBlocker("Blocked Execution" if verbose else None),)
        return (input,)


FLOW_CONTROL_NODE_CLASS_MAPPINGS = {
    "TestWhileLoopOpen": TestWhileLoopOpen,
    "TestWhileLoopClose": TestWhileLoopClose,
    "TestExecutionBlocker": TestExecutionBlockerNode,
}
FLOW_CONTROL_NODE_DISPLAY_NAME_MAPPINGS = {
    "TestWhileLoopOpen": "While Loop Open",
    "TestWhileLoopClose": "While Loop Close",
    "TestExecutionBlocker": "Execution Blocker",
}

362
tests/inference/testing_nodes/testing-pack/specific_tests.py
Normal file
@@ -0,0 +1,362 @@
import torch
from .tools import VariantSupport
from comfy_execution.graph_utils import GraphBuilder

class TestLazyMixImages:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image1": ("IMAGE",{"lazy": True}),
                "image2": ("IMAGE",{"lazy": True}),
                "mask": ("MASK",),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "mix"

    CATEGORY = "Testing/Nodes"

    def check_lazy_status(self, mask, image1, image2):
        mask_min = mask.min()
        mask_max = mask.max()
        needed = []
        if image1 is None and (mask_min != 1.0 or mask_max != 1.0):
            needed.append("image1")
        if image2 is None and (mask_min != 0.0 or mask_max != 0.0):
            needed.append("image2")
        return needed

    # Not trying to handle different batch sizes here just to keep the demo simple
    def mix(self, mask, image1, image2):
        mask_min = mask.min()
        mask_max = mask.max()
        if mask_min == 0.0 and mask_max == 0.0:
            return (image1,)
        elif mask_min == 1.0 and mask_max == 1.0:
            return (image2,)

        if len(mask.shape) == 2:
            mask = mask.unsqueeze(0)
        if len(mask.shape) == 3:
            mask = mask.unsqueeze(3)
        if mask.shape[3] < image1.shape[3]:
            mask = mask.repeat(1, 1, 1, image1.shape[3])

        result = image1 * (1. - mask) + image2 * mask
        return (result,)
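

# Editor's sketch (not part of the node pack): how check_lazy_status drives
# lazy evaluation. The executor calls it while lazy inputs are still None;
# the node returns the names it actually needs, so an all-zero mask never
# forces image2 to be evaluated and an all-one mask never forces image1.
def _demo_check_lazy_status():
    node = TestLazyMixImages()
    # All-zero mask: only image1 is needed (and it is still missing).
    assert node.check_lazy_status(torch.zeros(1, 8, 8), None, None) == ["image1"]
    # All-one mask: only image2 is needed.
    assert node.check_lazy_status(torch.ones(1, 8, 8), None, None) == ["image2"]
    # Mixed mask: both sides contribute.
    mixed = torch.full((1, 8, 8), 0.5)
    assert node.check_lazy_status(mixed, None, None) == ["image1", "image2"]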


class TestVariadicAverage:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "input1": ("IMAGE",),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "variadic_average"

    CATEGORY = "Testing/Nodes"

    def variadic_average(self, input1, **kwargs):
        inputs = [input1]
        while 'input' + str(len(inputs) + 1) in kwargs:
            inputs.append(kwargs['input' + str(len(inputs) + 1)])
        return (torch.stack(inputs).mean(dim=0),)
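

# Editor's sketch (not part of the node pack): the variadic convention above.
# Extra inputs must be named input2, input3, ... with no gaps; the while loop
# stops at the first missing index, so later inputs are silently ignored.
def _demo_variadic_average():
    node = TestVariadicAverage()
    black = torch.zeros(1, 8, 8, 3)
    white = torch.ones(1, 8, 8, 3)
    (mean,) = node.variadic_average(black, input2=white)
    assert torch.allclose(mean, torch.full((1, 8, 8, 3), 0.5))
    # input4 is ignored because input3 is missing.
    (mean,) = node.variadic_average(black, input2=white, input4=white)
    assert torch.allclose(mean, torch.full((1, 8, 8, 3), 0.5))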


class TestCustomIsChanged:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
            },
            "optional": {
                "should_change": ("BOOL", {"default": False}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "custom_is_changed"

    CATEGORY = "Testing/Nodes"

    def custom_is_changed(self, image, should_change=False):
        return (image,)

    @classmethod
    def IS_CHANGED(cls, should_change=False, *args, **kwargs):
        if should_change:
            return float("NaN")
        else:
            return False
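

# Editor's note (not part of the node pack): returning float("NaN") is the
# usual cache-busting trick, since NaN never compares equal to anything --
# including a cached NaN -- so the node re-executes on every prompt.
def _demo_nan_is_changed():
    first = TestCustomIsChanged.IS_CHANGED(should_change=True)
    second = TestCustomIsChanged.IS_CHANGED(should_change=True)
    assert first != second                            # NaN != NaN, cache never matches
    assert TestCustomIsChanged.IS_CHANGED() is False  # stable when unchanged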


class TestIsChangedWithConstants:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "value": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "custom_is_changed"

    CATEGORY = "Testing/Nodes"

    def custom_is_changed(self, image, value):
        return (image * value,)

    @classmethod
    def IS_CHANGED(cls, image, value):
        if image is None:
            return value
        else:
            return image.mean().item() * value


class TestCustomValidation1:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "input1": ("IMAGE,FLOAT",),
                "input2": ("IMAGE,FLOAT",),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "custom_validation1"

    CATEGORY = "Testing/Nodes"

    def custom_validation1(self, input1, input2):
        if isinstance(input1, float) and isinstance(input2, float):
            result = torch.ones([1, 512, 512, 3]) * input1 * input2
        else:
            result = input1 * input2
        return (result,)

    @classmethod
    def VALIDATE_INPUTS(cls, input1=None, input2=None):
        if input1 is not None:
            if not isinstance(input1, (torch.Tensor, float)):
                return f"Invalid type of input1: {type(input1)}"
        if input2 is not None:
            if not isinstance(input2, (torch.Tensor, float)):
                return f"Invalid type of input2: {type(input2)}"

        return True


class TestCustomValidation2:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "input1": ("IMAGE,FLOAT",),
                "input2": ("IMAGE,FLOAT",),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "custom_validation2"

    CATEGORY = "Testing/Nodes"

    def custom_validation2(self, input1, input2):
        if isinstance(input1, float) and isinstance(input2, float):
            result = torch.ones([1, 512, 512, 3]) * input1 * input2
        else:
            result = input1 * input2
        return (result,)

    @classmethod
    def VALIDATE_INPUTS(cls, input_types, input1=None, input2=None):
        if input1 is not None:
            if not isinstance(input1, (torch.Tensor, float)):
                return f"Invalid type of input1: {type(input1)}"
        if input2 is not None:
            if not isinstance(input2, (torch.Tensor, float)):
                return f"Invalid type of input2: {type(input2)}"

        if 'input1' in input_types:
            if input_types['input1'] not in ["IMAGE", "FLOAT"]:
                return f"Invalid type of input1: {input_types['input1']}"
        if 'input2' in input_types:
            if input_types['input2'] not in ["IMAGE", "FLOAT"]:
                return f"Invalid type of input2: {input_types['input2']}"

        return True


@VariantSupport()
class TestCustomValidation3:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "input1": ("IMAGE,FLOAT",),
                "input2": ("IMAGE,FLOAT",),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "custom_validation3"

    CATEGORY = "Testing/Nodes"

    def custom_validation3(self, input1, input2):
        if isinstance(input1, float) and isinstance(input2, float):
            result = torch.ones([1, 512, 512, 3]) * input1 * input2
        else:
            result = input1 * input2
        return (result,)


class TestCustomValidation4:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "input1": ("FLOAT",),
                "input2": ("FLOAT",),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "custom_validation4"

    CATEGORY = "Testing/Nodes"

    def custom_validation4(self, input1, input2):
        result = torch.ones([1, 512, 512, 3]) * input1 * input2
        return (result,)

    @classmethod
    def VALIDATE_INPUTS(cls, input1, input2):
        if input1 is not None:
            if not isinstance(input1, float):
                return f"Invalid type of input1: {type(input1)}"
        if input2 is not None:
            if not isinstance(input2, float):
                return f"Invalid type of input2: {type(input2)}"

        return True


class TestCustomValidation5:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "input1": ("FLOAT", {"min": 0.0, "max": 1.0}),
                "input2": ("FLOAT", {"min": 0.0, "max": 1.0}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "custom_validation5"

    CATEGORY = "Testing/Nodes"

    def custom_validation5(self, input1, input2):
        value = input1 * input2
        return (torch.ones([1, 512, 512, 3]) * value,)

    @classmethod
    def VALIDATE_INPUTS(cls, **kwargs):
        if kwargs['input2'] == 7.0:
            return "7s are not allowed. I've never liked 7s."
        return True


class TestDynamicDependencyCycle:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "input1": ("IMAGE",),
                "input2": ("IMAGE",),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "dynamic_dependency_cycle"

    CATEGORY = "Testing/Nodes"

    def dynamic_dependency_cycle(self, input1, input2):
        g = GraphBuilder()
        mask = g.node("StubMask", value=0.5, height=512, width=512, batch_size=1)
        mix1 = g.node("TestLazyMixImages", image1=input1, mask=mask.out(0))
        mix2 = g.node("TestLazyMixImages", image1=mix1.out(0), image2=input2, mask=mask.out(0))

        # Create the cycle
        mix1.set_input("image2", mix2.out(0))

        return {
            "result": (mix2.out(0),),
            "expand": g.finalize(),
        }


class TestMixedExpansionReturns:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "input1": ("FLOAT",),
            },
        }

    RETURN_TYPES = ("IMAGE","IMAGE")
    FUNCTION = "mixed_expansion_returns"

    CATEGORY = "Testing/Nodes"

    def mixed_expansion_returns(self, input1):
        white_image = torch.ones([1, 512, 512, 3])
        if input1 <= 0.1:
            return (torch.ones([1, 512, 512, 3]) * 0.1, white_image)
        elif input1 <= 0.2:
            return {
                "result": (torch.ones([1, 512, 512, 3]) * 0.2, white_image),
            }
        else:
            g = GraphBuilder()
            mask = g.node("StubMask", value=0.3, height=512, width=512, batch_size=1)
            black = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
            white = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
            mix = g.node("TestLazyMixImages", image1=black.out(0), image2=white.out(0), mask=mask.out(0))
            return {
                "result": (mix.out(0), white_image),
                "expand": g.finalize(),
            }
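

# Editor's note (not part of the node pack): the method above exercises all
# three return shapes a node may use, and shows they can be mixed per call:
#
#   return (img_a, img_b)                       # plain tuple of outputs
#   return {"result": (img_a, img_b)}           # dict form, outputs only
#   return {"result": (...), "expand": graph}   # dict form with graph expansion,
#                                               # where outputs may be node links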


TEST_NODE_CLASS_MAPPINGS = {
    "TestLazyMixImages": TestLazyMixImages,
    "TestVariadicAverage": TestVariadicAverage,
    "TestCustomIsChanged": TestCustomIsChanged,
    "TestIsChangedWithConstants": TestIsChangedWithConstants,
    "TestCustomValidation1": TestCustomValidation1,
    "TestCustomValidation2": TestCustomValidation2,
    "TestCustomValidation3": TestCustomValidation3,
    "TestCustomValidation4": TestCustomValidation4,
    "TestCustomValidation5": TestCustomValidation5,
    "TestDynamicDependencyCycle": TestDynamicDependencyCycle,
    "TestMixedExpansionReturns": TestMixedExpansionReturns,
}

TEST_NODE_DISPLAY_NAME_MAPPINGS = {
    "TestLazyMixImages": "Lazy Mix Images",
    "TestVariadicAverage": "Variadic Average",
    "TestCustomIsChanged": "Custom IsChanged",
    "TestIsChangedWithConstants": "IsChanged With Constants",
    "TestCustomValidation1": "Custom Validation 1",
    "TestCustomValidation2": "Custom Validation 2",
    "TestCustomValidation3": "Custom Validation 3",
    "TestCustomValidation4": "Custom Validation 4",
    "TestCustomValidation5": "Custom Validation 5",
    "TestDynamicDependencyCycle": "Dynamic Dependency Cycle",
    "TestMixedExpansionReturns": "Mixed Expansion Returns",
}

129
tests/inference/testing_nodes/testing-pack/stubs.py
Normal file
@@ -0,0 +1,129 @@
import torch

class StubImage:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "content": (['WHITE', 'BLACK', 'NOISE'],),
                "height": ("INT", {"default": 512, "min": 1, "max": 1024 ** 3, "step": 1}),
                "width": ("INT", {"default": 512, "min": 1, "max": 4096 ** 3, "step": 1}),
                "batch_size": ("INT", {"default": 1, "min": 1, "max": 1024 ** 3, "step": 1}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "stub_image"

    CATEGORY = "Testing/Stub Nodes"

    def stub_image(self, content, height, width, batch_size):
        if content == "WHITE":
            return (torch.ones(batch_size, height, width, 3),)
        elif content == "BLACK":
            return (torch.zeros(batch_size, height, width, 3),)
        elif content == "NOISE":
            return (torch.rand(batch_size, height, width, 3),)


class StubConstantImage:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "value": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                "height": ("INT", {"default": 512, "min": 1, "max": 1024 ** 3, "step": 1}),
                "width": ("INT", {"default": 512, "min": 1, "max": 4096 ** 3, "step": 1}),
                "batch_size": ("INT", {"default": 1, "min": 1, "max": 1024 ** 3, "step": 1}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "stub_constant_image"

    CATEGORY = "Testing/Stub Nodes"

    def stub_constant_image(self, value, height, width, batch_size):
        return (torch.ones(batch_size, height, width, 3) * value,)


class StubMask:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "value": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                "height": ("INT", {"default": 512, "min": 1, "max": 1024 ** 3, "step": 1}),
                "width": ("INT", {"default": 512, "min": 1, "max": 4096 ** 3, "step": 1}),
                "batch_size": ("INT", {"default": 1, "min": 1, "max": 1024 ** 3, "step": 1}),
            },
        }

    RETURN_TYPES = ("MASK",)
    FUNCTION = "stub_mask"

    CATEGORY = "Testing/Stub Nodes"

    def stub_mask(self, value, height, width, batch_size):
        return (torch.ones(batch_size, height, width) * value,)


class StubInt:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "value": ("INT", {"default": 0, "min": -0xffffffff, "max": 0xffffffff, "step": 1}),
            },
        }

    RETURN_TYPES = ("INT",)
    FUNCTION = "stub_int"

    CATEGORY = "Testing/Stub Nodes"

    def stub_int(self, value):
        return (value,)


class StubFloat:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "value": ("FLOAT", {"default": 0.0, "min": -1.0e38, "max": 1.0e38, "step": 0.01}),
            },
        }

    RETURN_TYPES = ("FLOAT",)
    FUNCTION = "stub_float"

    CATEGORY = "Testing/Stub Nodes"

    def stub_float(self, value):
        return (value,)


TEST_STUB_NODE_CLASS_MAPPINGS = {
    "StubImage": StubImage,
    "StubConstantImage": StubConstantImage,
    "StubMask": StubMask,
    "StubInt": StubInt,
    "StubFloat": StubFloat,
}
TEST_STUB_NODE_DISPLAY_NAME_MAPPINGS = {
    "StubImage": "Stub Image",
    "StubConstantImage": "Stub Constant Image",
    "StubMask": "Stub Mask",
    "StubInt": "Stub Int",
    "StubFloat": "Stub Float",
}

53
tests/inference/testing_nodes/testing-pack/tools.py
Normal file
@@ -0,0 +1,53 @@
def MakeSmartType(t):
    if isinstance(t, str):
        return SmartType(t)
    return t

class SmartType(str):
    def __ne__(self, other):
        if self == "*" or other == "*":
            return False
        selfset = set(self.split(','))
        otherset = set(other.split(','))
        return not selfset.issubset(otherset)
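

# Editor's sketch (not part of this module): SmartType comparisons in
# practice. Only __ne__ is overridden because the validator compares types
# with != ; "*" matches everything, and `x != y` is False whenever every
# member of comma-separated x is accepted by y.
def _demo_smart_type():
    assert not (SmartType("*") != SmartType("IMAGE"))            # wildcard matches anything
    assert not (SmartType("IMAGE") != SmartType("IMAGE,FLOAT"))  # IMAGE is accepted by IMAGE,FLOAT
    assert SmartType("IMAGE,MASK") != SmartType("IMAGE,FLOAT")   # MASK is not offered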


def VariantSupport():
    def decorator(cls):
        if hasattr(cls, "INPUT_TYPES"):
            old_input_types = getattr(cls, "INPUT_TYPES")
            def new_input_types(*args, **kwargs):
                types = old_input_types(*args, **kwargs)
                for category in ["required", "optional"]:
                    if category not in types:
                        continue
                    for key, value in types[category].items():
                        if isinstance(value, tuple):
                            types[category][key] = (MakeSmartType(value[0]),) + value[1:]
                return types
            setattr(cls, "INPUT_TYPES", new_input_types)
        if hasattr(cls, "RETURN_TYPES"):
            old_return_types = cls.RETURN_TYPES
            setattr(cls, "RETURN_TYPES", tuple(MakeSmartType(x) for x in old_return_types))
        if hasattr(cls, "VALIDATE_INPUTS"):
            # Reflection is used to determine what the function signature is, so we can't just change the function signature
            raise NotImplementedError("VariantSupport does not support VALIDATE_INPUTS yet")
        else:
            def validate_inputs(input_types):
                inputs = cls.INPUT_TYPES()
                for key, value in input_types.items():
                    if isinstance(value, SmartType):
                        continue
                    if "required" in inputs and key in inputs["required"]:
                        expected_type = inputs["required"][key][0]
                    elif "optional" in inputs and key in inputs["optional"]:
                        expected_type = inputs["optional"][key][0]
                    else:
                        expected_type = None
                    if expected_type is not None and MakeSmartType(value) != expected_type:
                        return f"Invalid type of {key}: {value} (expected {expected_type})"
                return True
            setattr(cls, "VALIDATE_INPUTS", validate_inputs)
        return cls
    return decorator
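

# Editor's sketch (not part of this module): what the decorator generates.
# The synthesized VALIDATE_INPUTS only rejects a mismatch when the incoming
# link's declared type is a plain string; links whose type is already a
# SmartType (i.e. from another variant-aware node) are skipped and accepted.
def _demo_variant_support():
    @VariantSupport()
    class _Echo:  # hypothetical class, defined here only for illustration
        @classmethod
        def INPUT_TYPES(cls):
            return {"required": {"value": ("IMAGE,FLOAT",)}}
        RETURN_TYPES = ("IMAGE,FLOAT",)

    assert _Echo.VALIDATE_INPUTS({"value": "IMAGE"}) is True
    assert _Echo.VALIDATE_INPUTS({"value": SmartType("MASK")}) is True  # SmartType: skipped
    assert "Invalid type" in _Echo.VALIDATE_INPUTS({"value": "MASK"})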

364
tests/inference/testing_nodes/testing-pack/util.py
Normal file
@@ -0,0 +1,364 @@
from comfy_execution.graph_utils import GraphBuilder
from .tools import VariantSupport

@VariantSupport()
class TestAccumulateNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "to_add": ("*",),
            },
            "optional": {
                "accumulation": ("ACCUMULATION",),
            },
        }

    RETURN_TYPES = ("ACCUMULATION",)
    FUNCTION = "accumulate"

    CATEGORY = "Testing/Lists"

    def accumulate(self, to_add, accumulation=None):
        if accumulation is None:
            value = [to_add]
        else:
            value = accumulation["accum"] + [to_add]
        return ({"accum": value},)


@VariantSupport()
class TestAccumulationHeadNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "accumulation": ("ACCUMULATION",),
            },
        }

    RETURN_TYPES = ("ACCUMULATION", "*",)
    FUNCTION = "accumulation_head"

    CATEGORY = "Testing/Lists"

    def accumulation_head(self, accumulation):
        accum = accumulation["accum"]
        if len(accum) == 0:
            return (accumulation, None)
        else:
            return ({"accum": accum[1:]}, accum[0])


class TestAccumulationTailNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "accumulation": ("ACCUMULATION",),
            },
        }

    RETURN_TYPES = ("ACCUMULATION", "*",)
    FUNCTION = "accumulation_tail"

    CATEGORY = "Testing/Lists"

    def accumulation_tail(self, accumulation):
        accum = accumulation["accum"]
        if len(accum) == 0:
            return (None, accumulation)
        else:
            return ({"accum": accum[:-1]}, accum[-1])


@VariantSupport()
class TestAccumulationToListNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "accumulation": ("ACCUMULATION",),
            },
        }

    RETURN_TYPES = ("*",)
    OUTPUT_IS_LIST = (True,)

    FUNCTION = "accumulation_to_list"

    CATEGORY = "Testing/Lists"

    def accumulation_to_list(self, accumulation):
        return (accumulation["accum"],)


@VariantSupport()
class TestListToAccumulationNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "list": ("*",),
            },
        }

    RETURN_TYPES = ("ACCUMULATION",)
    INPUT_IS_LIST = (True,)

    FUNCTION = "list_to_accumulation"

    CATEGORY = "Testing/Lists"

    def list_to_accumulation(self, list):
        return ({"accum": list},)


@VariantSupport()
class TestAccumulationGetLengthNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "accumulation": ("ACCUMULATION",),
            },
        }

    RETURN_TYPES = ("INT",)

    FUNCTION = "accumlength"

    CATEGORY = "Testing/Lists"

    def accumlength(self, accumulation):
        return (len(accumulation['accum']),)


@VariantSupport()
class TestAccumulationGetItemNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "accumulation": ("ACCUMULATION",),
                "index": ("INT", {"default": 0, "step": 1})
            },
        }

    RETURN_TYPES = ("*",)

    FUNCTION = "get_item"

    CATEGORY = "Testing/Lists"

    def get_item(self, accumulation, index):
        return (accumulation['accum'][index],)


@VariantSupport()
class TestAccumulationSetItemNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "accumulation": ("ACCUMULATION",),
                "index": ("INT", {"default": 0, "step": 1}),
                "value": ("*",),
            },
        }

    RETURN_TYPES = ("ACCUMULATION",)

    FUNCTION = "set_item"

    CATEGORY = "Testing/Lists"

    def set_item(self, accumulation, index, value):
        # Copy the list so the incoming accumulation is left unmodified.
        new_accum = accumulation['accum'][:]
        new_accum[index] = value
        return ({"accum": new_accum},)


class TestIntMathOperation:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "a": ("INT", {"default": 0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 1}),
                "b": ("INT", {"default": 0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 1}),
                "operation": (["add", "subtract", "multiply", "divide", "modulo", "power"],),
            },
        }

    RETURN_TYPES = ("INT",)
    FUNCTION = "int_math_operation"

    CATEGORY = "Testing/Logic"

    def int_math_operation(self, a, b, operation):
        if operation == "add":
            return (a + b,)
        elif operation == "subtract":
            return (a - b,)
        elif operation == "multiply":
            return (a * b,)
        elif operation == "divide":
            # Integer (floor) division, so the result stays an INT.
            return (a // b,)
        elif operation == "modulo":
            return (a % b,)
        elif operation == "power":
            return (a ** b,)


from .flow_control import NUM_FLOW_SOCKETS

@VariantSupport()
class TestForLoopOpen:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "remaining": ("INT", {"default": 1, "min": 0, "max": 100000, "step": 1}),
            },
            "optional": {
                f"initial_value{i}": ("*",) for i in range(1, NUM_FLOW_SOCKETS)
            },
            "hidden": {
                "initial_value0": ("*",)
            }
        }

    RETURN_TYPES = tuple(["FLOW_CONTROL", "INT",] + ["*"] * (NUM_FLOW_SOCKETS-1))
    RETURN_NAMES = tuple(["flow_control", "remaining"] + [f"value{i}" for i in range(1, NUM_FLOW_SOCKETS)])
    FUNCTION = "for_loop_open"

    CATEGORY = "Testing/Flow"

    def for_loop_open(self, remaining, **kwargs):
        graph = GraphBuilder()
        if "initial_value0" in kwargs:
            remaining = kwargs["initial_value0"]
        while_open = graph.node("TestWhileLoopOpen", condition=remaining, initial_value0=remaining, **{(f"initial_value{i}"): kwargs.get(f"initial_value{i}", None) for i in range(1, NUM_FLOW_SOCKETS)})
        outputs = [kwargs.get(f"initial_value{i}", None) for i in range(1, NUM_FLOW_SOCKETS)]
        return {
            "result": tuple(["stub", remaining] + outputs),
            "expand": graph.finalize(),
        }
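

# Editor's note (not part of the node pack): the for loop is built by
# expansion onto the while-loop nodes above. Open seeds initial_value0 with
# the remaining count; Close reads that count back out of the open node
# (a=[while_open, 1] is a raw link to output index 1, the remaining value),
# decrements it with TestIntMathOperation, converts it to a condition with
# TestToBoolNode (any nonzero int is truthy), and feeds both into
# TestWhileLoopClose, so the body runs exactly `remaining` times.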


@VariantSupport()
class TestForLoopClose:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "flow_control": ("FLOW_CONTROL", {"rawLink": True}),
            },
            "optional": {
                f"initial_value{i}": ("*",{"rawLink": True}) for i in range(1, NUM_FLOW_SOCKETS)
            },
        }

    RETURN_TYPES = tuple(["*"] * (NUM_FLOW_SOCKETS-1))
    RETURN_NAMES = tuple([f"value{i}" for i in range(1, NUM_FLOW_SOCKETS)])
    FUNCTION = "for_loop_close"

    CATEGORY = "Testing/Flow"

    def for_loop_close(self, flow_control, **kwargs):
        graph = GraphBuilder()
        while_open = flow_control[0]
        sub = graph.node("TestIntMathOperation", operation="subtract", a=[while_open, 1], b=1)
        cond = graph.node("TestToBoolNode", value=sub.out(0))
        input_values = {f"initial_value{i}": kwargs.get(f"initial_value{i}", None) for i in range(1, NUM_FLOW_SOCKETS)}
        while_close = graph.node("TestWhileLoopClose",
                flow_control=flow_control,
                condition=cond.out(0),
                initial_value0=sub.out(0),
                **input_values)
        return {
            "result": tuple([while_close.out(i) for i in range(1, NUM_FLOW_SOCKETS)]),
            "expand": graph.finalize(),
        }


NUM_LIST_SOCKETS = 10
@VariantSupport()
class TestMakeListNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "value1": ("*",),
            },
            "optional": {
                f"value{i}": ("*",) for i in range(1, NUM_LIST_SOCKETS)
            },
        }

    RETURN_TYPES = ("*",)
    FUNCTION = "make_list"
    OUTPUT_IS_LIST = (True,)

    CATEGORY = "Testing/Lists"

    def make_list(self, **kwargs):
        result = []
        for i in range(NUM_LIST_SOCKETS):
            if f"value{i}" in kwargs:
                result.append(kwargs[f"value{i}"])
        return (result,)


UTILITY_NODE_CLASS_MAPPINGS = {
    "TestAccumulateNode": TestAccumulateNode,
    "TestAccumulationHeadNode": TestAccumulationHeadNode,
    "TestAccumulationTailNode": TestAccumulationTailNode,
    "TestAccumulationToListNode": TestAccumulationToListNode,
    "TestListToAccumulationNode": TestListToAccumulationNode,
    "TestAccumulationGetLengthNode": TestAccumulationGetLengthNode,
    "TestAccumulationGetItemNode": TestAccumulationGetItemNode,
    "TestAccumulationSetItemNode": TestAccumulationSetItemNode,
    "TestForLoopOpen": TestForLoopOpen,
    "TestForLoopClose": TestForLoopClose,
    "TestIntMathOperation": TestIntMathOperation,
    "TestMakeListNode": TestMakeListNode,
}
UTILITY_NODE_DISPLAY_NAME_MAPPINGS = {
    "TestAccumulateNode": "Accumulate",
    "TestAccumulationHeadNode": "Accumulation Head",
    "TestAccumulationTailNode": "Accumulation Tail",
    "TestAccumulationToListNode": "Accumulation to List",
    "TestListToAccumulationNode": "List to Accumulation",
    "TestAccumulationGetLengthNode": "Accumulation Get Length",
    "TestAccumulationGetItemNode": "Accumulation Get Item",
    "TestAccumulationSetItemNode": "Accumulation Set Item",
    "TestForLoopOpen": "For Loop Open",
    "TestForLoopClose": "For Loop Close",
    "TestIntMathOperation": "Int Math Operation",
    "TestMakeListNode": "Make List",
}