diff --git a/README.md b/README.md index 6396829..001172d 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,14 @@ If those nodes have descriptions written in HTML or Markdown, they will be conve ## UPDATES +**2024/09/03**: +* New `QUEUE TOO` Node focused on efficient image media loading. +* Better reporting in `AKASHIC` Node for core ComfyUI types. +* `MODE` setting for most nodes has been defaulted to `MATTE`. The older `NONE` setting has been removed. +* Thanks to [christian-byrne](https://github.com/christian-byrne) for squashing a bug in [the help sidebar!](https://github.com/Amorano/Jovimetrix/pull/55) +* Thanks to [Ainaemaet](https://github.com/Ainaemaet) for cleaning up the `STREAM READER` Node device list [when no devices are present](https://github.com/Amorano/Jovimetrix/pull/53)! +* Supports ComfyUI 0.2.0+, frontend 1.2.45+ + **2024/08/31**: * Better MASK/ALPHA support for `BLEND`, `ADJUST` and `QUEUE` * Cleaner Markdown outputs @@ -69,11 +77,11 @@ If those nodes have descriptions written in HTML or Markdown, they will be conve **2024/08/28**: -* New `STRINGER NODE` for string operations: Split, Join, Replace and Slice. +* New `STRINGER` Node for string operations: Split, Join, Replace and Slice. ![STRINGER NODE](https://github.com/user-attachments/assets/557bdef6-c0d3-4d01-a3dd-46f4a51952fa) -* `QUEUE NODE` now supports recursing directories. To filter pngs, jpgs, and gifs from the c:/images folder and its sub-folders: +* `QUEUE` Node now supports recursing directories. 
To filter pngs, jpgs, and gifs from the c:/images folder and its sub-folders: `c:/images;.png,.jpg,.gif` diff --git a/core/utility.py b/core/utility.py index e7c4b81..e1af973 100644 --- a/core/utility.py +++ b/core/utility.py @@ -23,13 +23,14 @@ from loguru import logger +import comfy.sd as comfy_sd from comfy.utils import ProgressBar from folder_paths import get_output_directory from Jovimetrix import DynamicInputType, deep_merge, comfy_message, parse_reset, \ Lexicon, JOVBaseNode, JOV_TYPE_ANY, ROOT, JOV_TYPE_IMAGE -from Jovimetrix.sup.util import parse_dynamic, path_next, \ +from Jovimetrix.sup.util import decode_tensor, parse_dynamic, path_next, \ parse_param, zip_longest_fill, EnumConvertType from Jovimetrix.sup.image import EnumInterpolation, EnumScaleMode, cv2tensor, cv2tensor_full, image_by_size, image_convert, \ @@ -96,10 +97,38 @@ def __parse(val) -> str: ret = val typ = ''.join(repr(type(val)).split("'")[1:2]) if isinstance(val, dict): - ret = json.dumps(val, indent=3) + # mixlab layer? + if (image := val.get('image', None)) is not None: + ret = image + if (mask := val.get('mask', None)) is not None: + while len(mask.shape) < len(image.shape): + mask = mask.unsqueeze(-1) + ret = torch.cat((image, mask), dim=-1) + if ret.ndim < 4: + ret = ret.unsqueeze(-1) + ret = decode_tensor(ret) + typ = "Mixlab Layer" + + # vector patch.... + elif 'xyzw' in val: + val = {"xyzw"[i]:x for i, x in enumerate(val["xyzw"])} + typ = "VECTOR" + # latents.... 
+ elif 'samples' in val: + ret = decode_tensor(val['samples'][0]) + typ = "LATENT" + # empty buffer + elif len(val) == 0: + ret = "" + else: + try: + ret = json.dumps(val, indent=3) + except Exception as e: + ret = str(e) + elif isinstance(val, (tuple, set, list,)): ret = '' - if len(val) > 0: + if (size := len(val)) > 0: if type(val) == np.ndarray: if len(q := q()) == 1: ret += f"{q[0]}" @@ -107,22 +136,24 @@ def __parse(val) -> str: ret += f"{q[1]}x{q[0]}" else: ret += f"{q[1]}x{q[0]}x{q[2]}" - elif len(val) < 2: + # typ = "NUMPY ARRAY" + elif isinstance(val[0], (torch.Tensor,)): + ret = decode_tensor(val[0]) + typ = type(val[0]) + elif size == 1: + if isinstance(val[0], (list,)) and isinstance(val[0][0], (torch.Tensor,)): + ret = decode_tensor(val[0][0]) + typ = "CONDITIONING" + elif size < 2: + ret = val[0] + else: + ret = '\n\t' + '\n\t'.join(str(v) for v in val) elif isinstance(val, bool): ret = "True" if val else "False" elif isinstance(val, torch.Tensor): - size = len(val.shape) - if size > 3: - b, h, w, cc = val.shape - else: - cc = 1 - b, h, w = val.shape - ret = f"{b}x{w}x{h}x{cc}" + ret = decode_tensor(val) else: - val = str(val) + ret = str(ret) return f"({ret}) [{typ}]" for x in o: diff --git a/pyproject.toml b/pyproject.toml index a442b3e..4902799 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [project] name = "jovimetrix" description = "Integrates Webcam, MIDI, Spout and GLSL shader support. Animation via tick. Parameter manipulation with wave generator. Math operations with Unary and Binary support. Value conversion for all major types (int, string, list, dict, Image, Mask). Shape mask generation, image stacking and channel ops, batch splitting, merging and randomizing, load images and video from anywhere, dynamic bus routing with a single node, export support for GIPHY, save output anywhere! flatten, crop, transform; check colorblindness, make stereogram or stereoscopic images, or liner interpolate values and more." 
-version = "1.2.32" +version = "1.2.33" license = { file = "LICENSE" } dependencies = [ "aenum>=3.1.15,<4", diff --git a/sup/util.py b/sup/util.py index c850210..e73068c 100644 --- a/sup/util.py +++ b/sup/util.py @@ -124,6 +124,14 @@ def parse_dynamic(data:dict, prefix:str, typ:EnumConvertType, default: Any) -> L return vals +def decode_tensor(tensor: torch.Tensor) -> str: + if tensor.ndim > 3: + b, h, w, cc = tensor.shape + else: + cc = 1 + b, h, w = tensor.shape + return f"{b}x{w}x{h}x{cc}" + def parse_value(val:Any, typ:EnumConvertType, default: Any, clip_min: Optional[float]=None, clip_max: Optional[float]=None, zero:int=0) -> List[Any]: