new STRING op node
Queue node to support sub-directories
Amorano committed Aug 29, 2024
1 parent b63e386 commit cba185c
Showing 9 changed files with 147 additions and 24 deletions.
16 changes: 16 additions & 0 deletions README.md
@@ -62,6 +62,22 @@ If those nodes have descriptions written in HTML or Markdown, they will be conve

## UPDATES

**2024/08/28**:

* New `STRINGER NODE` for string operations: Split, Join, Find, Replace, and Slice.

![STRINGER NODE](https://github.com/user-attachments/assets/557bdef6-c0d3-4d01-a3dd-46f4a51952fa)

* `QUEUE NODE` now supports recursing into sub-directories (enable the new `RECURSE` option to include them). To filter PNGs, JPGs, and GIFs from the `c:/images` folder and its sub-folders:

`c:/images;.png,.jpg,.gif`

You can add as many extensions as you need, but keep in mind that not every image type is supported just because you filter for it -- the Queue node will still return plain strings for any file type it can't find or load. See the example queue list below.

![QUEUE NODE](https://github.com/user-attachments/assets/9686b900-24a2-46ab-88ba-9e3c929b439c)

* Supports ComfyUI 0.1.3+, frontend 1.2.39+
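
For reference, here is a hypothetical multiline Queue list mixing a plain file entry with filtered directory entries; the directory lines use the `<directory>;<ext>,<ext>` form described above (the paths below are made-up examples, not shipped assets):

```
./res/img/test-a.png
c:/images;.png,.jpg,.gif
c:/renders;.png
```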

**2024/08/25**:
* Added conversion coercion for Mixlab Layer types
![Mixlab supports](https://github.com/user-attachments/assets/05a53b98-b620-4743-b7b5-26da4140d443)
4 changes: 4 additions & 0 deletions __init__.py
@@ -196,6 +196,7 @@ class Lexicon(metaclass=LexiconMeta):
FALSE = '🇫', "False"
FILEN = '💾', "File Name"
FILTER = '🔎', "Filter"
FIND = 'FIND', "Find"
FIXED = 'FIXED', "Fixed"
FLIP = '🙃', "Flip Input A and Input B with each other"
FLOAT = '🛟', "Float"
@@ -285,6 +286,8 @@ class Lexicon(metaclass=LexiconMeta):
RATE = 'RATE', "Rate"
RECORD = '⏺', "Arm record capture from selected device"
REGION = 'REGION', "Region"
RECURSE = 'RECURSE', "Search within sub-directories"
REPLACE = 'REPLACE', "String to use as replacement"
RESET = 'RESET', "Reset"
RGB = '🌈', "RGB (no alpha) Color"
RGB_A = '🌈A', "RGB (no alpha) Color"
@@ -399,6 +402,7 @@ def __call__(cls, *arg, **kw) -> Any:
# =============================================================================

class JOVBaseNode:
NOT_IDEMPOTENT = True
RETURN_TYPES = ()
FUNCTION = "run"
# instance map for caching
74 changes: 73 additions & 1 deletion core/calc.py
@@ -23,7 +23,7 @@
JOVBaseNode, ComfyAPIMessage, TimedOutException, JOV_TYPE_ANY, \
JOV_TYPE_FULL, JOV_TYPE_NUMBER, JOV_TYPE_VECTOR

from Jovimetrix.sup.util import parse_param, parse_value, vector_swap, \
from Jovimetrix.sup.util import parse_dynamic, parse_param, parse_value, vector_swap, \
zip_longest_fill, EnumConvertType, EnumSwizzle

from Jovimetrix.sup.anim import ease_op, wave_op, EnumWave, EnumEase
@@ -98,6 +98,13 @@ class EnumComparison(Enum):
IN = 82
NOT_IN = 83

class EnumConvertString(Enum):
SPLIT = 10
JOIN = 30
FIND = 40
REPLACE = 50
SLICE = 70 # start - end - step = -1, -1, 1

class EnumNumberType(Enum):
INT = 0
FLOAT = 10
@@ -675,6 +682,71 @@ def run(self, **kw) -> Tuple[Any, Any]:
pbar.update_absolute(idx)
return [values]

class StringerNode(JOVBaseNode):
NAME = "STRINGER (JOV) 🪀"
CATEGORY = f"JOVIMETRIX 🔺🟩🔵/{JOV_CATEGORY}"
RETURN_TYPES = ("STRING",)
RETURN_NAMES = (Lexicon.STRING,)
SORT = 44
DESCRIPTION = """
Manipulate strings through filtering
"""

@classmethod
def INPUT_TYPES(cls) -> dict:
d = super().INPUT_TYPES()
d = deep_merge(d, {
"optional": {
                # split, join, find, replace, slice
Lexicon.FUNC: (EnumConvertString._member_names_, {"default": EnumConvertString.SPLIT.name,
"tooltips":"Operation to perform on the input string"}),
Lexicon.KEY: ("STRING", {"default":"", "dynamicPrompt":False, "tooltips":"Delimiter (SPLIT/JOIN) or string to use as search string (FIND/REPLACE)."}),
Lexicon.REPLACE: ("STRING", {"default":"", "dynamicPrompt":False}),
Lexicon.RANGE: ("VEC3INT", {"default":(0, -1, 1), "tooltips":"Start, End and Step. Values will clip to the actual list size(s)."}),
}
})
return Lexicon._parse(d, cls)

    def run(self, **kw) -> Tuple[str, ...]:
        # gather every dynamic input value into one working list
data_list = parse_dynamic(kw, Lexicon.UNKNOWN, EnumConvertType.ANY, None)
if data_list is None:
            logger.warning("no data for list")
return ([],)
# flat list of ALL the dynamic inputs...
data_list = [item for sublist in data_list for item in sublist]
# single operation mode -- like array node
op = parse_param(kw, Lexicon.FUNC, EnumConvertType.STRING, EnumConvertString.SPLIT.name)[0]
key = parse_param(kw, Lexicon.KEY, EnumConvertType.STRING, "")[0]
replace = parse_param(kw, Lexicon.REPLACE, EnumConvertType.STRING, "")[0]
stenst = parse_param(kw, Lexicon.RANGE, EnumConvertType.VEC3INT, [(0, -1, 1)])[0]
results = []
match EnumConvertString[op]:
case EnumConvertString.SPLIT:
results = data_list
if key != "":
results = [r.split(key) for r in data_list]
case EnumConvertString.JOIN:
results = [key.join(data_list)]
case EnumConvertString.FIND:
results = [r for r in data_list if r.find(key) > -1]
case EnumConvertString.REPLACE:
results = data_list
if key != "":
results = [r.replace(key, replace) for r in data_list]
            case EnumConvertString.SLICE:
                start, end, step = stenst
                for x in data_list:
                    # clip the requested range per string; -1 means "to the end"
                    s = len(x) if start < 0 else min(max(0, start), len(x))
                    e = len(x) if end < 0 else min(max(0, end), len(x))
                    if step != 0:
                        results.append(x[s:e:step])
                    else:
                        results.append(x)
if len(results) == 0:
results = [""]
return (results,) if len(results) > 1 else (results[0],)
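
For orientation, a minimal standalone sketch of what each STRINGER operation does to a flat list of input strings, written against plain Python (the sample strings, delimiters and variable names are made up for illustration; the node itself receives its list through dynamic inputs and the widgets above):

```python
# Illustrative only: mirrors the STRINGER operations on made-up sample data.
data = ["a,b,c", "hello world"]

split    = [s.split(",") for s in data]               # SPLIT on ','  -> [['a', 'b', 'c'], ['hello world']]
joined   = ["-".join(data)]                           # JOIN with '-' -> ['a,b,c-hello world']
found    = [s for s in data if s.find("o") > -1]      # FIND 'o'      -> ['hello world']
replaced = [s.replace(",", ";") for s in data]        # REPLACE ',' with ';' -> ['a;b;c', 'hello world']
# SLICE with (start, end, step) = (0, -1, 1): -1 clips to the end of each string
sliced   = [s[0:len(s):1] for s in data]              # -> ['a,b,c', 'hello world']
```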

class SwizzleNode(JOVBaseNode):
NAME = "SWIZZLE (JOV) 😵"
CATEGORY = f"JOVIMETRIX 🔺🟩🔵/{JOV_CATEGORY}"
2 changes: 1 addition & 1 deletion core/compose.py
@@ -352,7 +352,7 @@ def INPUT_TYPES(cls) -> dict:
{"default": EnumColorMatchMap.USER_MAP.name}),
Lexicon.COLORMAP: (EnumColorMap._member_names_,
{"default": EnumColorMap.HSV.name}),
Lexicon.VALUE: ("INT", {"default": 255, "mij": 0, "maj": 255}),
Lexicon.VALUE: ("INT", {"default": 255, "mij": 0, "maj": 255, "tooltips":"The number of colors to use from the LUT during the remap. Will quantize the LUT range."}),
Lexicon.FLIP: ("BOOLEAN", {"default": False}),
Lexicon.INVERT: ("BOOLEAN", {"default": False,
"tooltips": "Invert the color match output"}),
49 changes: 29 additions & 20 deletions core/utility.py
@@ -530,10 +530,11 @@ def INPUT_TYPES(cls) -> dict:
d = deep_merge(d, {
"optional": {
Lexicon.QUEUE: ("STRING", {"multiline": True, "default": "./res/img/test-a.png"}),
Lexicon.VALUE: ("INT", {"mij": 0, "default": 0, "tooltips": "the current index for the current queue item"}),
Lexicon.VALUE: ("INT", {"mij": 0, "default": 0, "tooltips": "The current index for the current queue item"}),
Lexicon.WAIT: ("BOOLEAN", {"default": False, "tooltips":"Hold the item at the current queue index"}),
Lexicon.RESET: ("BOOLEAN", {"default": False, "tooltips":"reset the queue back to index 1"}),
Lexicon.BATCH: ("BOOLEAN", {"default": False, "tooltips":"load all items, if they are loadable items, i.e. batch load images from the Queue's list"}),
Lexicon.RESET: ("BOOLEAN", {"default": False, "tooltips":"Reset the queue back to index 1"}),
Lexicon.BATCH: ("BOOLEAN", {"default": False, "tooltips":"Load all items, if they are loadable items, i.e. batch load images from the Queue's list"}),
Lexicon.RECURSE: ("BOOLEAN", {"default": False}),
},
"outputs": {
0: (Lexicon.ANY_OUT, {"tooltips":"Current item selected from the Queue list"}),
Expand All @@ -546,35 +547,41 @@ def INPUT_TYPES(cls) -> dict:
return Lexicon._parse(d, cls)

@classmethod
def IS_CHANGED(cls) -> float:
def IS_CHANGED(cls, *arg, **kw) -> float:
return float("nan")

def __init__(self) -> None:
self.__formats = image_formats()
# print('formats', self.__formats)
self.__formats.extend(self.VIDEO_FORMATS)
self.__index = 0
self.__q = None
self.__index_last = None
self.__len = 0
self.__previous = None
self.__last_q_value = {}

def __parse(self, data) -> list:
def __parse(self, data: Any, recurse: bool=False) -> list:
entries = []
for line in data.strip().split('\n'):
parts = [part.strip() for part in line.split(',')]
count = 1
if len(parts) > 2:
try: count = int(parts[-1])
except: pass
if len(line) == 0:
continue

# <directory>;png,gif,jpg
parts = [part.strip() for part in line.split(';')]
data = [parts[0]]
path = Path(parts[0])
path2 = Path(ROOT / parts[0])
if path.is_dir() or path2.is_dir():
philter = parts[1].split(';') if len(parts) > 1 and isinstance(parts[1], str) else image_formats()
philter.extend(self.VIDEO_FORMATS)
path = path if path.is_dir() else path2
file_names = [file.name for file in path.iterdir() if file.is_file()]
new_data = [str(path / fname) for fname in file_names if any(fname.endswith(pat) for pat in philter)]
if path.exists() or path2.exists():
philter = parts[1].split(',') if len(parts) > 1 and isinstance(parts[1], str) else self.__formats
path = path if path.exists() else path2

if recurse:
file_names = [str(file.resolve()) for file in path.rglob('*') if file.is_file()]
else:
file_names = [str(file.resolve()) for file in path.iterdir() if file.is_file()]
new_data = [fname for fname in file_names if any(fname.endswith(pat) for pat in philter)]

if len(new_data):
data = new_data
elif path.is_file() or path2.is_file():
Expand All @@ -588,12 +595,12 @@ def __parse(self, data) -> list:
elif len(results := glob.glob(str(path2))) > 0:
data = [x.replace('\\', '/') for x in results]

if len(data) and count > 0:
if len(data):
ret = []
for x in data:
try: ret.append(float(x))
except: ret.append(x)
entries.extend(ret * count)
entries.extend(ret)
return entries
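
As a rough standalone sketch of the directory handling added above (the function name and the simplifications are mine, not part of the node; it assumes the same `<directory>;<ext>,<ext>` line format and the pathlib semantics used in the diff):

```python
from pathlib import Path

def expand_queue_line(line: str, formats: list[str], recurse: bool = False) -> list[str]:
    # "<directory>;png,gif,jpg" -- the part after ';' narrows the extension filter
    parts = [p.strip() for p in line.split(';')]
    path = Path(parts[0])
    if not path.is_dir():
        return [parts[0]]  # not a directory; pass the entry through unchanged
    philter = parts[1].split(',') if len(parts) > 1 else formats
    walker = path.rglob('*') if recurse else path.iterdir()
    return [str(f.resolve()) for f in walker
            if f.is_file() and any(str(f).endswith(ext) for ext in philter)]

# e.g. expand_queue_line("c:/images;.png,.jpg,.gif", [".png", ".jpg"], recurse=True)
```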

def run(self, ident, **kw) -> None:
Expand All @@ -606,7 +613,7 @@ def process(q_data: Any) -> Tuple[torch.Tensor, torch.Tensor] | str | dict:
if not os.path.isfile(q_data):
return q_data
_, ext = os.path.splitext(q_data)
if ext in image_formats():
if ext in self.__formats:
data = image_load(q_data)[0]
self.__last_q_value[q_data] = data
elif ext == '.json':
Expand All @@ -626,8 +633,10 @@ def process(q_data: Any) -> Tuple[torch.Tensor, torch.Tensor] | str | dict:
# process Q into ...
# check if folder first, file, then string.
# entry is: data, <filter if folder:*.png,*.jpg>, <repeats:1+>
print(kw)
recurse = parse_param(kw, Lexicon.RECURSE, EnumConvertType.BOOLEAN, False)[0]
q = parse_param(kw, Lexicon.QUEUE, EnumConvertType.STRING, "")[0]
self.__q = self.__parse(q)
self.__q = self.__parse(q, recurse)
self.__len = len(self.__q)
self.__index_last = 0
self.__previous = self.__q[0] if len(self.__q) else None
1 change: 1 addition & 0 deletions node_list.json
@@ -51,6 +51,7 @@
"STEREOSCOPIC (JOV) \ud83d\udd76\ufe0f": "Simulates depth perception in images by generating stereoscopic views",
"STREAM READER (JOV) \ud83d\udcfa": "Capture frames from various sources such as URLs, cameras, monitors, windows, or Spout streams",
"STREAM WRITER (JOV) \ud83c\udf9e\ufe0f": "Sends frames to a specified route, typically for live streaming or recording purposes",
"STRINGER (JOV) \ud83e\ude80": "Manipulate strings through filtering",
"SWIZZLE (JOV) \ud83d\ude35": "Swap components between two vectors based on specified swizzle patterns and values",
"TEXT GEN (JOV) \ud83d\udcdd": "Generates images containing text based on parameters such as font, size, alignment, color, and position",
"THRESHOLD (JOV) \ud83d\udcc9": "Define a range and apply it to an image for segmentation and feature extraction",
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,7 +1,7 @@
[project]
name = "jovimetrix"
description = "Integrates Webcam, MIDI, Spout and GLSL shader support. Animation via tick. Parameter manipulation with wave generator. Math operations with Unary and Binary support. Value conversion for all major types (int, string, list, dict, Image, Mask). Shape mask generation, image stacking and channel ops, batch splitting, merging and randomizing, load images and video from anywhere, dynamic bus routing with a single node, export support for GIPHY, save output anywhere! flatten, crop, transform; check colorblindness, make stereogram or stereoscopic images, or liner interpolate values and more."
version = "1.2.30"
version = "1.2.31"
license = { file = "LICENSE" }
dependencies = [
"aenum>=3.1.15,<4",
2 changes: 1 addition & 1 deletion web/nodes/array.js
@@ -1,5 +1,5 @@
/**
* File: batcher.js
* File: array.js
* Project: Jovimetrix
*
*/
21 changes: 21 additions & 0 deletions web/nodes/stringer.js
@@ -0,0 +1,21 @@
/**
* File: stringer.js
* Project: Jovimetrix
*
*/

import { app } from "../../../scripts/app.js"
import { nodeAddDynamic } from '../util/util_node.js'

const _id = "STRINGER (JOV) 🪀"
const _prefix = '❔'

app.registerExtension({
name: 'jovimetrix.node.' + _id,
async beforeRegisterNodeDef(nodeType, nodeData) {
if (nodeData.name !== _id) {
return;
}
nodeType = nodeAddDynamic(nodeType, _prefix);
}
})
