Commit 34380e3

queue too marked as primary

queue marked deprecated

Amorano committed Sep 9, 2024
1 parent d4cddce commit 34380e3
Showing 5 changed files with 105 additions and 78 deletions.
34 changes: 13 additions & 21 deletions __init__.py
@@ -52,11 +52,8 @@
from string import Template
from typing import Any, Dict, List, Literal, Tuple

import markdown

from aiohttp import web, ClientSession
from server import PromptServer
from nodes import load_custom_node

from loguru import logger

@@ -783,6 +780,7 @@ def poll(cls, ident, period=0.01, timeout=3) -> Any:
if isinstance(ident, (set, list, tuple, )):
ident = ident[0]
sid = str(ident)
# logger.debug(f'sid {sid} -- {cls.MESSAGE}')
while not (sid in cls.MESSAGE) and time.monotonic() - _t < timeout:
time.sleep(period)
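The loop above is a simple busy-wait keyed on the sender id: sleep in short increments until `str(ident)` shows up in the class-level message dict or a monotonic deadline passes. A self-contained sketch of the pattern — `MESSAGE`, `TimedOutException`, and the final `pop` are stand-ins, not the module's exact API:

```python
import time

MESSAGE: dict = {}

class TimedOutException(Exception):
    pass

def poll(ident, period: float = 0.01, timeout: float = 3.0):
    """Wait for a message keyed by str(ident); raise after `timeout` seconds."""
    sid = str(ident)
    start = time.monotonic()
    # short sleeps so a message posted mid-wait is picked up quickly
    while sid not in MESSAGE and time.monotonic() - start < timeout:
        time.sleep(period)
    if sid not in MESSAGE:
        raise TimedOutException(f"no message for {sid}")
    return MESSAGE.pop(sid)  # whether the real poll consumes the entry is not shown here

MESSAGE["42"] = {"id": "42", "cmd": "reset"}
assert poll(42)["cmd"] == "reset"  # str() keying lets an int ident match
```

Note that with `timeout=0` the `while` condition is false on entry, so the call degrades to a non-blocking check — which is exactly how the `parse_reset` change further down uses it.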

@@ -798,25 +796,18 @@ def comfy_message(ident:str, route:str, data:dict) -> None:

try:

@PromptServer.instance.routes.get("/jovimetrix/reload")
async def reload(request) -> web.Response:

data = {k: dir(v) for k, v in sys.modules.copy().items()}
with open('a.json', 'w') as fhandle:
json.dump(data, fhandle, indent=4)

module = importlib.import_module("Jovimetrix")
# ensure the module is reloaded
importlib.reload(module)
load_custom_node('custom_nodes/Jovimetrix')
return web.Response(text='RELOADED JOVIMETRIX')
@PromptServer.instance.routes.get("/jovimetrix/message")
async def jovimetrix_message(request) -> Any:
return web.json_response(ComfyAPIMessage.MESSAGE)

@PromptServer.instance.routes.post("/jovimetrix/message")
async def jovimetrix_message(request) -> Any:
async def jovimetrix_message_post(request) -> Any:
json_data = await request.json()
did = json_data.get("id", None)
ComfyAPIMessage.MESSAGE[str(did)] = json_data
return web.json_response()
response = web.json_response()
if (did := json_data.get("id", None)) is not None:
ComfyAPIMessage.MESSAGE[str(did)] = json_data
response = web.json_response(json_data)
return response

@PromptServer.instance.routes.get("/jovimetrix/config")
async def jovimetrix_config(request) -> Any:
@@ -919,8 +910,9 @@ async def jovimetrix_doc_node_comfy(request) -> Any:

def parse_reset(ident:str) -> int:
try:
data = ComfyAPIMessage.poll(ident, timeout=0.01)
return data.get('cmd', None) == 'reset'
data = ComfyAPIMessage.poll(ident, timeout=0)
ret = data.get('cmd', None)
return ret == 'reset'
except TimedOutException as e:
return -1
except Exception as e:
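Two things changed in `parse_reset`: the poll timeout dropped from 0.01 to 0, turning the wait into a non-blocking check, and the `cmd` lookup was split onto its own line. The function remains effectively tri-state despite its `int` annotation — a hedged sketch of the contract (stand-in function, not the module's code):

```python
from typing import Optional, Union

def parse_reset_result(message: Optional[dict]) -> Union[bool, int]:
    """True = reset requested, False = some other message, -1 = nothing arrived."""
    if message is None:  # models the TimedOutException path
        return -1
    return message.get('cmd', None) == 'reset'

assert parse_reset_result({"cmd": "reset"}) is True
assert parse_reset_result({"cmd": "stop"}) is False
assert parse_reset_result(None) == -1
```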
74 changes: 38 additions & 36 deletions core/utility.py
@@ -626,7 +626,7 @@ def run(self, ident, **kw) -> Tuple[Any, List[str], str, int, int]:
self.__index = 0

if (new_val := parse_param(kw, Lexicon.VALUE, EnumConvertType.INT, self.__index)[0]) > 0:
self.__index = new_val
self.__index = new_val - 1

if self.__q is None:
# process Q into ...
@@ -644,6 +644,7 @@ def run(self, ident, **kw) -> Tuple[Any, List[str], str, int, int]:
# make sure we have more to process if are a single fire queue
stop = parse_param(kw, Lexicon.STOP, EnumConvertType.BOOLEAN, False)[0]
if stop and self.__index >= self.__len:
comfy_message(ident, "jovi-queue-done", self.status)
interrupt_processing()
return self.__previous, self.__q, self.__current, self.__index_last+1, self.__len
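The single added line means the front end receives a final `jovi-queue-done` status before processing halts, so the `EVENT_JOVI_DONE` listener in queue_too.js (below) always fires. A minimal simulation of that ordering — both helpers are stubs; in Jovimetrix they message the ComfyUI server and trigger its interrupt:

```python
def comfy_message(ident: str, route: str, data: dict) -> None:
    print(f"[{route}] #{ident}: {data}")  # stand-in for the server-side send

class Interrupted(Exception):
    pass

def interrupt_processing() -> None:
    raise Interrupted  # stand-in for ComfyUI's interrupt

def run_step(ident: str, index: int, length: int, stop: bool) -> None:
    if stop and index >= length:
        comfy_message(ident, "jovi-queue-done", {"i": index, "s": length})
        interrupt_processing()  # halts only after the done ping went out

try:
    run_step("42", index=3, length=3, stop=True)
except Interrupted:
    pass  # processing stopped, but listeners already saw "jovi-queue-done"
```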

@@ -655,42 +656,41 @@
if loop == True:
self.__index %= self.__len
else:
self.__index = max(0, self.__len-1)
self.__index = min(self.__index, self.__len-1)

self.__current = self.__q[self.__index]
data = self.__previous
self.__index_last = self.__index
info = f"QUEUE #{ident} [{self.__current}] ({self.__index})"
batched = False
if wait == True:
info += f" PAUSED"
else:
if (batched := parse_param(kw, Lexicon.BATCH, EnumConvertType.BOOLEAN, False)[0]) == True:
data = []
mw, mh, mc = 0, 0, 0
if (batched := parse_param(kw, Lexicon.BATCH, EnumConvertType.BOOLEAN, False)[0]) == True:
data = []
mw, mh, mc = 0, 0, 0
pbar = ProgressBar(self.__len)
for idx in range(self.__len):
ret = self.process(self.__q[idx])
if isinstance(ret, (np.ndarray,)):
h, w, c = ret.shape
mw, mh, mc = max(mw, w), max(mh, h), max(mc, c)
data.append(ret)
pbar.update_absolute(idx)

if mw != 0 or mh != 0 or mc != 0:
ret = []
pbar = ProgressBar(self.__len)
for idx in range(self.__len):
ret = self.process(self.__q[idx])
if isinstance(ret, (np.ndarray,)):
h, w, c = ret.shape
mw, mh, mc = max(mw, w), max(mh, h), max(mc, c)
data.append(ret)
for idx, d in enumerate(data):
d = image_convert(d, mc)
d = image_matte(d, (0,0,0,0), width=mw, height=mh)
# d = cv2tensor(d)
ret.append(d)
pbar.update_absolute(idx)

if mw != 0 or mh != 0 or mc != 0:
ret = []
pbar = ProgressBar(self.__len)
for idx, d in enumerate(data):
d = image_convert(d, mc)
d = image_matte(d, (0,0,0,0), width=mw, height=mh)
# d = cv2tensor(d)
ret.append(d)
pbar.update_absolute(idx)
# data = torch.cat(ret, dim=0)
data = ret
else:
data = self.process(self.__q[self.__index])
self.__index += 1
# data = torch.cat(ret, dim=0)
data = ret
elif wait == True:
info += f" PAUSED"
else:
data = self.process(self.__q[self.__index])
self.__index += 1

self.__previous = data
comfy_message(ident, "jovi-queue-ping", self.status)
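The restructured batch branch makes two passes: the first loads every item and tracks the maximum width, height, and channel count; the second converts and mattes each frame to that common size so the list can stack into one batch. `image_convert` and `image_matte` are Jovimetrix helpers — this numpy-only sketch of the two-pass idea assumes center placement on a blank canvas, and is not the library code:

```python
import numpy as np

def matte_to(img: np.ndarray, w: int, h: int, c: int) -> np.ndarray:
    # grow channels first, then center the frame on a blank canvas
    if img.shape[2] < c:
        extra = np.zeros((*img.shape[:2], c - img.shape[2]), dtype=img.dtype)
        img = np.concatenate([img, extra], axis=2)
    canvas = np.zeros((h, w, c), dtype=img.dtype)
    y, x = (h - img.shape[0]) // 2, (w - img.shape[1]) // 2
    canvas[y:y + img.shape[0], x:x + img.shape[1]] = img
    return canvas

frames = [np.ones((32, 48, 3), np.uint8), np.ones((64, 16, 4), np.uint8)]
# pass 1: find the common size; pass 2: pad everything to it
mh = max(f.shape[0] for f in frames)
mw = max(f.shape[1] for f in frames)
mc = max(f.shape[2] for f in frames)
batch = [matte_to(f, mw, mh, mc) for f in frames]
assert all(b.shape == (mh, mw, mc) for b in batch)
```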
@@ -703,7 +703,7 @@ def status(self) -> dict[str, Any]:
return {
"id": self.__ident,
"c": self.__current,
"i": self.__index_last+1,
"i": self.__index_last,
"s": self.__len,
"l": self.__q
}
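The "i" field now carries the raw zero-based index; the +1 moved to the display side (see the `update_report` change in web/nodes/queue_too.js below). Both halves of that contract in one sketch, with the payload shape taken from `status()` above:

```python
status = {"id": "7", "c": "./res/img/test-a.png", "i": 0, "s": 3,
          "l": ["./res/img/test-a.png", "b.png", "c.png"]}
report = f"[{status['i'] + 1} / {status['s']}]\n{status['c']}"  # JS adds the 1
assert report.startswith("[1 / 3]")
```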
@@ -714,6 +714,7 @@ class QueueNode(QueueBaseNode):
DESCRIPTION = """
Manage a queue of items, such as file paths or data. Supports various formats including images, videos, text files, and JSON files. You can specify the current index for the queue item, enable pausing the queue, or reset it back to the first index. The node outputs the current item in the queue, the entire queue, the current index, and the total number of items in the queue.
"""
DEPRECATED = True

@classmethod
def INPUT_TYPES(cls) -> dict:
@@ -751,16 +752,16 @@ class QueueTooNode(QueueBaseNode):
@classmethod
def INPUT_TYPES(cls) -> dict:
d = super().INPUT_TYPES()
d = deep_merge(d, {
d = {
"optional": {
Lexicon.QUEUE: ("STRING", {"multiline": True, "default": "./res/img/test-a.png"}),
Lexicon.BATCH: ("BOOLEAN", {"default": False, "tooltips":"Load all items, if they are loadable items, i.e. batch load images from the Queue's list"}),
Lexicon.RECURSE: ("BOOLEAN", {"default": False}),
Lexicon.STOP: ("BOOLEAN", {"default": False, "tooltips":"When the Queue is out of items, send a `HALT` to ComfyUI."}),
Lexicon.BATCH: ("BOOLEAN", {"default": False, "tooltips":"Load all items, if they are loadable items, i.e. batch load images from the Queue's list"}),
Lexicon.VALUE: ("INT", {"mij": 0, "default": 0, "tooltips": "The current index for the current queue item"}),
Lexicon.WAIT: ("BOOLEAN", {"default": False, "tooltips":"Hold the item at the current queue index"}),
Lexicon.RESET: ("BOOLEAN", {"default": False, "tooltips":"Reset the queue back to index 1"}),
Lexicon.STOP: ("BOOLEAN", {"default": False, "tooltips":"When the Queue is out of items, send a `HALT` to ComfyUI."}),
Lexicon.LOOP: ("BOOLEAN", {"default": False, "tooltips":"If the queue should loop around the end when reached. If `False`, at the end of the Queue, if there are more iterations, it will just send the previous image."}),
Lexicon.RESET: ("BOOLEAN", {"default": False, "tooltips":"Reset the queue back to index 1"}),
#
Lexicon.MODE: (EnumScaleMode._member_names_, {"default": EnumScaleMode.MATTE.name}),
Lexicon.WH: ("VEC2INT", {"default": (512, 512), "mij":MIN_IMAGE_SIZE, "label": [Lexicon.W, Lexicon.H]}),
@@ -775,8 +776,9 @@ def INPUT_TYPES(cls) -> dict:
4: (Lexicon.INDEX, {"tooltips":"Current index for the selected item in the Queue list"}),
5: (Lexicon.TOTAL, {"tooltips":"Total items in the current Queue List"}),
6: (Lexicon.TRIGGER, {"tooltips":"Send a True signal when the queue end index is reached"}),
}
})
},
"hidden": d.get("hidden", {}),
}
return Lexicon._parse(d, cls)

def run(self, ident, **kw) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, str, int, int]:
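The INPUT_TYPES change drops the `deep_merge` with the parent's widgets: QueueTooNode now declares its optional block outright and carries over only the parent's hidden section. A minimal sketch of the pattern — the parent dict and widget names here are illustrative stand-ins:

```python
def parent_input_types() -> dict:
    # stand-in for super().INPUT_TYPES(): legacy optional widgets plus the
    # hidden inputs every node needs (e.g. the unique ident)
    return {"optional": {"LEGACY": ("BOOLEAN", {"default": False})},
            "hidden": {"ident": "UNIQUE_ID"}}

def input_types() -> dict:
    d = parent_input_types()
    return {
        "optional": {
            "Q": ("STRING", {"multiline": True, "default": "./res/img/test-a.png"}),
            "BATCH": ("BOOLEAN", {"default": False}),
        },
        "hidden": d.get("hidden", {}),  # inherit only the hidden section
    }

assert "LEGACY" not in input_types()["optional"]  # parent optionals are gone
assert input_types()["hidden"] == {"ident": "UNIQUE_ID"}
```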
4 changes: 2 additions & 2 deletions web/nodes/delay.js
@@ -7,7 +7,7 @@
import { api } from "../../../scripts/api.js";
import { app } from "../../../scripts/app.js";
import { domShowModal } from '../util/util.js'
import { apiPost } from '../util/util_api.js'
import { apiJovimetrix } from '../util/util_api.js'
import { bubbles } from '../util/util_fun.js'

const _id = "DELAY (JOV) ✋🏽"
@@ -60,7 +60,7 @@ app.registerExtension({
console.error(e);
}
}
apiPost('/jovimetrix/message', { id: event.detail.id, cancel: value });
apiJovimetrix(event.detail.id, value);

showing = false;
window.bubbles_alive = false;
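Note this swap is not byte-for-byte equivalent: the old call posted `{"id": ..., "cancel": value}`, while `apiJovimetrix` (web/util/util_api.js below) posts `{"id": ..., "cmd": value}`, so whatever reads the stored message sees a `cmd` key where `cancel` used to be. In Python terms:

```python
node_id, value = "17", True
old_payload = {"id": node_id, "cancel": value}  # apiPost('/jovimetrix/message', ...)
new_payload = {"id": node_id, "cmd": value}     # apiJovimetrix(event.detail.id, value)
assert set(old_payload) != set(new_payload)     # "cancel" became "cmd"
```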
38 changes: 29 additions & 9 deletions web/nodes/queue_too.js
@@ -28,7 +28,7 @@ app.registerExtension({
widgetSizeModeHook(nodeType);

function update_report(self) {
self.widget_report.value = `[${self.data_index} / ${self.data_all.length}]\n${self.data_current}`;
self.widget_report.value = `[${self.data_index+1} / ${self.data_all.length}]\n${self.data_current}`;
app.canvas.setDirty(true);
}

@@ -49,22 +49,41 @@
this.data_all = [];

const widget_queue = this.widgets.find(w => w.name === 'Q');
const widget_batch = this.widgets.find(w => w.name === 'BATCH');
const widget_value = this.widgets.find(w => w.name === 'VAL');
const widget_hold = this.widgets.find(w => w.name === '✋🏽');
const widget_reset = this.widgets.find(w => w.name === 'RESET');
const widget_batch = this.widgets.find(w => w.name === 'BATCH');
const widget_stop = this.widgets.find(w => w.name === 'STOP');
const widget_loop = this.widgets.find(w => w.name === '🔄');
widget_batch.callback = async() => {
widget_batch.callback = () => {
widgetHide(this, widget_value);
widgetHide(this, widget_hold);
widgetHide(this, widget_reset);
widgetHide(this, widget_stop);
widgetHide(this, widget_loop);

if (!widget_batch.value) {
widgetHide(this, widget_reset);
if (widget_batch.value == false) {
widgetShow(widget_value);
widgetShow(widget_hold);
widgetShow(widget_reset);
if (widget_hold.value == false) {
widgetShow(widget_stop);
widgetShow(widget_loop);
widgetShow(widget_reset);
}
}
nodeFitHeight(this);
}

widget_hold.callback = () => {
if (widget_batch.value == true) {
return;
}
widgetHide(this, widget_stop);
widgetHide(this, widget_loop);
widgetHide(this, widget_reset);
if (widget_hold.value == false) {
widgetShow(widget_stop);
widgetShow(widget_loop);
widgetShow(widget_reset);
}
nodeFitHeight(this);
}
Expand All @@ -74,7 +93,7 @@ app.registerExtension({
update_list(self, value);
});

widget_reset.callback = async() => {
widget_reset.callback = () => {
widget_reset.value = false;
apiJovimetrix(self.id, "reset");
}
@@ -113,7 +132,8 @@
api.removeEventListener(EVENT_JOVI_DONE, python_queue_done);
};

setTimeout(() => { widget_batch.callback(); }, 10);
setTimeout(() => { widget_hold.callback(); }, 5);
setTimeout(() => { widget_batch.callback(); }, 5);
return me;
}

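The paired batch and hold callbacks encode a single visibility rule for the Queue Too controls: batch mode hides all stepping widgets, and stop/loop/reset are further gated on hold being off. A pure-function sketch of that rule — plain names stand in for the VAL, ✋🏽, STOP, 🔄, and RESET widgets:

```python
def visible_widgets(batch: bool, hold: bool) -> set:
    if batch:
        return set()  # batch load: no stepping controls at all
    shown = {"value", "hold"}  # single-step mode always shows these
    if not hold:
        shown |= {"stop", "loop", "reset"}  # only meaningful while not held
    return shown

assert visible_widgets(batch=True, hold=False) == set()
assert visible_widgets(batch=False, hold=True) == {"value", "hold"}
assert "reset" in visible_widgets(batch=False, hold=False)
```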
33 changes: 23 additions & 10 deletions web/util/util_api.js
@@ -22,14 +22,27 @@ export async function apiPost(url, data) {
}

export async function apiJovimetrix(id, cmd) {
return api.fetchApi('/jovimetrix/message', {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
id: id,
cmd: cmd
}),
})
try {
const response = await api.fetchApi('/jovimetrix/message', {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
id: id,
cmd: cmd
}),
});

if (!response.ok) {
throw new Error(`Error: ${response.status} - ${response.statusText}`);
}
console.debug(response);
return response;

} catch (error) {
console.error("API call to Jovimetrix failed:", error);
throw error; // or return { success: false, message: error.message }
}
}
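For parity, the same hardened behavior from a Python client: raise on a non-2xx status, log, and re-raise so the caller decides what to do. The endpoint and payload shape come from the POST route in __init__.py; the requests dependency and the default ComfyUI port are assumptions:

```python
import requests

def api_jovimetrix(ident: str, cmd: str, base: str = "http://127.0.0.1:8188") -> dict:
    try:
        resp = requests.post(f"{base}/jovimetrix/message",
                             json={"id": ident, "cmd": cmd}, timeout=5)
        resp.raise_for_status()  # mirrors the `if (!response.ok) throw` check
        return resp.json()       # the handler echoes the payload when id is set
    except requests.RequestException as err:
        print(f"API call to Jovimetrix failed: {err}")
        raise
```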
