Commit
array node bug fix for merge mode
blend node wasn't carrying alpha from mask
Amorano committed Aug 26, 2024
1 parent b8bc9ed commit c1a2e18
Showing 3 changed files with 25 additions and 17 deletions.
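
In short: the array node's batch logic in core/utility.py wrote its selection into a stray `results` name (the unimplemented CARTESIAN path, for one, could then hit an unbound variable), and `image_blend` in sup/image.py returned the blended pixels without re-attaching the mask as the alpha channel. A minimal sketch of that second symptom in NumPy, with a hypothetical stand-in name (`blend_masked` is not the project's API):

import numpy as np

def blend_masked(image_a: np.ndarray, image_b: np.ndarray,
                 mask: np.ndarray, alpha: float = 1.0) -> np.ndarray:
    """Blend two HxWx4 uint8 images; carry the HxW uint8 mask into the alpha."""
    # per-pixel blend weight: the global alpha scaled by the mask
    weight = (alpha * (mask.astype(np.float32) / 255.0))[..., None]
    out = image_a.astype(np.float32)
    out[..., :3] = image_a[..., :3] * (1.0 - weight) + image_b[..., :3] * weight
    out = out.astype(np.uint8)
    out[..., 3] = mask  # the fix: the mask becomes the output alpha
    return out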
1 change: 1 addition & 0 deletions core/compose.py
@@ -283,6 +283,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
 
         func = EnumBlendType[func]
         img = image_blend(pA, pB, mask, func, alpha)
+
         mode = EnumScaleMode[mode]
         if mode != EnumScaleMode.NONE:
             w, h = wihi
37 changes: 20 additions & 17 deletions core/utility.py
@@ -222,18 +222,18 @@ def run(self, **kw) -> Tuple[int, list]:
         mode = EnumBatchMode[mode]
         if mode == EnumBatchMode.PICK:
             index = index if index < len(data) else -1
-            results = [data[index]]
+            data = [data[index]]
         elif mode == EnumBatchMode.SLICE:
             start, end, step = slice_range
             end = len(data) if end == 0 else end
-            results = data[start:end:step]
+            data = data[start:end:step]
         elif mode == EnumBatchMode.RANDOM:
             if self.__seed is None or self.__seed != seed:
                 random.seed(seed)
                 self.__seed = seed
             if count == 0:
                 count = len(data)
-            results = random.sample(data, k=count)
+            data = random.sample(data, k=count)
         elif mode == EnumBatchMode.INDEX_LIST:
             junk = []
             for x in indices.split(','):
@@ -247,34 +247,37 @@ def run(self, **kw) -> Tuple[int, list]:
                     junk = list(range(a, b + 1))
                 else:
                     junk = [int(x)]
-            results = [data[i:j+1] for i, j in zip(junk, junk)]
+            data = [data[i:j+1] for i, j in zip(junk, junk)]
 
         elif mode == EnumBatchMode.CARTESIAN:
             logger.warning("NOT IMPLEMENTED - CARTESIAN")
 
-        if len(results) == 0:
+        if len(data) == 0:
             logger.warning("no data for list")
             return None, 0, None, 0
 
         if batch_chunk > 0:
-            results = self.batched(results, batch_chunk)
+            data = self.batched(data, batch_chunk)
 
-        size = len(results)
+        size = len(data)
         if output_is_image:
-            _, w, h = image_by_size(results)
-            logger.debug(f"{w}, {h}")
-            results = [image_convert(i, 4) for i in results]
-            results = [image_matte(i, (0,0,0,0), w, h) for i in results]
-            results = torch.stack(results, dim=0)
-            size = results.shape[0]
+            _, w, h = image_by_size(data)
+            result = []
+            for d in data:
+                d = tensor2cv(d)
+                d = image_convert(d, 4)
+                d = image_matte(d, (0,0,0,0), w, h)
+                result.append(cv2tensor(d))
+            data = torch.stack([r.squeeze(0) for r in result], dim=0)
+            size = data.shape[0]
 
         if count > 0:
-            results = results[0:count]
+            data = data[0:count]
 
-        if len(results) == 1:
-            results = results[0]
+        if len(data) == 1:
+            data = data[0]
 
-        return results, size, full_list, len(full_list)
+        return data, size, full_list, len(full_list)
 
 class ExportNode(JOVBaseNode):
     NAME = "EXPORT (JOV) 📽"
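
The reworked image branch above now routes every entry through tensor2cv, forces four channels, mattes onto the largest canvas, and only then stacks, so mixed sizes and channel counts can no longer break torch.stack. A rough sketch of that pattern in plain torch, assuming each image is an HxWxC float tensor with 3 or 4 channels (the sizing and matting here are simplified stand-ins, not the project's image_by_size/image_matte helpers):

import torch

def stack_batch(images: list[torch.Tensor]) -> torch.Tensor:
    # largest canvas wins, mirroring image_by_size(..., LARGEST)
    h = max(i.shape[0] for i in images)
    w = max(i.shape[1] for i in images)
    out = []
    for img in images:
        if img.shape[-1] == 3:  # promote RGB to RGBA with an opaque alpha
            img = torch.cat([img, torch.ones_like(img[..., :1])], dim=-1)
        canvas = torch.zeros(h, w, 4)  # transparent matte, like (0,0,0,0)
        y = (h - img.shape[0]) // 2  # center the image on the canvas
        x = (w - img.shape[1]) // 2
        canvas[y:y + img.shape[0], x:x + img.shape[1]] = img
        out.append(canvas)
    return torch.stack(out, dim=0)  # B x H x W x 4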
4 changes: 4 additions & 0 deletions sup/image.py
@@ -754,6 +754,10 @@ def image_blend(imageA: TYPE_IMAGE, imageB: TYPE_IMAGE, mask:Optional[TYPE_IMAGE
     alpha = np.clip(alpha, 0, 1)
     image = blendLayers(imageA, imageB, blendOp.value, alpha)
     image = pil2cv(image)
+
+    if mask is not None:
+        image = image_mask_add(image, mask)
+
     return image_crop_center(image, w, h)
 
 def image_by_size(image_list: List[TYPE_IMAGE], enumSize: EnumImageBySize=EnumImageBySize.LARGEST) -> Tuple[TYPE_IMAGE, int, int]:
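
For context, a hedged guess at the role image_mask_add plays here (the real helper is defined elsewhere in sup/image.py; this stand-in only illustrates re-attaching the mask as the alpha channel that blendLayers flattened away):

import cv2
import numpy as np

def mask_add(image: np.ndarray, mask: np.ndarray) -> np.ndarray:
    if image.shape[2] == 3:  # promote BGR to BGRA first
        image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
    if mask.ndim == 3:  # tolerate an HxWx1 mask
        mask = mask[..., 0]
    # resize defensively in case the mask and image disagree on size
    mask = cv2.resize(mask, (image.shape[1], image.shape[0]))
    image[..., 3] = mask
    return image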
