From a485af908287d4ab20c325051bf1f9b1921e734f Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 9 Mar 2024 20:23:51 +0530 Subject: [PATCH 01/43] audio fix --- ui_components/components/new_project_page.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ui_components/components/new_project_page.py b/ui_components/components/new_project_page.py index df25b66d..d8be1e14 100644 --- a/ui_components/components/new_project_page.py +++ b/ui_components/components/new_project_page.py @@ -67,14 +67,14 @@ def new_project_page(): initial_frame = data_repo.get_timing_from_frame_number(shot.uuid, 0) data_repo.delete_timing_from_uuid(initial_frame.uuid) - if uploaded_audio: - try: - if save_audio_file(uploaded_audio, new_project.uuid): - st.success("Audio file saved and attached successfully.") - else: - st.error("Failed to save and attach the audio file.") - except Exception as e: - st.error(f"Failed to save the uploaded audio due to {str(e)}") + # if uploaded_audio: + # try: + # if save_audio_file(uploaded_audio, new_project.uuid): + # st.success("Audio file saved and attached successfully.") + # else: + # st.error("Failed to save and attach the audio file.") + # except Exception as e: + # st.error(f"Failed to save the uploaded audio due to {str(e)}") reset_project_state() From 14be942a3d73013c7e2b100d43df46d371185259 Mon Sep 17 00:00:00 2001 From: peteromallet Date: Tue, 12 Mar 2024 18:26:39 +0100 Subject: [PATCH 02/43] Aesthetic fixes --- ui_components/components/adjust_shot_page.py | 8 ++++---- ui_components/components/explorer_page.py | 4 ++-- ui_components/components/frame_styling_page.py | 5 +++-- ui_components/components/timeline_view_page.py | 9 ++++----- ui_components/widgets/animation_style_element.py | 11 +++++------ ui_components/widgets/shot_view.py | 2 +- ui_components/widgets/variant_comparison_grid.py | 6 +++--- utils/local_storage/url_storage.py | 2 +- 8 files changed, 23 insertions(+), 24 deletions(-) diff --git a/ui_components/components/adjust_shot_page.py b/ui_components/components/adjust_shot_page.py index 13edfb0a..b0ecab03 100644 --- a/ui_components/components/adjust_shot_page.py +++ b/ui_components/components/adjust_shot_page.py @@ -40,14 +40,14 @@ def adjust_shot_page(shot_uuid: str, h2): with column1: - st.markdown(f"### 🎬 '{shot.name}' frames _________") - st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") + st.markdown(f"### 🎬 '{shot.name}' frames") + st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") shot_keyframe_element(st.session_state["shot_uuid"], 4, column2, position="Individual") # sparkle emoji with Generate Frames - st.markdown("### ✨ Generate frames _________") - st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") + st.markdown("### ✨ Generate frames") + st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") generate_images_element(position='explorer', project_uuid=shot.project.uuid, timing_uuid=None, shot_uuid=shot.uuid) diff --git a/ui_components/components/explorer_page.py b/ui_components/components/explorer_page.py index 55afffed..fd5a4434 100644 --- a/ui_components/components/explorer_page.py +++ b/ui_components/components/explorer_page.py @@ -371,8 +371,8 @@ def gallery_image_view(project_uuid, shortlist=False, view=["main"], shot=None, if shortlist is False: st.markdown("***") - st.markdown("### 🖼️ Gallery _________") - st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") + st.markdown("### 🖼️ Gallery") + st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") h1,h2,h3,h4 = 
st.columns([3, 1, 1, 1]) # by default only showing explorer views diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index aa96414a..f1233c72 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -75,8 +75,8 @@ def frame_styling_page(shot_uuid: str): inpainting_element(options_width, timing.primary_image.location, position=f"{timing_uuid}") with options_width: - how_many_images = st.slider("How many images to generate", 1, 10, 1, key=f"how_many_images_{timing_uuid}") - if st.button("Generate inpainted image", key=f"generate_inpaint_{timing_uuid}"): + how_many_images = st.slider("How many images to generate:", 1, 10, 1, key=f"how_many_images_{timing_uuid}") + if st.button("Generate", key=f"generate_inpaint_{timing_uuid}"): if ("mask_to_use" in st.session_state and st.session_state["mask_to_use"]): for _ in range(how_many_images): # Loop based on how_many_images project_settings = data_repo.get_project_setting(shot.project.uuid) @@ -110,6 +110,7 @@ def frame_styling_page(shot_uuid: str): } process_inference_output(**inference_data) + st.rerun() else: st.error("Please create and save a mask before generation") time.sleep(0.7) diff --git a/ui_components/components/timeline_view_page.py b/ui_components/components/timeline_view_page.py index 94f51149..9de73a54 100644 --- a/ui_components/components/timeline_view_page.py +++ b/ui_components/components/timeline_view_page.py @@ -36,15 +36,14 @@ def timeline_view_page(shot_uuid: str, h2): st.markdown("***") slider1, slider2 = st.columns([4,1]) with slider1: - st.markdown(f"### 🪄 '{project.name}' timeline _________") - st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") - + st.markdown(f"### 🪄 '{project.name}' timeline") + st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") timeline_view(st.session_state["shot_uuid"], st.session_state['view']) - st.markdown("### ✨ Generate frames _________") - st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") + st.markdown("### ✨ Generate frames") + st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") generate_images_element(position='explorer', project_uuid=project_uuid, timing_uuid=None, shot_uuid=None) gallery_image_view(project_uuid,False,view=['add_and_remove_from_shortlist','view_inference_details','shot_chooser','add_to_any_shot']) \ No newline at end of file diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 48ef0ae4..f4ba6274 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -38,8 +38,8 @@ def animation_style_element(shot_uuid): 'animation_tool': AnimationToolType.ANIMATEDIFF.value, } - st.markdown("### 🎥 Generate animations _________") - st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") + st.markdown("### 🎥 Generate animations") + st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") with st.container(): advanced1, advanced2, advanced3 = st.columns([1.0,1.5, 1.0]) @@ -100,7 +100,9 @@ def animation_style_element(shot_uuid): with grid[2*j]: # Adjust the index for image column timing = timing_list[idx] if timing.primary_image and timing.primary_image.location: + st.info(f"**Frame {idx + 1}**") + st.image(timing.primary_image.location, use_column_width=True) # settings control @@ -127,10 +129,7 @@ def animation_style_element(shot_uuid): # distance, speed and freedom settings (also aggregates them into 
arrays) with grid[2*j+1]: # Add the new column after the image column if idx < len(timing_list) - 1: - st.write("") - st.write("") - st.write("") - st.write("") + # if st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] is a int, make it a float if isinstance(st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'], int): st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] = float(st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}']) diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 772a6fd6..ba8d2561 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -74,7 +74,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeli if idx == len(timing_list): if position != "Timeline": - st.info("**Add new frame(s) to shot**") + # st.info("**Add new frame(s) to shot**") add_key_frame_section(shot_uuid, False) else: diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index 5d12cad7..9d26b495 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -39,13 +39,13 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): shot_uuid = timing.shot.uuid timing_list ="" - col1, col2, col3 = st.columns([1, 1,0.5]) + col1, col2, col3 = st.columns([1, 0.25,0.5]) if stage == CreativeProcessType.MOTION.value: items_to_show = 2 num_columns = 3 with col1: - st.markdown(f"### 🎞️ '{shot.name}' options _________") - st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") + st.markdown(f"### 🎞️ '{shot.name}' options") + st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") else: items_to_show = 5 num_columns = 3 diff --git a/utils/local_storage/url_storage.py b/utils/local_storage/url_storage.py index 950921a8..02552b17 100644 --- a/utils/local_storage/url_storage.py +++ b/utils/local_storage/url_storage.py @@ -3,7 +3,7 @@ def get_url_param(key): - params = st.experimental_get_query_params() + params = st.query_params.get_all() val = params.get(key) if isinstance(val, list): res = val[0] From 41e462f3980bd608a32769724f0d5856e62934b7 Mon Sep 17 00:00:00 2001 From: peteromallet Date: Tue, 12 Mar 2024 18:40:54 +0100 Subject: [PATCH 03/43] Aesthetic fixes --- utils/local_storage/url_storage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/local_storage/url_storage.py b/utils/local_storage/url_storage.py index 02552b17..950921a8 100644 --- a/utils/local_storage/url_storage.py +++ b/utils/local_storage/url_storage.py @@ -3,7 +3,7 @@ def get_url_param(key): - params = st.query_params.get_all() + params = st.experimental_get_query_params() val = params.get(key) if isinstance(val, list): res = val[0] From c3ea8c883787418038943028f062ff3c2440669b Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Wed, 13 Mar 2024 10:47:20 +0000 Subject: [PATCH 04/43] windows setup fix --- windows_setup.bat | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/windows_setup.bat b/windows_setup.bat index 858b9e87..edc27258 100644 --- a/windows_setup.bat +++ b/windows_setup.bat @@ -1,7 +1,8 @@ @echo off set "folderName=Dough" +for %%I in ("%~dp0.") do set ParentFolderName=%%~nxI if not exist "%folderName%\" ( - if /i not "%CD%"=="%~dp0%folderName%\" ( + if not "%folderName%"=="%ParentFolderName%" ( git clone --depth 1 -b main https://github.com/banodoco/Dough.git cd Dough git clone --depth 1 -b feature/package 
https://github.com/piyushK52/comfy_runner.git @@ -14,6 +15,9 @@ if not exist "%folderName%\" ( pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 pip install -r comfy_runner\requirements.txt pip install -r ComfyUI\requirements.txt + powershell -Command "(New-Object Net.WebClient).DownloadFile('https://github.com/Gourieff/Assets/raw/main/Insightface/insightface-0.7.3-cp310-cp310-win_amd64.whl', 'insightface-0.7.3-cp310-cp310-win_amd64.whl')" + pip install insightface-0.7.3-cp310-cp310-win_amd64.whl + del insightface-0.7.3-cp310-cp310-win_amd64.whl call dough-env\Scripts\deactivate.bat copy .env.sample .env cd .. From 2e5b946e78b685f751ec3538ab9f8cce66e22db0 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Wed, 13 Mar 2024 12:12:49 +0000 Subject: [PATCH 05/43] shortlist fix --- ui_components/components/adjust_shot_page.py | 3 --- ui_components/widgets/add_key_frame_element.py | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/ui_components/components/adjust_shot_page.py b/ui_components/components/adjust_shot_page.py index b0ecab03..4f346cbf 100644 --- a/ui_components/components/adjust_shot_page.py +++ b/ui_components/components/adjust_shot_page.py @@ -28,9 +28,6 @@ def adjust_shot_page(shot_uuid: str, h2): st.write("") with st.expander("📋 Explorer shortlist",expanded=True): if st_memory.toggle("Open", value=True, key="explorer_shortlist_toggle"): - project_setting = data_repo.get_project_setting(shot.project.uuid) - number_of_pages = project_setting.total_shortlist_gallery_pages - page_number = 0 gallery_image_view(shot.project.uuid, shortlist=True,view=['add_and_remove_from_shortlist','add_to_this_shot'], shot=shot, sidebar=True) st.markdown(f"#### :green[{st.session_state['main_view_type']}] > :red[{st.session_state['page']}] > :blue[{shot.name}]") diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 4dda46b3..db814a46 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -119,8 +119,8 @@ def add_key_frame(selected_image: Union[Image.Image, InternalFileObject], inheri st.session_state['current_frame_index'] = min(len(timing_list), target_aux_frame_index + 1) st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid - st.session_state['current_subpage'] = AppSubPage.KEYFRAME.value - st.session_state['section_index'] = 0 + # st.session_state['current_subpage'] = AppSubPage.KEYFRAME.value + # st.session_state['section_index'] = 0 if refresh_state: refresh_app(maintain_state=True) \ No newline at end of file From ec35a812d42f63922fbd965109f70c08963b3248 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Wed, 13 Mar 2024 13:31:25 +0000 Subject: [PATCH 06/43] add key frame sped up --- backend/models.py | 4 ++ ui_components/components/explorer_page.py | 15 +++---- .../components/timeline_view_page.py | 12 +++-- ui_components/methods/common_methods.py | 38 +++------------- .../widgets/add_key_frame_element.py | 45 +++++++------------ .../widgets/frame_style_clone_element.py | 41 ----------------- ui_components/widgets/shot_view.py | 25 +++-------- .../widgets/variant_comparison_grid.py | 2 +- 8 files changed, 45 insertions(+), 137 deletions(-) delete mode 100644 ui_components/widgets/frame_style_clone_element.py diff --git a/backend/models.py b/backend/models.py index e103d6c1..250ccfad 100644 --- a/backend/models.py +++ b/backend/models.py @@ -321,6 +321,10 @@ def save(self, 
*args, **kwargs): self.aux_frame_index = new_index timing_list.update(aux_frame_index=F('aux_frame_index') - 1) + # --------------- adding alternative images ---------- + if not (self.alternative_images and len(self.alternative_images)) and self.primary_image: + self.alternative_images = json.dumps([str(self.primary_image.uuid)]) + super().save(*args, **kwargs) diff --git a/ui_components/components/explorer_page.py b/ui_components/components/explorer_page.py index fd5a4434..a2ddc00f 100644 --- a/ui_components/components/explorer_page.py +++ b/ui_components/components/explorer_page.py @@ -483,11 +483,7 @@ def gallery_image_view(project_uuid, shortlist=False, view=["main"], shot=None, button_text = "Pull new images" else: st.info(f"###### {explorer_stats['pending_image_count']} images pending generation and {explorer_stats['temp_image_count']} ready to be fetched") - button_text = "Check for/pull new images" - - # st.info(f"###### {total_number_pending} images pending generation") - # st.info(f"###### {explorer_stats['temp_image_count']} new images generated") - # st.info(f"###### {explorer_stats['pending_image_count']} images pending generation") + button_text = "Check for/pull new images" with fetch3: if st.button(f"{button_text}", key=f"check_for_new_images_", use_container_width=True): @@ -523,7 +519,7 @@ def gallery_image_view(project_uuid, shortlist=False, view=["main"], shot=None, shot_name = st.text_input("New shot name:", max_chars=40, key=f"shot_name_{gallery_image_list[i+j].uuid}") if st.button("Create new shot", key=f"create_new_{gallery_image_list[i + j].uuid}", use_container_width=True): new_shot = add_new_shot(project_uuid, name=shot_name) - add_key_frame(gallery_image_list[i + j], False, new_shot.uuid, len(data_repo.get_timing_list_from_shot(new_shot.uuid)), refresh_state=False) + add_key_frame(gallery_image_list[i + j], new_shot.uuid, len(data_repo.get_timing_list_from_shot(new_shot.uuid)), refresh_state=False) # removing this from the gallery view data_repo.update_file(gallery_image_list[i + j].uuid, tag="") st.rerun() @@ -534,12 +530,11 @@ def gallery_image_view(project_uuid, shortlist=False, view=["main"], shot=None, st.session_state["last_shot_number"] = shot_number shot_uuid = shot_list[shot_number].uuid - add_key_frame(gallery_image_list[i + j], False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False, update_cur_frame_idx=False) + add_key_frame(gallery_image_list[i + j], shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False, update_cur_frame_idx=False) # removing this from the gallery view data_repo.update_file(gallery_image_list[i + j].uuid, tag="") - refresh_app(maintain_state=True) - # else: - # st.error("The image is truncated and cannot be displayed.") + refresh_app(maintain_state=True) + if 'add_and_remove_from_shortlist' in view: if shortlist: if st.button("Remove from shortlist ➖", key=f"shortlist_{gallery_image_list[i + j].uuid}",use_container_width=True): diff --git a/ui_components/components/timeline_view_page.py b/ui_components/components/timeline_view_page.py index 9de73a54..e6cf0586 100644 --- a/ui_components/components/timeline_view_page.py +++ b/ui_components/components/timeline_view_page.py @@ -1,3 +1,4 @@ +import time import streamlit as st from ui_components.constants import CreativeProcessType from ui_components.widgets.timeline_view import timeline_view @@ -39,11 +40,16 @@ def timeline_view_page(shot_uuid: str, h2): st.markdown(f"### 🪄 '{project.name}' timeline") st.write("##### 
_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") + # start_time = time.time() timeline_view(st.session_state["shot_uuid"], st.session_state['view']) - - st.markdown("### ✨ Generate frames") st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") + # end_time = time.time() + # print("///////////////// timeline laoded in: ", end_time - start_time) generate_images_element(position='explorer', project_uuid=project_uuid, timing_uuid=None, shot_uuid=None) - gallery_image_view(project_uuid,False,view=['add_and_remove_from_shortlist','view_inference_details','shot_chooser','add_to_any_shot']) \ No newline at end of file + # end_time = time.time() + # print("///////////////// generate img laoded in: ", end_time - start_time) + gallery_image_view(project_uuid,False,view=['add_and_remove_from_shortlist','view_inference_details','shot_chooser','add_to_any_shot']) + # end_time = time.time() + # print("///////////////// gallery laoded in: ", end_time - start_time) \ No newline at end of file diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index b1810b9d..e7163755 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -2,7 +2,7 @@ import random from typing import List import os -from PIL import Image, ImageDraw, ImageOps, ImageFilter +from PIL import Image, ImageDraw, ImageFilter from moviepy.editor import * import cv2 import requests as r @@ -12,15 +12,14 @@ import uuid from io import BytesIO import numpy as np -import urllib3 -from shared.constants import OFFLINE_MODE, SERVER, InferenceType, InternalFileTag, InternalFileType, ProjectMetaData, ServerType +from shared.constants import OFFLINE_MODE, SERVER, InferenceType, InternalFileTag, InternalFileType, ProjectMetaData from pydub import AudioSegment from backend.models import InternalFileObject from shared.logging.constants import LoggingType from shared.logging.logging import AppLogger -from ui_components.constants import SECOND_MASK_FILE, SECOND_MASK_FILE_PATH, WorkflowStageType -from ui_components.methods.file_methods import add_temp_file_to_project, convert_bytes_to_file, generate_pil_image, generate_temp_file, save_or_host_file, save_or_host_file_bytes -from ui_components.methods.video_methods import sync_audio_and_duration, update_speed_of_video_clip +from ui_components.constants import SECOND_MASK_FILE, WorkflowStageType +from ui_components.methods.file_methods import convert_bytes_to_file, generate_pil_image, generate_temp_file, save_or_host_file, save_or_host_file_bytes +from ui_components.methods.video_methods import sync_audio_and_duration from ui_components.models import InternalFrameTimingObject, InternalSettingObject from utils.common_utils import acquire_lock, release_lock from utils.data_repo.data_repo import DataRepo @@ -29,35 +28,8 @@ from ui_components.models import InternalFileObject from typing import Union -from utils.media_processor.video import VideoProcessor - - -def clone_styling_settings(source_frame_number, target_frame_uuid): - data_repo = DataRepo() - target_timing = data_repo.get_timing_from_uuid(target_frame_uuid) - timing_list = data_repo.get_timing_list_from_shot( - target_timing.shot.uuid) - - source_timing = timing_list[source_frame_number] - params = source_timing.primary_image.inference_params - - if params: - target_timing.prompt = params['prompt'] if 'prompt' in params else source_timing.prompt - target_timing.negative_prompt = params['negative_prompt'] if 'negative_prompt' in params else 
source_timing.negative_prompt - target_timing.guidance_scale = params['guidance_scale'] if 'guidance_scale' in params else source_timing.guidance_scale - target_timing.seed = params['seed'] if 'seed' in params else source_timing.seed - target_timing.num_inference_steps = params['num_inference_steps'] if 'num_inference_steps' in params else source_timing.num_inference_steps - target_timing.strength = params['strength'] if 'strength' in params else source_timing.strength - target_timing.adapter_type = params['adapter_type'] if 'adapter_type' in params else source_timing.adapter_type - target_timing.low_threshold = params['low_threshold'] if 'low_threshold' in params else source_timing.low_threshold - target_timing.high_threshold = params['high_threshold'] if 'high_threshold' in params else source_timing.high_threshold - - if 'model_uuid' in params and params['model_uuid']: - model = data_repo.get_ai_model_from_uuid(params['model_uuid']) - target_timing.model = model - # TODO: image format is assumed to be PNG, change this later def save_new_image(img: Union[Image.Image, str, np.ndarray, io.BytesIO], project_uuid) -> InternalFileObject: ''' diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index db814a46..ed20feb3 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -1,27 +1,19 @@ import time from typing import Union import streamlit as st -from shared.constants import AnimationStyleType, AppSubPage -from ui_components.constants import CreativeProcessType, WorkflowStageType +from shared.constants import AnimationStyleType from ui_components.models import InternalFileObject, InternalFrameTimingObject -from ui_components.widgets.image_zoom_widgets import zoom_inputs - -from utils import st_memory from utils.common_utils import refresh_app - from utils.data_repo.data_repo import DataRepo - -from utils.constants import ImageStage from ui_components.methods.file_methods import generate_pil_image,save_or_host_file -from ui_components.methods.common_methods import add_image_variant, apply_image_transformations, clone_styling_settings, create_frame_inside_shot, save_new_image, save_uploaded_image +from ui_components.methods.common_methods import add_image_variant, save_new_image from PIL import Image -def add_key_frame_section(shot_uuid, individual_view=True): +def add_key_frame_section(shot_uuid): data_repo = DataRepo() - shot = data_repo.get_shot_from_uuid(shot_uuid) - timing_list = data_repo.get_timing_list_from_shot(shot_uuid) + shot = data_repo.get_shot_from_uuid(shot_uuid) selected_image_location = "" uploaded_images = st.file_uploader("Upload images:", type=["png", "jpg", "jpeg"], key=f"uploaded_image_{shot_uuid}", help="You can upload multiple images", accept_multiple_files=True) @@ -35,7 +27,7 @@ def add_key_frame_section(shot_uuid, individual_view=True): file_location = f"videos/{shot.uuid}/assets/frames/1_selected/{uploaded_image.name}" selected_image_location = save_or_host_file(image, file_location) selected_image_location = selected_image_location or file_location - add_key_frame(selected_image_location, "No", shot_uuid,refresh_state=False) + add_key_frame(selected_image_location, shot_uuid,refresh_state=False) progress_bar.progress((i + 1) / len(uploaded_images)) else: st.error("Please generate new images or upload them") @@ -67,7 +59,7 @@ def add_key_frame_element(shot_uuid): return selected_image, inherit_styling_settings -def add_key_frame(selected_image: 
Union[Image.Image, InternalFileObject], inherit_styling_settings, shot_uuid, target_frame_position=None, refresh_state=True, update_cur_frame_idx=True): +def add_key_frame(selected_image: Union[Image.Image, InternalFileObject], shot_uuid, target_frame_position=None, refresh_state=True, update_cur_frame_idx=True): ''' either a pil image or a internalfileobject can be passed to this method, for adding it inside a shot ''' @@ -75,12 +67,11 @@ def add_key_frame(selected_image: Union[Image.Image, InternalFileObject], inheri timing_list = data_repo.get_timing_list_from_shot(shot_uuid) # checking if the shot has reached the max frame limit - shot = data_repo.get_shot_from_uuid(shot_uuid) - project_settings = data_repo.get_project_setting(shot.project.uuid) - if len(shot.timing_list) >= project_settings.max_frames_per_shot: - st.error(f'Only {project_settings.max_frames_per_shot} frames allowed per shot') - time.sleep(0.3) - st.rerun() + # project_settings = data_repo.get_project_setting(shot.project.uuid) + # if len(shot.timing_list) >= project_settings.max_frames_per_shot: + # st.error(f'Only {project_settings.max_frames_per_shot} frames allowed per shot') + # time.sleep(0.3) + # st.rerun() # creating frame inside the shot at target_frame_position len_shot_timing_list = len(timing_list) if len(timing_list) > 0 else 0 @@ -89,10 +80,9 @@ def add_key_frame(selected_image: Union[Image.Image, InternalFileObject], inheri if isinstance(selected_image, InternalFileObject): saved_image = selected_image - print("selected_image is an instance of InternalFileObject") else: + shot = data_repo.get_shot_from_uuid(shot_uuid) saved_image = save_new_image(selected_image, shot.project.uuid) - print("selected_image is an instance of Image.Image") timing_data = { "shot_id": shot_uuid, @@ -101,16 +91,11 @@ def add_key_frame(selected_image: Union[Image.Image, InternalFileObject], inheri "source_image_id": saved_image.uuid, "primary_image_id": saved_image.uuid, } - timing: InternalFrameTimingObject = data_repo.create_timing(**timing_data) - - add_image_variant(saved_image.uuid, timing.uuid) + _: InternalFrameTimingObject = data_repo.create_timing(**timing_data) - timing_list = data_repo.get_timing_list_from_shot(shot_uuid) if update_cur_frame_idx: + timing_list = data_repo.get_timing_list_from_shot(shot_uuid) # this part of code updates current_frame_index when a new keyframe is added - if inherit_styling_settings == "Yes" and st.session_state['current_frame_index']: - clone_styling_settings(st.session_state['current_frame_index'] - 1, timing_list[target_aux_frame_index-1].uuid) - if len(timing_list) <= 1: st.session_state['current_frame_index'] = 1 st.session_state['current_frame_uuid'] = timing_list[0].uuid @@ -121,6 +106,6 @@ def add_key_frame(selected_image: Union[Image.Image, InternalFileObject], inheri # st.session_state['current_subpage'] = AppSubPage.KEYFRAME.value # st.session_state['section_index'] = 0 - + if refresh_state: refresh_app(maintain_state=True) \ No newline at end of file diff --git a/ui_components/widgets/frame_style_clone_element.py b/ui_components/widgets/frame_style_clone_element.py deleted file mode 100644 index 0f2c5371..00000000 --- a/ui_components/widgets/frame_style_clone_element.py +++ /dev/null @@ -1,41 +0,0 @@ -import streamlit as st -from shared.constants import AIModelCategory -from ui_components.constants import WorkflowStageType -from ui_components.methods.common_methods import clone_styling_settings -from ui_components.models import InternalAIModelObject -from 
ui_components.widgets.image_carousal import display_image -from utils.common_utils import reset_styling_settings - -from utils.data_repo.data_repo import DataRepo - -def style_cloning_element(timing_details): - open_copier = st.checkbox("Copy styling settings from another frame") - if open_copier is True: - copy1, copy2 = st.columns([1, 1]) - with copy1: - frame_index = st.number_input("Which frame would you like to copy styling settings from?", min_value=1, max_value=len( - timing_details), value=st.session_state['current_frame_index'], step=1) - if st.button("Copy styling settings from this frame"): - clone_styling_settings(frame_index - 1, st.session_state['current_frame_uuid']) - reset_styling_settings(st.session_state['current_frame_uuid']) - st.rerun() - - with copy2: - display_image(timing_details[frame_index - 1].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - - if timing_details[frame_index - 1].primary_image.inference_params: - st.text("Prompt: ") - st.caption(timing_details[frame_index - 1].primary_image.inference_params.prompt) - st.text("Negative Prompt: ") - st.caption(timing_details[frame_index - 1].primary_image.inference_params.negative_prompt) - - if timing_details[frame_index - 1].primary_image.inference_params.model_uuid: - data_repo = DataRepo() - model: InternalAIModelObject = data_repo.get_ai_model_from_uuid(timing_details[frame_index - 1].primary_image.inference_params.model_uuid) - - st.text("Model:") - st.caption(model.name) - - if model.category.lower() == AIModelCategory.CONTROLNET.value: - st.text("Adapter Type:") - st.caption(timing_details[frame_index - 1].primary_image.inference_params.adapter_type) \ No newline at end of file diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index ba8d2561..46313945 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -26,19 +26,13 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeli if "open_shot" not in st.session_state: st.session_state["open_shot"] = None - timing_list: List[InternalFrameTimingObject] = shot.timing_list - if position == "Timeline": - header_col_0, header_col_1, header_col_2, header_col_3 = st.columns([2,1,1.5,0.5]) - + with header_col_0: - update_shot_name(shot.uuid) - - # with header_col_1: - # update_shot_duration(shot.uuid) + update_shot_name(shot.uuid) with header_col_2: st.write("") @@ -73,10 +67,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeli with grid[j]: if idx == len(timing_list): if position != "Timeline": - - # st.info("**Add new frame(s) to shot**") - add_key_frame_section(shot_uuid, False) - + add_key_frame_section(shot_uuid) else: timing = timing_list[idx] if timing.primary_image and timing.primary_image.location: @@ -88,17 +79,13 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeli timeline_view_buttons(idx, shot_uuid, copy_frame_toggle, move_frames_toggle,delete_frames_toggle, change_shot_toggle, shift_frame_toggle) if (i < len(timing_list) - 1) or (st.session_state["open_shot"] == shot.uuid) or (len(timing_list) % items_per_row != 0 and st.session_state["open_shot"] != shot.uuid) or len(timing_list) % items_per_row == 0: st.markdown("***") - # st.markdown("***") - if position == "Timeline": - # st.markdown("***") + if position == "Timeline": bottom1, bottom2, bottom3, bottom4,_ = st.columns([1,1,1,1,2]) with bottom1: delete_shot_button(shot.uuid) - with bottom2: - duplicate_shot_button(shot.uuid) - + 
duplicate_shot_button(shot.uuid) with bottom3: move_shot_buttons(shot, "up") @@ -319,7 +306,7 @@ def timeline_view_buttons(idx, shot_uuid, copy_frame_toggle, move_frames_toggle, with btn3: if st.button("🔁", key=f"copy_frame_{timing_list[idx].uuid}", use_container_width=True): pil_image = generate_pil_image(timing_list[idx].primary_image.location) - add_key_frame(pil_image, False, st.session_state['shot_uuid'], timing_list[idx].aux_frame_index+1, refresh_state=False) + add_key_frame(pil_image, st.session_state['shot_uuid'], timing_list[idx].aux_frame_index+1, refresh_state=False) refresh_app(maintain_state=True) if delete_frames_toggle: diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index 9d26b495..ee873d6a 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -310,5 +310,5 @@ def add_variant_to_shot_element(file: InternalFileObject, project_uuid): shot_uuid = shot_list[shot_number].uuid duplicate_file = create_duplicate_file(file, project_uuid) - add_key_frame(duplicate_file, False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False, update_cur_frame_idx=False) + add_key_frame(duplicate_file, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False, update_cur_frame_idx=False) st.rerun() From 65399b1ce8aea2eee6da61555ff5047c32e0fcaf Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Wed, 13 Mar 2024 16:29:01 +0000 Subject: [PATCH 07/43] img inf added --- ui_components/components/adjust_shot_page.py | 5 ----- ui_components/widgets/shot_view.py | 9 ++++----- ui_components/widgets/variant_comparison_grid.py | 2 +- 3 files changed, 5 insertions(+), 11 deletions(-) diff --git a/ui_components/components/adjust_shot_page.py b/ui_components/components/adjust_shot_page.py index 4f346cbf..e6f22799 100644 --- a/ui_components/components/adjust_shot_page.py +++ b/ui_components/components/adjust_shot_page.py @@ -35,18 +35,13 @@ def adjust_shot_page(shot_uuid: str, h2): column1, column2 = st.columns([0.8,1.35]) with column1: - - st.markdown(f"### 🎬 '{shot.name}' frames") st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") - shot_keyframe_element(st.session_state["shot_uuid"], 4, column2, position="Individual") # sparkle emoji with Generate Frames st.markdown("### ✨ Generate frames") st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") - - generate_images_element(position='explorer', project_uuid=shot.project.uuid, timing_uuid=None, shot_uuid=shot.uuid) # st.markdown("***") diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 46313945..e734bf63 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -20,7 +20,9 @@ from utils.data_repo.data_repo import DataRepo from utils import st_memory -def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeline",**kwargs): +def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timeline", **kwargs): + from ui_components.widgets.variant_comparison_grid import image_variant_details + data_repo = DataRepo() shot: InternalShotObject = data_repo.get_shot_from_uuid(shot_uuid) @@ -30,10 +32,8 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeli timing_list: List[InternalFrameTimingObject] = shot.timing_list if position == "Timeline": header_col_0, header_col_1, header_col_2, header_col_3 = st.columns([2,1,1.5,0.5]) - 
with header_col_0: update_shot_name(shot.uuid) - with header_col_2: st.write("") shot_adjustment_button(shot, show_label=True) @@ -44,7 +44,6 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeli else: with column: col1, col2, col3, col4, col5 = st.columns([1,1,1,1,1]) - with col1: delete_frames_toggle = st_memory.toggle("Delete Frames", value=True, key="delete_frames_toggle") with col2: @@ -57,7 +56,6 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeli shift_frame_toggle = st_memory.toggle("Shift Frames", value=False, key="shift_frame_toggle") st.markdown("***") - for i in range(0, len(timing_list) + 1, items_per_row): with st.container(): grid = st.columns(items_per_row) @@ -72,6 +70,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeli timing = timing_list[idx] if timing.primary_image and timing.primary_image.location: st.image(timing.primary_image.location, use_column_width=True) + image_variant_details(timing.primary_image) else: st.warning("No primary image present.") jump_to_single_frame_view_button(idx + 1, timing_list, f"jump_to_{idx + 1}",uuid=shot.uuid) diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index ee873d6a..3fe674c3 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -126,7 +126,7 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): # Add markdown line if this is not the last variant in page_indices def image_variant_details(variant: InternalFileObject): - with st.expander("Settings", expanded=False): + with st.expander("Inference Details", expanded=False): if variant.inference_params and 'query_dict' in variant.inference_params: query_dict = json.loads(variant.inference_params['query_dict']) st.markdown(f"Prompt: {query_dict['prompt']}", unsafe_allow_html=True) From 10e3fa6f231eb20ac1a89d02c4c0d3fbbefc4e6c Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Wed, 13 Mar 2024 17:10:35 +0000 Subject: [PATCH 08/43] zoom img time improved + cache logic updated --- ui_components/methods/common_methods.py | 14 ++++----- ui_components/widgets/cropping_element.py | 2 +- ui_components/widgets/image_zoom_widgets.py | 21 +------------ ui_components/widgets/inpainting_element.py | 6 ++-- utils/cache/cache_methods.py | 34 ++++++++++----------- 5 files changed, 28 insertions(+), 49 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index e7163755..9b4bb3b8 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -65,7 +65,7 @@ def save_and_promote_image(image, shot_uuid, timing_uuid, stage): saved_image = save_new_image(image, shot.project.uuid) # Update records based on stage if stage == WorkflowStageType.SOURCE.value: - data_repo.update_specific_timing(timing_uuid, source_image_id=saved_image.uuid) + data_repo.update_specific_timing(timing_uuid, source_image_id=saved_image.uuid, update_in_place=True) elif stage == WorkflowStageType.STYLED.value: number_of_image_variants = add_image_variant(saved_image.uuid, timing_uuid) promote_image_variant(timing_uuid, number_of_image_variants - 1) @@ -328,7 +328,7 @@ def save_uploaded_image(image: Union[Image.Image, str, np.ndarray, io.BytesIO, I # Update records based on stage_type if stage_type == WorkflowStageType.SOURCE.value: - data_repo.update_specific_timing(frame_uuid, 
source_image_id=saved_image.uuid) + data_repo.update_specific_timing(frame_uuid, source_image_id=saved_image.uuid, update_in_place=True) elif stage_type == WorkflowStageType.STYLED.value: number_of_image_variants = add_image_variant(saved_image.uuid, frame_uuid) promote_image_variant(frame_uuid, number_of_image_variants - 1) @@ -349,7 +349,7 @@ def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): # promoting variant variant_to_promote = timing.alternative_images_list[variant_to_promote_frame_number] - data_repo.update_specific_timing(timing_uuid, primary_image_id=variant_to_promote.uuid) + data_repo.update_specific_timing(timing_uuid, primary_image_id=variant_to_promote.uuid, update_in_place=True) _ = data_repo.get_timing_list_from_shot(timing.shot.uuid) @@ -474,7 +474,7 @@ def create_or_update_mask(timing_uuid, image) -> InternalFileObject: file_data.update({'local_path': file_location}) mask_file: InternalFileObject = data_repo.create_file(**file_data) - data_repo.update_specific_timing(timing_uuid, mask_id=mask_file.uuid) + data_repo.update_specific_timing(timing_uuid, mask_id=mask_file.uuid, update_in_place=True) else: # if it is already present then just updating the file location if hosted_url: @@ -512,11 +512,11 @@ def add_image_variant(image_file_uuid: str, timing_uuid: str): alternative_image_uuid_list = json.dumps(alternative_image_uuid_list) data_repo.update_specific_timing( - timing_uuid, alternative_images=alternative_image_uuid_list) + timing_uuid, alternative_images=alternative_image_uuid_list, update_in_place=True) if not timing.primary_image: data_repo.update_specific_timing( - timing_uuid, primary_image_id=primary_image_uuid) + timing_uuid, primary_image_id=primary_image_uuid, update_in_place=True) return len(alternative_image_list) @@ -811,7 +811,7 @@ def process_inference_output(**kwargs): ) if stage == WorkflowStageType.SOURCE.value: - data_repo.update_specific_timing(current_frame_uuid, source_image_id=output_file.uuid) + data_repo.update_specific_timing(current_frame_uuid, source_image_id=output_file.uuid, update_in_place=True) elif stage == WorkflowStageType.STYLED.value: number_of_image_variants = add_image_variant(output_file.uuid, current_frame_uuid) if promote: diff --git a/ui_components/widgets/cropping_element.py b/ui_components/widgets/cropping_element.py index 088a2160..773b7ab9 100644 --- a/ui_components/widgets/cropping_element.py +++ b/ui_components/widgets/cropping_element.py @@ -104,7 +104,7 @@ def precision_cropping_element(stage, shot_uuid): st.success("Image saved successfully!") time.sleep(1) st.rerun() - + inpaint_in_black_space_element(output_image, shot.project.uuid, stage, shot_uuid, transformation_data) def manual_cropping_element(stage, timing_uuid): diff --git a/ui_components/widgets/image_zoom_widgets.py b/ui_components/widgets/image_zoom_widgets.py index db6db809..4543b736 100644 --- a/ui_components/widgets/image_zoom_widgets.py +++ b/ui_components/widgets/image_zoom_widgets.py @@ -33,7 +33,6 @@ def zoom_inputs(position='in-frame', horizontal=False): col6.checkbox( "Flip Horizontally ↔️", key=f"flip_horizontally", value=False) - def save_zoomed_image(image, timing_uuid, stage, promote=False): data_repo = DataRepo() @@ -58,7 +57,7 @@ def save_zoomed_image(image, timing_uuid, stage, promote=False): source_image: InternalFileObject = data_repo.create_file(**file_data) data_repo.update_specific_timing( - st.session_state['current_frame_uuid'], source_image_id=source_image.uuid) + st.session_state['current_frame_uuid'], 
source_image_id=source_image.uuid, update_in_place=True) elif stage == WorkflowStageType.STYLED.value: save_location = f"videos/{project_uuid}/assets/frames/2_character_pipeline_completed/{file_name}" hosted_url = save_or_host_file(image, save_location) @@ -74,29 +73,11 @@ def save_zoomed_image(image, timing_uuid, stage, promote=False): file_data.update({'local_path': save_location}) styled_image: InternalFileObject = data_repo.create_file(**file_data) - number_of_image_variants = add_image_variant( styled_image.uuid, timing_uuid) if promote: promote_image_variant(timing_uuid, number_of_image_variants - 1) - ''' - project_update_data = { - "zoom_level": st.session_state['zoom_level_input'], - "rotation_angle_value": st.session_state['rotation_angle_input'], - "x_shift": st.session_state['x_shift'], - "y_shift": st.session_state['y_shift'] - } - - data_repo.update_project_setting(project_uuid, **project_update_data) - # TODO: **CORRECT-CODE - make a proper column for zoom details - timing_update_data = { - "zoom_details": f"{st.session_state['zoom_level_input']},{st.session_state['rotation_angle_input']},{st.session_state['x_shift']},{st.session_state['y_shift']}", - - } - - data_repo.update_specific_timing(timing_uuid, **timing_update_data) - ''' def reset_zoom_element(): st.session_state['zoom_level_input_key'] = 100 diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index f12953ef..6d83c4bd 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -152,7 +152,7 @@ def replace_with_image(stage, output_file, current_frame_uuid, promote=False): data_repo = DataRepo() if stage == WorkflowStageType.SOURCE.value: - data_repo.update_specific_timing(current_frame_uuid, source_image_id=output_file.uuid) + data_repo.update_specific_timing(current_frame_uuid, source_image_id=output_file.uuid, update_in_place=True) elif stage == WorkflowStageType.STYLED.value: number_of_image_variants = add_image_variant(output_file.uuid, current_frame_uuid) if promote: @@ -164,8 +164,6 @@ def replace_with_image(stage, output_file, current_frame_uuid, promote=False): def inpaint_in_black_space_element(cropped_img: Image.Image, project_uuid, \ stage=WorkflowStageType.SOURCE.value, shot_uuid=None, transformation_data = None): data_repo = DataRepo() - project_settings: InternalSettingObject = data_repo.get_project_setting( - project_uuid) st.markdown("##### Inpaint in black space:") @@ -177,6 +175,8 @@ def inpaint_in_black_space_element(cropped_img: Image.Image, project_uuid, \ st.session_state['precision_cropping_inpainted_image_uuid'] = "" def inpaint(promote=False, transformation_data=transformation_data): + project_settings: InternalSettingObject = data_repo.get_project_setting( + project_uuid) width = int(project_settings.width) height = int(project_settings.height) diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index de5af2ef..e828bce1 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -300,25 +300,23 @@ def _cache_update_specific_timing(self, *args, **kwargs): status = original_func(self, *args, **kwargs) if status: - StCache.delete_all(CacheKey.TIMING_DETAILS.value) - # deleting shots as well. for e.g. 
timing update can be moving it from - # one shot to another - StCache.delete_all(CacheKey.SHOT.value) - - # updating the timing list - timing_func = getattr(cls, '_original_get_timing_from_uuid') - timing = timing_func(self, args[0]) - if timing and timing.shot.project: - # original_func = getattr(cls, '_original_get_timing_list_from_project') - # timing_list = original_func(self, timing.shot.project.uuid) - # if timing_list and len(timing_list): - # StCache.add_all(timing_list, CacheKey.TIMING_DETAILS.value) + timing_func = getattr(cls, '_original_get_timing_from_uuid') + timing = timing_func(self, args[0]) + if kwargs.get('update_in_place', False): + # these changes contains updates which don't require a complete cache refresh + StCache.update(timing, CacheKey.TIMING_DETAILS.value) + else: + StCache.delete_all(CacheKey.TIMING_DETAILS.value) + # deleting shots as well. for e.g. timing update can be moving it from + # one shot to another + StCache.delete_all(CacheKey.SHOT.value) - # updating shot list - original_func = getattr(cls, '_original_get_shot_list') - shot_list = original_func(self, timing.shot.project.uuid) - if shot_list: - StCache.add_all(shot_list, CacheKey.SHOT.value) + if timing and timing.shot.project: + # updating shot list + original_func = getattr(cls, '_original_get_shot_list') + shot_list = original_func(self, timing.shot.project.uuid) + if shot_list: + StCache.add_all(shot_list, CacheKey.SHOT.value) setattr(cls, '_original_update_specific_timing', cls.update_specific_timing) setattr(cls, "update_specific_timing", _cache_update_specific_timing) From f8bc2015b478d035b42cd8a10e29e972f26e212b Mon Sep 17 00:00:00 2001 From: peteromallet Date: Wed, 13 Mar 2024 23:38:40 +0100 Subject: [PATCH 09/43] Frame moving --- .../widgets/frame_movement_widgets.py | 3 +- ui_components/widgets/image_zoom_widgets.py | 41 +- ui_components/widgets/shot_view.py | 354 +++++++++++------- 3 files changed, 243 insertions(+), 155 deletions(-) diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index c060ffe8..32f39c6b 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -95,12 +95,11 @@ def delete_frame(timing_uuid): timing_list = data_repo.get_timing_list_from_shot(timing.shot.uuid) - data_repo.delete_timing_from_uuid(timing.uuid) timing_list = data_repo.get_timing_list_from_shot(shot_uuid) if len(timing_list) == 0: - st.success("Frame deleted!") + print("No more frames in this shot") # this is the last frame elif not next_timing: st.session_state['current_frame_index'] = max(1, st.session_state['current_frame_index'] - 1) diff --git a/ui_components/widgets/image_zoom_widgets.py b/ui_components/widgets/image_zoom_widgets.py index db6db809..ebd42b24 100644 --- a/ui_components/widgets/image_zoom_widgets.py +++ b/ui_components/widgets/image_zoom_widgets.py @@ -10,28 +10,39 @@ def zoom_inputs(position='in-frame', horizontal=False): if horizontal: - col1, col2, col3, col4, col5, col6 = st.columns(6) + col1, col2 = st.columns(2) + col3, col4 = st.columns(2) + col5, col6 = st.columns(2) else: col1 = col2 = col3 = col4 = col5 = col6 = st + if 'zoom_level_input_default' not in st.session_state: + st.session_state['zoom_level_input_default'] = 100 + st.session_state['rotation_angle_input_default'] = 0 + st.session_state['x_shift_default'] = 0 + st.session_state['y_shift_default'] = 0 + st.session_state['flip_vertically_default'] = False + st.session_state['flip_horizontally_default'] 
= False + + col1.number_input( - "Zoom In/Out", min_value=10, max_value=1000, step=10, key=f"zoom_level_input", value=100) + "Zoom In/Out", min_value=10, max_value=1000, step=10, key=f"zoom_level_input", value=st.session_state['zoom_level_input_default']) - # col2.number_input( - # "Rotate Counterclockwise/Clockwise", min_value=-360, max_value=360, step=5, key="rotation_angle_input", value=0) - st.session_state['rotation_angle_input'] = 0 + col2.number_input( + "Rotate", min_value=-360, max_value=360, step=5, key="rotation_angle_input", value=st.session_state['rotation_angle_input_default']) + # st.session_state['rotation_angle_input'] = 0 col3.number_input( - "Shift Left/Right", min_value=-1000, max_value=1000, step=5, key=f"x_shift", value=0) + "Shift Left/Right", min_value=-1000, max_value=1000, step=5, key=f"x_shift", value=st.session_state['x_shift_default']) col4.number_input( - "Shift Down/Up", min_value=-1000, max_value=1000, step=5, key=f"y_shift", value=0) + "Shift Down/Up", min_value=-1000, max_value=1000, step=5, key=f"y_shift", value=st.session_state['y_shift_default']) col5.checkbox( - "Flip Vertically ↕️", key=f"flip_vertically", value=False) + "Flip Vertically ↕️", key=f"flip_vertically", value=str(st.session_state['flip_vertically_default'])) col6.checkbox( - "Flip Horizontally ↔️", key=f"flip_horizontally", value=False) + "Flip Horizontally ↔️", key=f"flip_horizontally", value=str(st.session_state['flip_horizontally_default'])) @@ -99,14 +110,16 @@ def save_zoomed_image(image, timing_uuid, stage, promote=False): ''' def reset_zoom_element(): - st.session_state['zoom_level_input_key'] = 100 - st.session_state['rotation_angle_input_key'] = 0 - st.session_state['x_shift_key'] = 0 - st.session_state['y_shift_key'] = 0 + st.session_state['zoom_level_input_default'] = 100 st.session_state['zoom_level_input'] = 100 + st.session_state['rotation_angle_input_default'] = 0 st.session_state['rotation_angle_input'] = 0 + st.session_state['x_shift_default'] = 0 st.session_state['x_shift'] = 0 + st.session_state['y_shift_default'] = 0 st.session_state['y_shift'] = 0 + st.session_state['flip_vertically_default'] = False st.session_state['flip_vertically'] = False + st.session_state['flip_horizontally_default'] = False st.session_state['flip_horizontally'] = False - st.rerun() \ No newline at end of file + \ No newline at end of file diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index ba8d2561..47523833 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -7,100 +7,193 @@ from PIL import Image import requests from io import BytesIO +import pandas as pd import streamlit as st +import uuid +import random from shared.constants import AppSubPage, InferenceParamType from ui_components.constants import WorkflowStageType from ui_components.methods.file_methods import generate_pil_image from streamlit_option_menu import option_menu +from shared.constants import InternalFileType from ui_components.models import InternalFrameTimingObject, InternalShotObject from ui_components.widgets.add_key_frame_element import add_key_frame,add_key_frame_section from ui_components.widgets.common_element import duplicate_shot_button -from ui_components.widgets.frame_movement_widgets import change_frame_shot, delete_frame_button, jump_to_single_frame_view_button, move_frame_back_button, move_frame_forward_button, replace_image_widget +from ui_components.methods.common_methods import apply_coord_transformations, apply_image_transformations 
+from ui_components.widgets.frame_movement_widgets import change_frame_shot, delete_frame_button, jump_to_single_frame_view_button, move_frame_back_button, move_frame_forward_button, replace_image_widget,delete_frame from utils.common_utils import refresh_app from utils.data_repo.data_repo import DataRepo +from ui_components.methods.file_methods import save_or_host_file from utils import st_memory +from ui_components.widgets.image_zoom_widgets import reset_zoom_element, save_zoomed_image, zoom_inputs def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeline",**kwargs): data_repo = DataRepo() shot: InternalShotObject = data_repo.get_shot_from_uuid(shot_uuid) - - if "open_shot" not in st.session_state: - st.session_state["open_shot"] = None - + project_uuid = shot.project.uuid + timing_list: List[InternalFrameTimingObject] = shot.timing_list - - if position == "Timeline": - - header_col_0, header_col_1, header_col_2, header_col_3 = st.columns([2,1,1.5,0.5]) - - with header_col_0: - update_shot_name(shot.uuid) - - # with header_col_1: - # update_shot_duration(shot.uuid) - - with header_col_2: - st.write("") - shot_adjustment_button(shot, show_label=True) - with header_col_3: - st.write("") - shot_animation_button(shot, show_label=True) - else: - with column: - col1, col2, col3, col4, col5 = st.columns([1,1,1,1,1]) - - with col1: - delete_frames_toggle = st_memory.toggle("Delete Frames", value=True, key="delete_frames_toggle") - with col2: - copy_frame_toggle = st_memory.toggle("Copy Frame", value=True, key="copy_frame_toggle") - with col3: - move_frames_toggle = st_memory.toggle("Move Frames", value=True, key="move_frames_toggle") - with col4: - change_shot_toggle = st_memory.toggle("Change Shot", value=False, key="change_shot_toggle") - with col5: - shift_frame_toggle = st_memory.toggle("Shift Frames", value=False, key="shift_frame_toggle") - + with column: + col1, col2 = st.columns([1, 1]) + with col1: + move_frame_mode = st_memory.toggle("Enter 'move frame' mode", value=False, key=f"move_frame_mode_{shot.uuid}") + if st.session_state[f"move_frame_mode_{shot.uuid}"]: + st.warning("You are in 'move frame' mode. 
You have to press 'Save' below to save the changes.") + if st.button("Save", key=f"save_move_frame_{shot.uuid}", help="Save the changes made in 'move frame' mode", use_container_width=True): + update_shot_frames(shot_uuid, timing_list) + st.rerun() + if f"shot_data_{shot_uuid}" not in st.session_state: + st.session_state[f"shot_data_{shot_uuid}"] = None + if st.session_state[f"shot_data_{shot_uuid}"] is None: + shot_data = [{ + "uuid": timing.uuid, + "image_location": timing.primary_image.location if timing.primary_image and timing.primary_image.location else None, + "position": idx + } for idx, timing in enumerate(timing_list)] + st.session_state[f"shot_data_{shot_uuid}"] = pd.DataFrame(shot_data) + st.markdown("***") - for i in range(0, len(timing_list) + 1, items_per_row): - with st.container(): - grid = st.columns(items_per_row) - for j in range(items_per_row): - idx = i + j - if idx <= len(timing_list): - with grid[j]: - if idx == len(timing_list): - if position != "Timeline": - - # st.info("**Add new frame(s) to shot**") - add_key_frame_section(shot_uuid, False) - - else: - timing = timing_list[idx] - if timing.primary_image and timing.primary_image.location: - st.image(timing.primary_image.location, use_column_width=True) - else: - st.warning("No primary image present.") - jump_to_single_frame_view_button(idx + 1, timing_list, f"jump_to_{idx + 1}",uuid=shot.uuid) - if position != "Timeline": - timeline_view_buttons(idx, shot_uuid, copy_frame_toggle, move_frames_toggle,delete_frames_toggle, change_shot_toggle, shift_frame_toggle) - if (i < len(timing_list) - 1) or (st.session_state["open_shot"] == shot.uuid) or (len(timing_list) % items_per_row != 0 and st.session_state["open_shot"] != shot.uuid) or len(timing_list) % items_per_row == 0: - st.markdown("***") - # st.markdown("***") + if move_frame_mode: + + for i in range(0, len(st.session_state[f"shot_data_{shot_uuid}"]), items_per_row): + with st.container(): + grid = st.columns(items_per_row) + for j in range(items_per_row): + idx = i + j + if idx < len(st.session_state[f"shot_data_{shot_uuid}"]): # Ensure idx does not exceed the length of shot_df + + with grid[j % items_per_row]: # Use modulo for column indexingr + + row = st.session_state[f"shot_data_{shot_uuid}"].loc[idx] + + if row['image_location']: + st.image(row['image_location'], use_column_width=True) + else: + st.warning("No primary image present.") - if position == "Timeline": - # st.markdown("***") - bottom1, bottom2, bottom3, bottom4,_ = st.columns([1,1,1,1,2]) - with bottom1: - delete_shot_button(shot.uuid) + btn1, btn2, btn3, btn4 = st.columns([1, 1, 1, 1]) - with bottom2: - duplicate_shot_button(shot.uuid) - - with bottom3: - move_shot_buttons(shot, "up") + with btn1: + if st.button("⬅️", key=f"move_frame_back_{idx}", help="Move frame back", use_container_width=True): + st.session_state[f"shot_data_{shot_uuid}"] = move_temp_frame(st.session_state[f"shot_data_{shot_uuid}"], idx, 'backward') + st.rerun() + with btn2: + if st.button("➡️", key=f"move_frame_forward_{idx}", help="Move frame forward", use_container_width=True): + st.session_state[f"shot_data_{shot_uuid}"] = move_temp_frame(st.session_state[f"shot_data_{shot_uuid}"], idx, 'forward') + st.rerun() + with btn3: + if st.button("🔁", key=f"copy_frame_{idx}", use_container_width=True): + st.session_state[f"shot_data_{shot_uuid}"] = copy_temp_frame(st.session_state[f"shot_data_{shot_uuid}"], idx) + st.rerun() + with btn4: + if st.button("❌", key=f"delete_frame_{idx}", use_container_width=True): + 
st.session_state[f"shot_data_{shot_uuid}"] = delete_temp_frame(st.session_state[f"shot_data_{shot_uuid}"], idx) + st.rerun() + + header1, header2 = st.columns([1, 1.5]) + with header1: + st_memory.toggle("Open Zoom", key=f"open_zoom_{shot.uuid}_{idx}", value=False) + + if st.session_state[f"open_zoom_{shot.uuid}_{idx}"]: + with header2: + if st.button("Reset",use_container_width=True): + reset_zoom_element() + st.rerun() + + # close all other zooms + for i in range(0, len(st.session_state[f"shot_data_{shot_uuid}"])): + if i != idx: + st.session_state[f"open_zoom_{shot.uuid}_{i}"] = False + + input_image = generate_pil_image(st.session_state[f"shot_data_{shot_uuid}"].loc[idx]['image_location']) + + if 'zoom_level_input' not in st.session_state: + st.session_state['zoom_level_input'] = 100 + st.session_state['rotation_angle_input'] = 0 + st.session_state['x_shift'] = 0 + st.session_state['y_shift'] = 0 + st.session_state['flip_vertically'] = False + st.session_state['flip_horizontally'] = False + + zoom_inputs(horizontal=True) + + st.caption("Output Image:") + + output_image = apply_image_transformations( + input_image, + st.session_state['zoom_level_input'], + st.session_state['rotation_angle_input'], + st.session_state['x_shift'] , + st.session_state['y_shift'] , + st.session_state['flip_vertically'] , + st.session_state['flip_horizontally'] + ) + + st.image(output_image, use_column_width=True) + + + if st.button("Save", key=f"save_zoom_{idx}", help="Save the changes made in 'move frame' mode",type="primary",use_container_width=True): + # make file_name into a random uuid using uuid + file_name = f"{uuid.uuid4()}.png" + + save_location = f"videos/{project_uuid}/assets/frames/2_character_pipeline_completed/{file_name}" + hosted_url = save_or_host_file(output_image, save_location) + file_data = { + "name": file_name, + "type": InternalFileType.IMAGE.value, + "project_id": project_uuid + } + + if hosted_url: + file_data.update({'hosted_url': hosted_url}) + location = hosted_url + else: + file_data.update({'local_path': save_location}) + location = save_location + + st.session_state[f"shot_data_{shot_uuid}"].loc[idx, 'image_location'] = location + st.session_state[f'open_zoom_{shot.uuid}_{idx}'] = False + st.rerun() + + st.markdown("***") + bottom1, bottom2 = st.columns([1, 1]) + with bottom1: + st.warning("You are in 'move frame' mode. 
You have to press 'Save' below to save the changes.") + if st.button("Save", key=f"save_move_frame_{shot.uuid}_bottom", help="Save the changes made in 'move frame' mode", use_container_width=True): + update_shot_frames(shot_uuid, timing_list) + st.rerun() + st.markdown("***") + + + + else: + + for i in range(0, len(timing_list) + 1, items_per_row): + with st.container(): + grid = st.columns(items_per_row) + for j in range(items_per_row): + idx = i + j + if idx <= len(timing_list): + with grid[j]: + if idx == len(timing_list): + if position != "Timeline": + + # st.info("**Add new frame(s) to shot**") + add_key_frame_section(shot_uuid, False) + + else: + timing = timing_list[idx] + if timing.primary_image and timing.primary_image.location: + st.image(timing.primary_image.location, use_column_width=True) + jump_to_single_frame_view_button(idx + 1, timing_list, f"jump_to_{idx + 1}",uuid=shot.uuid) + else: + st.warning("No primary image present.") + # jump_to_single_frame_view_button(idx + 1, timing_list, f"jump_to_{idx + 1}",uuid=shot.uuid) + + st.markdown("***") def move_shot_buttons(shot, direction): @@ -257,79 +350,62 @@ def shot_animation_button(shot, show_label=False): -def shift_frame_to_position(timing_uuid, target_position): - ''' - Shifts the frame to the specified target position within the list of frames. - - Note: target_position is expected to be 1-based for user convenience (e.g., position 1 is the first position). - ''' - data_repo = DataRepo() - timing = data_repo.get_timing_from_uuid(timing_uuid) - timing_list = data_repo.get_timing_list_from_shot(timing.shot.uuid) - # Adjusting target_position to 0-based indexing for internal logic - target_position -= 1 +def move_temp_frame(df, current_position, direction): + if direction == 'forward' and current_position < len(df) - 1: + df.loc[current_position, 'position'], df.loc[current_position + 1, 'position'] = df.loc[current_position + 1, 'position'], df.loc[current_position, 'position'] + elif direction == 'backward' and current_position > 0: + df.loc[current_position, 'position'], df.loc[current_position - 1, 'position'] = df.loc[current_position - 1, 'position'], df.loc[current_position, 'position'] + return df.sort_values('position').reset_index(drop=True) - current_position = timing.aux_frame_index - total_frames = len(timing_list) +# Function to copy a frame in the dataframe +def copy_temp_frame(df, position_to_copy): + new_row = df.loc[position_to_copy].copy() + new_row['uuid'] = f"Copy_of_{new_row['uuid']}" + df = pd.concat([df, pd.DataFrame([new_row])], ignore_index=True) + df['position'] = range(len(df)) + return df.sort_values('position').reset_index(drop=True) - # Check if the target position is valid - if target_position < 0 or target_position >= total_frames: - st.error("Invalid target position") - time.sleep(0.5) - return - - # Check if the frame is already at the target position - if current_position == target_position: - st.error("That's already your position") - time.sleep(0.5) - return +# Function to delete a frame in the dataframe +def delete_temp_frame(df, position_to_delete): + df = df.drop(position_to_delete).reset_index(drop=True) + df['position'] = range(len(df)) + return df - # Update the position of the current frame - data_repo.update_specific_timing(timing.uuid, aux_frame_index=target_position) -def shift_frame_button(idx,shot): - timing_list: List[InternalFrameTimingObject] = shot.timing_list - col1, col2 = st.columns([1,1]) - with col1: - position_to_shift_to = st.number_input("Shift to position:", 
value=timing_list[idx].aux_frame_index+1, key=f"shift_to_position_{timing_list[idx].uuid}",min_value=1, max_value=len(timing_list)) - with col2: - st.write("") - if st.button("Shift", key=f"shift_frame_{timing_list[idx].uuid}", use_container_width=True): - shift_frame_to_position(timing_list[idx].uuid, position_to_shift_to) - st.rerun() - +def update_shot_frames(shot_uuid, timing_list): + """ + Updates the frames for a given shot by deleting existing frames and adding them again. + Displays a progress bar and random emojis to indicate progress. -def timeline_view_buttons(idx, shot_uuid, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_shot_toggle, shift_frame_toggle): - data_repo = DataRepo() - shot = data_repo.get_shot_from_uuid(shot_uuid) - timing_list = shot.timing_list + Parameters: + - shot_uuid: UUID of the shot to update. + - timing_list: List of timing objects associated with the shot. + """ + # Ensure the move frame mode is turned off + st.session_state[f"move_frame_mode_{shot_uuid}"] = False - - btn1, btn2, btn3, btn4 = st.columns([1, 1, 1, 1]) - - if move_frames_toggle: - with btn1: - move_frame_back_button(timing_list[idx].uuid, "side-to-side") - with btn2: - move_frame_forward_button(timing_list[idx].uuid, "side-to-side") - - if copy_frame_toggle: - with btn3: - if st.button("🔁", key=f"copy_frame_{timing_list[idx].uuid}", use_container_width=True): - pil_image = generate_pil_image(timing_list[idx].primary_image.location) - add_key_frame(pil_image, False, st.session_state['shot_uuid'], timing_list[idx].aux_frame_index+1, refresh_state=False) - refresh_app(maintain_state=True) - - if delete_frames_toggle: - with btn4: - delete_frame_button(timing_list[idx].uuid) - - if change_shot_toggle: - change_frame_shot(timing_list[idx].uuid, "side-to-side") - - jump_to_single_frame_view_button(idx + 1, timing_list, 'timeline_btn_'+str(timing_list[idx].uuid)) + # Delete all existing frames first + for timing in timing_list: + delete_frame(timing.uuid) + + # Initialize the progress bar + progress_bar = st.progress(0) + + # Calculate the total number of items for accurate progress update + total_items = len(st.session_state[f"shot_data_{shot_uuid}"]) + + # List of happy emojis + random_list_of_emojis = ["🎉", "🎊", "🎈", "🎁", "🎀", "🎆", "🎇", "🧨", "🪅"] + + # Add frames again and update progress + for idx, (index, row) in enumerate(st.session_state[f"shot_data_{shot_uuid}"].iterrows()): + selected_image_location = row['image_location'] + add_key_frame(selected_image_location, "No", shot_uuid, refresh_state=False) - if shift_frame_toggle: - shift_frame_button(idx,shot) + # Update the progress bar + progress = (idx + 1) / total_items + random_emoji = random.choice(random_list_of_emojis) + st.caption(f"Saving frame {idx + 1} of {total_items} {random_emoji}") + progress_bar.progress(progress) \ No newline at end of file From ddc8772e388b8db2ad476e5fb160307fe8ce7b6f Mon Sep 17 00:00:00 2001 From: peteromallet Date: Thu, 14 Mar 2024 05:16:42 +0100 Subject: [PATCH 10/43] Fixing frame switcher --- ui_components/components/explorer_page.py | 5 +- ui_components/widgets/shot_view.py | 124 +++++++++++++++++++--- 2 files changed, 113 insertions(+), 16 deletions(-) diff --git a/ui_components/components/explorer_page.py b/ui_components/components/explorer_page.py index fd5a4434..4d2fd826 100644 --- a/ui_components/components/explorer_page.py +++ b/ui_components/components/explorer_page.py @@ -530,6 +530,7 @@ def gallery_image_view(project_uuid, shortlist=False, view=["main"], shot=None, else: 
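The move_temp_frame, copy_temp_frame and delete_temp_frame helpers added above all follow the same pattern: mutate a 'position' column on an in-memory DataFrame, then re-sort and re-index. A minimal, self-contained sketch of that pattern, with toy data and simplified names (illustrative only, not code from this repository):

    import pandas as pd

    def move_frame(df: pd.DataFrame, idx: int, direction: str) -> pd.DataFrame:
        # Swap the 'position' values of the frame at idx and its neighbour,
        # then re-sort so the row order matches the new positions.
        if direction == "forward" and idx < len(df) - 1:
            df.loc[idx, "position"], df.loc[idx + 1, "position"] = (
                df.loc[idx + 1, "position"], df.loc[idx, "position"])
        elif direction == "backward" and idx > 0:
            df.loc[idx, "position"], df.loc[idx - 1, "position"] = (
                df.loc[idx - 1, "position"], df.loc[idx, "position"])
        return df.sort_values("position").reset_index(drop=True)

    frames = pd.DataFrame({
        "uuid": ["a", "b", "c"],
        "image_location": ["a.png", "b.png", "c.png"],
        "position": [0, 1, 2],
    })
    frames = move_frame(frames, 0, "forward")
    print(frames["uuid"].tolist())  # ['b', 'a', 'c']

Copy and delete work the same way: add or drop rows, rewrite 'position' as range(len(df)), and nothing touches the data repo until the user presses Save.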
if st.button(f"Add to shot", key=f"add_{gallery_image_list[i + j].uuid}", use_container_width=True): + shot_number = shot_names.index(shot_name) st.session_state["last_shot_number"] = shot_number shot_uuid = shot_list[shot_number].uuid @@ -537,7 +538,9 @@ def gallery_image_view(project_uuid, shortlist=False, view=["main"], shot=None, add_key_frame(gallery_image_list[i + j], False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False, update_cur_frame_idx=False) # removing this from the gallery view data_repo.update_file(gallery_image_list[i + j].uuid, tag="") - refresh_app(maintain_state=True) + st.session_state[f"move_frame_mode_{shot.uuid}"] = False + refresh_app(maintain_state=True) + # else: # st.error("The image is truncated and cannot be displayed.") if 'add_and_remove_from_shortlist' in view: diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 47523833..7144e860 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -36,9 +36,9 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeli timing_list: List[InternalFrameTimingObject] = shot.timing_list with column: - col1, col2 = st.columns([1, 1]) + col1, col2, col3 = st.columns([1, 1, 1]) with col1: - move_frame_mode = st_memory.toggle("Enter 'move frame' mode", value=False, key=f"move_frame_mode_{shot.uuid}") + move_frame_mode = st_memory.toggle("Enter Frame Changer™ mode", value=False, key=f"move_frame_mode_{shot.uuid}") if st.session_state[f"move_frame_mode_{shot.uuid}"]: st.warning("You are in 'move frame' mode. You have to press 'Save' below to save the changes.") if st.button("Save", key=f"save_move_frame_{shot.uuid}", help="Save the changes made in 'move frame' mode", use_container_width=True): @@ -53,11 +53,52 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeli "position": idx } for idx, timing in enumerate(timing_list)] st.session_state[f"shot_data_{shot_uuid}"] = pd.DataFrame(shot_data) + else: + st.session_state[f"shot_data_{shot_uuid}"] = None + st.markdown("***") if move_frame_mode: + + with column: + if f'list_to_move_{shot.uuid}' not in st.session_state: + st.session_state[f'list_to_move_{shot.uuid}'] = [] + with col2: + frame_to_move_to = st.selectbox("Bulk move frames to:", [f"{i + 1}" for i in range(len(timing_list))], key=f"frame_to_move_to_{shot.uuid}") + with col3: + if st.session_state[f"move_frame_mode_{shot.uuid}"]: + + if st.session_state[f'list_to_move_{shot.uuid}'] == []: + st.write("") + st.info("No frames selected to move. 
Select them below.") + else: + st.info(f"Selected frames to move: {st.session_state[f'list_to_move_{shot.uuid}']}") + if st.session_state[f'list_to_move_{shot.uuid}'] != []: + if st.button("Remove all selected", key=f"remove_all_selected_{shot.uuid}", help="Remove all selected frames to move"): + st.session_state[f'list_to_move_{shot.uuid}'] = [] + st.rerun() + + + with col2: + + if st.session_state[f'list_to_move_{shot.uuid}'] != []: + if st.button("Move selected", key=f"move_frame_to_{shot.uuid}", help="Move the frame to the selected position", use_container_width=True): + # order list to move in ascending order + list_to_move = sorted(st.session_state[f'list_to_move_{shot.uuid}']) + + st.session_state[f"shot_data_{shot_uuid}"] = move_temp_frames_to_positions(st.session_state[f"shot_data_{shot_uuid}"], list_to_move, int(frame_to_move_to)-1) + st.session_state[f'list_to_move_{shot.uuid}'] = [] + st.rerun() + if st.button("Delete selected", key=f"delete_frame_to_{shot.uuid}", help="Delete the selected frames"): + + st.session_state[f"shot_data_{shot_uuid}"] = bulk_delete_temp_frames(st.session_state[f"shot_data_{shot_uuid}"], st.session_state[f'list_to_move_{shot.uuid}']) + st.session_state[f'list_to_move_{shot.uuid}'] = [] + st.rerun() + else: + st.button("Move selected", key=f"move_frame_to_{shot.uuid}", use_container_width=True,disabled=True, help="No frames selected to move.") + for i in range(0, len(st.session_state[f"shot_data_{shot_uuid}"]), items_per_row): with st.container(): grid = st.columns(items_per_row) @@ -74,7 +115,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeli else: st.warning("No primary image present.") - btn1, btn2, btn3, btn4 = st.columns([1, 1, 1, 1]) + btn1, btn2, btn3, btn4, btn5 = st.columns([1, 1, 1, 1, 1.25]) with btn1: if st.button("⬅️", key=f"move_frame_back_{idx}", help="Move frame back", use_container_width=True): @@ -92,6 +133,19 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeli if st.button("❌", key=f"delete_frame_{idx}", use_container_width=True): st.session_state[f"shot_data_{shot_uuid}"] = delete_temp_frame(st.session_state[f"shot_data_{shot_uuid}"], idx) st.rerun() + with btn5: + if idx not in st.session_state[f'list_to_move_{shot.uuid}']: + if st.button("Select", key=f"select_frame_{idx}", use_container_width=True): + st.session_state[f'list_to_move_{shot.uuid}'].append(idx) + st.rerun() + else: + if st.button("Deselect", key=f"deselect_frame_{idx}", use_container_width=True,type='primary'): + st.session_state[f'list_to_move_{shot.uuid}'].remove(idx) + st.rerun() + + + + header1, header2 = st.columns([1, 1.5]) with header1: @@ -162,7 +216,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None,position="Timeli bottom1, bottom2 = st.columns([1, 1]) with bottom1: st.warning("You are in 'move frame' mode. 
You have to press 'Save' below to save the changes.") - if st.button("Save", key=f"save_move_frame_{shot.uuid}_bottom", help="Save the changes made in 'move frame' mode", use_container_width=True): + if st.button("Save", key=f"save_move_frame_{shot.uuid}_bottom", help="Save the changes made in 'move frame' mode", use_container_width=True): update_shot_frames(shot_uuid, timing_list) st.rerun() st.markdown("***") @@ -363,7 +417,10 @@ def copy_temp_frame(df, position_to_copy): new_row = df.loc[position_to_copy].copy() new_row['uuid'] = f"Copy_of_{new_row['uuid']}" df = pd.concat([df, pd.DataFrame([new_row])], ignore_index=True) - df['position'] = range(len(df)) + # make the position current frame + 1 and all the frames after it + 1 + df.loc[position_to_copy + 1:, 'position'] = df.loc[position_to_copy + 1:, 'position'] + 1 + + return df.sort_values('position').reset_index(drop=True) # Function to delete a frame in the dataframe @@ -372,17 +429,21 @@ def delete_temp_frame(df, position_to_delete): df['position'] = range(len(df)) return df - +def bulk_delete_temp_frames(df, positions_to_delete): + # Ensure positions_to_delete is a list + if not isinstance(positions_to_delete, list): + positions_to_delete = [positions_to_delete] + + # Drop the rows from their positions + df = df.drop(positions_to_delete).reset_index(drop=True) + + # Correct the 'position' column to reflect the new order + df['position'] = range(len(df)) + + return df def update_shot_frames(shot_uuid, timing_list): - """ - Updates the frames for a given shot by deleting existing frames and adding them again. - Displays a progress bar and random emojis to indicate progress. - - Parameters: - - shot_uuid: UUID of the shot to update. - - timing_list: List of timing objects associated with the shot. 
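The staging pattern behind this mode, for reference: every edit lives in st.session_state under a key derived from the shot, and update_shot_frames (continued below) is the only place the edits are persisted, by deleting the old timings and re-adding frames from the staged DataFrame. A stripped-down sketch of that pattern, assuming plain Streamlit widgets and a hypothetical persist_frames() callback (function and widget names here are illustrative):

    import pandas as pd
    import streamlit as st

    def staged_frame_editor(shot_uuid: str, current_frames: list, persist_frames):
        # Stage edits in session state; only call persist_frames() on Save.
        key = f"shot_data_{shot_uuid}"
        edit_mode = st.checkbox("Edit frames", key=f"edit_mode_{shot_uuid}")

        if not edit_mode:
            st.session_state[key] = None          # drop any stale working copy
            return

        if st.session_state.get(key) is None:     # build the working copy once
            st.session_state[key] = pd.DataFrame({
                "uuid": [f["uuid"] for f in current_frames],
                "image_location": [f["image_location"] for f in current_frames],
                "position": list(range(len(current_frames))),
            })

        st.dataframe(st.session_state[key])       # reorder/copy/delete widgets would go here

        if st.button("Save", key=f"save_{shot_uuid}"):
            persist_frames(shot_uuid, st.session_state[key])  # write back in one go
            st.session_state[key] = None
            st.rerun()

Keeping the working copy out of the database until Save means a stray click can always be abandoned by toggling the mode off.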
- """ + # Ensure the move frame mode is turned off st.session_state[f"move_frame_mode_{shot_uuid}"] = False @@ -408,4 +469,37 @@ def update_shot_frames(shot_uuid, timing_list): progress = (idx + 1) / total_items random_emoji = random.choice(random_list_of_emojis) st.caption(f"Saving frame {idx + 1} of {total_items} {random_emoji}") - progress_bar.progress(progress) \ No newline at end of file + progress_bar.progress(progress) + + st.session_state[f"shot_data_{shot_uuid}"] = None + + + +def move_temp_frames_to_positions(df, current_positions, new_start_position): + # Ensure current_positions is a list + if not isinstance(current_positions, list): + current_positions = [current_positions] + + # Sort current_positions to handle them in order + current_positions = sorted(current_positions) + + # Extract rows to move + rows_to_move = df.iloc[current_positions] + + # Drop the rows from their current positions + df = df.drop(df.index[current_positions]).reset_index(drop=True) + + # Calculate new positions considering the removals + new_positions = range(new_start_position, new_start_position + len(rows_to_move)) + + # Split the DataFrame into parts before and after the new start position + df_before = df.iloc[:new_start_position] + df_after = df.iloc[new_start_position:] + + # Reassemble the DataFrame with rows inserted at their new positions + df = pd.concat([df_before, rows_to_move, df_after]).reset_index(drop=True) + + # Correct the 'position' column to reflect the new order + df['position'] = range(len(df)) + + return df \ No newline at end of file From 3f9dd3c2dd6729b0611ff6f1d98b187598a50758 Mon Sep 17 00:00:00 2001 From: peteromallet Date: Fri, 15 Mar 2024 07:24:03 +0100 Subject: [PATCH 11/43] Adding numbering --- .../components/timeline_view_page.py | 76 ++++++++++++++++++- ui_components/widgets/shot_view.py | 3 +- .../widgets/variant_comparison_grid.py | 29 +++++++ 3 files changed, 106 insertions(+), 2 deletions(-) diff --git a/ui_components/components/timeline_view_page.py b/ui_components/components/timeline_view_page.py index e6cf0586..bf49204e 100644 --- a/ui_components/components/timeline_view_page.py +++ b/ui_components/components/timeline_view_page.py @@ -1,5 +1,10 @@ import time import streamlit as st +import os +import requests +import shutil +from zipfile import ZipFile +from io import BytesIO from ui_components.constants import CreativeProcessType from ui_components.widgets.timeline_view import timeline_view from ui_components.components.explorer_page import gallery_image_view @@ -35,11 +40,80 @@ def timeline_view_page(shot_uuid: str, h2): st.markdown(f"#### :green[{st.session_state['main_view_type']}] > :red[{st.session_state['page']}]") st.markdown("***") - slider1, slider2 = st.columns([4,1]) + slider1, slider2, slider3 = st.columns([2,1,1]) with slider1: st.markdown(f"### 🪄 '{project.name}' timeline") st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") + with slider3: + with st.expander("Export all main variants", expanded=False): + shot_list = data_repo.get_shot_list(project_uuid) + if not shot_list: + st.info("No shots available in the project.") + return + + if st.button('Prepare videos for download'): + temp_dir = 'temp_main_variants' + os.makedirs(temp_dir, exist_ok=True) + zip_data = BytesIO() + st.info("Preparing videos for download. 
This may take a while.") + time.sleep(0.4) + try: + for idx, shot in enumerate(shot_list): + if shot.main_clip and shot.main_clip.location: + # Prepend the shot number (idx + 1) to the filename + file_name = f'{idx + 1:03d}_{shot.name}.mp4' # Using :03d to ensure the number is zero-padded to 3 digits + file_path = os.path.join(temp_dir, file_name) + if shot.main_clip.location.startswith('http'): + response = requests.get(shot.main_clip.location) + with open(file_path, 'wb') as f: + f.write(response.content) + else: + shutil.copyfile(shot.main_clip.location, file_path) + + with ZipFile(zip_data, 'w') as zipf: + for root, _, files in os.walk(temp_dir): + for file in files: + zipf.write(os.path.join(root, file), file) + + st.download_button( + label="Download Main Variant Videos zip", + data=zip_data.getvalue(), + file_name="main_variant_videos.zip", + mime='application/zip', + key="main_variant_download", + use_container_width=True, + type="primary" + ) + finally: + shutil.rmtree(temp_dir) + with slider2: + with st.expander("Bulk upscale", expanded=False): + def upscale_settings(): + checkpoints_dir = "ComfyUI/models/checkpoints" + all_files = os.listdir(checkpoints_dir) + if len(all_files) == 0: + st.info("No models found in the checkpoints directory") + styling_model = "None" + else: + # Filter files to only include those with .safetensors and .ckpt extensions + model_files = [file for file in all_files if file.endswith('.safetensors') or file.endswith('.ckpt')] + # drop all files that contain xl + model_files = [file for file in model_files if "xl" not in file] + model_files.insert(0, "None") # Add "None" option at the beginning + styling_model = st.selectbox("Styling model", model_files, key="styling_model") + + type_of_upscaler = st.selectbox("Type of upscaler", ["Dreamy", "Realistic", "Anime", "Cartoon"], key="type_of_upscaler") + upscale_by = st.slider("Upscale by", min_value=1.0, max_value=3.0, step=0.1, key="upscale_by", value=2.0) + strength_of_upscale = st.slider("Strength of upscale", min_value=1.0, max_value=3.0, step=0.1, key="strength_of_upscale", value=2.0) + set_upscaled_to_main_variant = st.checkbox("Set upscaled to main variant", key="set_upscaled_to_main_variant", value=True) + + return styling_model, type_of_upscaler, upscale_by, strength_of_upscale, set_upscaled_to_main_variant + + styling_model, type_of_upscaler, upscale_by, strength_of_upscale, set_upscaled_to_main_variant = upscale_settings() + + st.button("Upscale All Main Variants") + # start_time = time.time() timeline_view(st.session_state["shot_uuid"], st.session_state['view']) st.markdown("### ✨ Generate frames") diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index d74f7de0..58f1175b 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -108,6 +108,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel row = st.session_state[f"shot_data_{shot_uuid}"].loc[idx] if row['image_location']: + st.caption(f"Frame {idx + 1}") st.image(row['image_location'], use_column_width=True) else: st.warning("No primary image present.") @@ -224,7 +225,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel with grid[j]: if idx == len(timing_list): if position != "Timeline": - add_key_frame_section(shot_uuid, False) + add_key_frame_section(shot_uuid) else: timing = timing_list[idx] if timing.primary_image and timing.primary_image.location: diff --git 
a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index 3fe674c3..f20146a4 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -3,6 +3,8 @@ import ast import streamlit as st import re +import os +from PIL import Image from shared.constants import AIModelCategory, InferenceParamType, InternalFileTag from ui_components.constants import CreativeProcessType from ui_components.methods.animation_style_methods import get_generation_settings_from_log, load_shot_settings @@ -74,6 +76,12 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): # Display the main variant if stage == CreativeProcessType.MOTION.value: st.video(variants[current_variant].location, format='mp4', start_time=0) if (current_variant != -1 and variants[current_variant]) else st.error("No video present") + with st.expander("Upscale settings", expanded=False): + styling_model, type_of_upscaler, upscale_by, strength_of_upscale, set_upscaled_to_main_variant = upscale_settings() + if st.button("Upscale Main Variant", key=f"upscale_main_variant_{shot_uuid}", help="Upscale the main variant with the selected settings", use_container_width=True): + st.success("Upscaling started") + + create_video_download_button(variants[current_variant].location, tag="var_compare") variant_inference_detail_element(variants[current_variant], stage, shot_uuid, timing_list, tag="var_compare") @@ -267,6 +275,27 @@ def prepare_values(inf_data, timing_list): return values +def upscale_settings(): + checkpoints_dir = "ComfyUI/models/checkpoints" + all_files = os.listdir(checkpoints_dir) + if len(all_files) == 0: + st.info("No models found in the checkpoints directory") + styling_model = "None" + else: + # Filter files to only include those with .safetensors and .ckpt extensions + model_files = [file for file in all_files if file.endswith('.safetensors') or file.endswith('.ckpt')] + # drop all files that contain xl + model_files = [file for file in model_files if "xl" not in file] + model_files.insert(0, "None") # Add "None" option at the beginning + styling_model = st.selectbox("Styling model", model_files, key="styling_model") + + type_of_upscaler = st.selectbox("Type of upscaler", ["Dreamy", "Realistic", "Anime", "Cartoon"], key="type_of_upscaler") + upscale_by = st.slider("Upscale by", min_value=1.0, max_value=3.0, step=0.1, key="upscale_by", value=2.0) + strength_of_upscale = st.slider("Strength of upscale", min_value=1.0, max_value=3.0, step=0.1, key="strength_of_upscale", value=2.0) + set_upscaled_to_main_variant = st.checkbox("Set upscaled to main variant", key="set_upscaled_to_main_variant", value=True) + + return styling_model, type_of_upscaler, upscale_by, strength_of_upscale, set_upscaled_to_main_variant + def fetch_inference_data(file: InternalFileObject): if not file: return From 8365e3e7064821723b1b6f44bfa59b7b052ea099 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Fri, 15 Mar 2024 10:04:59 +0000 Subject: [PATCH 12/43] minor fixes --- ui_components/components/explorer_page.py | 3 +- .../components/timeline_view_page.py | 90 ++++++++++--------- ui_components/widgets/timeline_view.py | 40 --------- 3 files changed, 50 insertions(+), 83 deletions(-) diff --git a/ui_components/components/explorer_page.py b/ui_components/components/explorer_page.py index 972c5ee2..d3434698 100644 --- a/ui_components/components/explorer_page.py +++ b/ui_components/components/explorer_page.py @@ -526,7 +526,6 @@ def 
gallery_image_view(project_uuid, shortlist=False, view=["main"], shot=None, else: if st.button(f"Add to shot", key=f"add_{gallery_image_list[i + j].uuid}", use_container_width=True): - shot_number = shot_names.index(shot_name) st.session_state["last_shot_number"] = shot_number shot_uuid = shot_list[shot_number].uuid @@ -534,7 +533,7 @@ def gallery_image_view(project_uuid, shortlist=False, view=["main"], shot=None, add_key_frame(gallery_image_list[i + j], shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False, update_cur_frame_idx=False) # removing this from the gallery view data_repo.update_file(gallery_image_list[i + j].uuid, tag="") - st.session_state[f"move_frame_mode_{shot.uuid}"] = False + st.session_state[f"move_frame_mode_{shot_uuid}"] = False refresh_app(maintain_state=True) # else: diff --git a/ui_components/components/timeline_view_page.py b/ui_components/components/timeline_view_page.py index bf49204e..9461c06e 100644 --- a/ui_components/components/timeline_view_page.py +++ b/ui_components/components/timeline_view_page.py @@ -45,48 +45,54 @@ def timeline_view_page(shot_uuid: str, h2): st.markdown(f"### 🪄 '{project.name}' timeline") st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") + shot_list = data_repo.get_shot_list(project_uuid) + main_clip_list = [] + for shot in shot_list: + if shot.main_clip and shot.main_clip.location: + main_clip_list.append(shot.main_clip.location) + with slider3: with st.expander("Export all main variants", expanded=False): - shot_list = data_repo.get_shot_list(project_uuid) - if not shot_list: - st.info("No shots available in the project.") - return - - if st.button('Prepare videos for download'): - temp_dir = 'temp_main_variants' - os.makedirs(temp_dir, exist_ok=True) - zip_data = BytesIO() - st.info("Preparing videos for download. This may take a while.") - time.sleep(0.4) - try: - for idx, shot in enumerate(shot_list): - if shot.main_clip and shot.main_clip.location: - # Prepend the shot number (idx + 1) to the filename - file_name = f'{idx + 1:03d}_{shot.name}.mp4' # Using :03d to ensure the number is zero-padded to 3 digits - file_path = os.path.join(temp_dir, file_name) - if shot.main_clip.location.startswith('http'): - response = requests.get(shot.main_clip.location) - with open(file_path, 'wb') as f: - f.write(response.content) - else: - shutil.copyfile(shot.main_clip.location, file_path) + if not len(main_clip_list): + st.info("No videos available in the project.") + + else: + if st.button('Prepare videos for download'): + temp_dir = 'temp_main_variants' + os.makedirs(temp_dir, exist_ok=True) + zip_data = BytesIO() + st.info("Preparing videos for download. 
This may take a while.") + time.sleep(0.4) + try: + for idx, shot in enumerate(shot_list): + if shot.main_clip and shot.main_clip.location: + # Prepend the shot number (idx + 1) to the filename + file_name = f'{idx + 1:03d}_{shot.name}.mp4' # Using :03d to ensure the number is zero-padded to 3 digits + file_path = os.path.join(temp_dir, file_name) + if shot.main_clip.location.startswith('http'): + response = requests.get(shot.main_clip.location) + with open(file_path, 'wb') as f: + f.write(response.content) + else: + shutil.copyfile(shot.main_clip.location, file_path) - with ZipFile(zip_data, 'w') as zipf: - for root, _, files in os.walk(temp_dir): - for file in files: - zipf.write(os.path.join(root, file), file) + with ZipFile(zip_data, 'w') as zipf: + for root, _, files in os.walk(temp_dir): + for file in files: + zipf.write(os.path.join(root, file), file) - st.download_button( - label="Download Main Variant Videos zip", - data=zip_data.getvalue(), - file_name="main_variant_videos.zip", - mime='application/zip', - key="main_variant_download", - use_container_width=True, - type="primary" - ) - finally: - shutil.rmtree(temp_dir) + st.download_button( + label="Download Main Variant Videos zip", + data=zip_data.getvalue(), + file_name="main_variant_videos.zip", + mime='application/zip', + key="main_variant_download", + use_container_width=True, + type="primary" + ) + finally: + shutil.rmtree(temp_dir) + with slider2: with st.expander("Bulk upscale", expanded=False): def upscale_settings(): @@ -110,9 +116,11 @@ def upscale_settings(): return styling_model, type_of_upscaler, upscale_by, strength_of_upscale, set_upscaled_to_main_variant - styling_model, type_of_upscaler, upscale_by, strength_of_upscale, set_upscaled_to_main_variant = upscale_settings() - - st.button("Upscale All Main Variants") + if not len(main_clip_list): + st.info("No videos to upscale") + else: + styling_model, type_of_upscaler, upscale_by, strength_of_upscale, set_upscaled_to_main_variant = upscale_settings() + st.button("Upscale All Main Variants") # start_time = time.time() timeline_view(st.session_state["shot_uuid"], st.session_state['view']) diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index a71158c3..62420d9a 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -71,46 +71,6 @@ def timeline_view(shot_uuid, stage): with grid[(idx + 1) % items_per_row]: st.markdown("### Add new shot") add_new_shot_element(shot, data_repo) - # download_all_video_element(shot_list) - -def download_all_video_element(shot_list: List[InternalShotObject]): - dwn_key = 'download_all_videos' - if dwn_key in st.session_state and st.session_state[dwn_key]: - st.session_state[dwn_key] = False - temp_dir = 'temp' - os.makedirs(temp_dir, exist_ok=True) - zip_data = BytesIO() - try: - paths = [s.main_clip.location for s in shot_list] - for idx, path in enumerate(paths): - file_name = f'file_{idx}.mp4' - file_path = os.path.join(temp_dir, file_name) - if path.startswith('http'): - response = requests.get(path) - with open(file_path, 'wb') as f: - f.write(response.content) - else: - shutil.copyfile(path, file_path) - - with ZipFile(zip_data, 'w') as zipf: - for root, _, files in os.walk(temp_dir): - for file in files: - zipf.write(os.path.join(root, file), file) - - st.download_button( - label="Download zip", - data=zip_data.getvalue(), - file_name="videos.zip", - mime='application/zip', - key="explorer_download", - use_container_width=True - ) - finally: - 
shutil.rmtree(temp_dir) - - if st.button('Create videos zip'): - st.session_state[dwn_key] = True - st.rerun() def add_new_shot_element(shot, data_repo): new_shot_name = st.text_input("Shot Name:",max_chars=25) From 8e563d3c369cad51829d2357f638e75a36f46b95 Mon Sep 17 00:00:00 2001 From: peteromallet Date: Fri, 15 Mar 2024 15:00:59 +0100 Subject: [PATCH 13/43] Fix bug --- ui_components/widgets/shot_view.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 58f1175b..88b35859 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -41,7 +41,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel move_frame_mode = st_memory.toggle("Enter Frame Changer™ mode", value=False, key=f"move_frame_mode_{shot.uuid}") if st.session_state[f"move_frame_mode_{shot.uuid}"]: st.warning("You are in 'move frame' mode. You have to press 'Save' below to save the changes.") - if st.button("Save", key=f"save_move_frame_{shot.uuid}", help="Save the changes made in 'move frame' mode", use_container_width=True): + if st.button("Save", key=f"save_move_frame_{shot.uuid}", help="Save the changes made in 'move frame' mode", use_container_width=True): update_shot_frames(shot_uuid, timing_list) st.rerun() if f"shot_data_{shot_uuid}" not in st.session_state: @@ -430,7 +430,7 @@ def bulk_delete_temp_frames(df, positions_to_delete): return df def update_shot_frames(shot_uuid, timing_list): - + # Ensure the move frame mode is turned off st.session_state[f"move_frame_mode_{shot_uuid}"] = False @@ -450,7 +450,8 @@ def update_shot_frames(shot_uuid, timing_list): # Add frames again and update progress for idx, (index, row) in enumerate(st.session_state[f"shot_data_{shot_uuid}"].iterrows()): selected_image_location = row['image_location'] - add_key_frame(selected_image_location, "No", shot_uuid, refresh_state=False) + # def add_key_frame(selected_image: Union[Image.Image, InternalFileObject], shot_uuid, target_frame_position=None, refresh_state=True, update_cur_frame_idx=True): + add_key_frame(selected_image_location, shot_uuid, refresh_state=False) # Update the progress bar progress = (idx + 1) / total_items From 21293998ecbd7f688b437be7e4ca46079a641fa5 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Fri, 15 Mar 2024 10:46:31 +0000 Subject: [PATCH 14/43] video upscaler added --- ui_components/components/explorer_page.py | 1 - .../components/timeline_view_page.py | 18 +- ui_components/methods/common_methods.py | 2 +- ui_components/methods/ml_methods.py | 2 +- ui_components/methods/video_methods.py | 47 +++- .../widgets/variant_comparison_grid.py | 14 +- utils/ml_processor/comfy_data_transform.py | 62 ++++- .../comfy_workflows/video_upscaler_api.json | 253 ++++++++++++++++++ utils/ml_processor/constants.py | 3 + 9 files changed, 388 insertions(+), 14 deletions(-) create mode 100644 utils/ml_processor/comfy_workflows/video_upscaler_api.json diff --git a/ui_components/components/explorer_page.py b/ui_components/components/explorer_page.py index d3434698..e2d23af5 100644 --- a/ui_components/components/explorer_page.py +++ b/ui_components/components/explorer_page.py @@ -318,7 +318,6 @@ def handle_image_input(column, type_of_generation, output_value_name, data_repo= ) output, log = ml_client.predict_model_output_standardized(ML_MODEL.sdxl_inpainting, query_obj, queue_inference=QUEUE_INFERENCE_QUERIES) - if log: inference_data = { diff --git 
a/ui_components/components/timeline_view_page.py b/ui_components/components/timeline_view_page.py index 9461c06e..5576cfa2 100644 --- a/ui_components/components/timeline_view_page.py +++ b/ui_components/components/timeline_view_page.py @@ -6,9 +6,9 @@ from zipfile import ZipFile from io import BytesIO from ui_components.constants import CreativeProcessType +from ui_components.methods.video_methods import upscale_video from ui_components.widgets.timeline_view import timeline_view from ui_components.components.explorer_page import gallery_image_view -from streamlit_option_menu import option_menu from utils import st_memory from utils.data_repo.data_repo import DataRepo from ui_components.widgets.sidebar_logger import sidebar_logger @@ -119,9 +119,19 @@ def upscale_settings(): if not len(main_clip_list): st.info("No videos to upscale") else: - styling_model, type_of_upscaler, upscale_by, strength_of_upscale, set_upscaled_to_main_variant = upscale_settings() - st.button("Upscale All Main Variants") - + styling_model, upscaler_type, upscale_factor, upscale_strength, promote_to_main_variant = upscale_settings() + if st.button("Upscale All Main Variants"): + for shot in shot_list: + if shot.main_clip and shot.main_clip.location: + upscale_video( + shot.uuid, + styling_model, + upscaler_type, + upscale_factor, + upscale_strength, + promote_to_main_variant + ) + # start_time = time.time() timeline_view(st.session_state["shot_uuid"], st.session_state['view']) st.markdown("### ✨ Generate frames") diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 9b4bb3b8..aeb4a39a 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -750,7 +750,7 @@ def process_inference_output(**kwargs): inference_log_id=log_uuid ) - if not shot.main_clip: + if not shot.main_clip or settings.get("promote_to_main_variant", False): output_video = sync_audio_and_duration(video, shot_uuid) data_repo.update_shot(uuid=shot_uuid, main_clip_id=output_video.uuid) data_repo.add_interpolated_clip(shot_uuid, interpolated_clip_id=output_video.uuid) diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 378bee04..3609a4af 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -188,7 +188,7 @@ # emotion = (f"neutral expression") # return emotion -def inpainting(input_image: str, prompt, negative_prompt, width, height, shot_uuid,project_uuid) -> InternalFileObject: +def inpainting(input_image: str, prompt, negative_prompt, width, height, shot_uuid, project_uuid) -> InternalFileObject: data_repo = DataRepo() # timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index 9af6d901..a98a2f0d 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -10,14 +10,17 @@ from moviepy.editor import concatenate_videoclips, concatenate_audioclips, VideoFileClip, AudioFileClip, CompositeVideoClip from pydub import AudioSegment -from shared.constants import InferenceType, InternalFileTag +from shared.constants import QUEUE_INFERENCE_QUERIES, InferenceType, InternalFileTag from shared.file_upload.s3 import is_s3_image_url from ui_components.methods.file_methods import save_or_host_file_bytes from ui_components.models import InternalFileObject, InternalFrameTimingObject, InternalShotObject from utils.common_utils import padded_integer 
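A note on the "Export all main variants" expander added earlier in this series: the zip handed to st.download_button is assembled in a BytesIO buffer, so nothing has to survive the Streamlit rerun; the patch stages each clip in a temporary directory before zipping, but the same idea also works fully in memory via ZipFile.writestr. A rough sketch under that assumption (function name and file list are illustrative):

    import os
    from io import BytesIO
    from zipfile import ZipFile

    import requests
    import streamlit as st

    def zip_download_button(video_locations):
        # Bundle local paths and URLs into a single in-memory zip and offer it for download.
        zip_buffer = BytesIO()
        with ZipFile(zip_buffer, "w") as zipf:
            for idx, loc in enumerate(video_locations):
                name = f"{idx + 1:03d}_{os.path.basename(loc)}"  # zero-pad so files sort in shot order
                if loc.startswith("http"):
                    zipf.writestr(name, requests.get(loc).content)  # remote clip
                else:
                    zipf.write(loc, arcname=name)                   # local clip
        st.download_button(
            label="Download zip",
            data=zip_buffer.getvalue(),
            file_name="main_variant_videos.zip",
            mime="application/zip",
        )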
+from utils.constants import MLQueryObject from utils.data_repo.data_repo import DataRepo from utils.media_processor.interpolator import VideoInterpolator from utils.media_processor.video import VideoProcessor +from utils.ml_processor.constants import ML_MODEL +from utils.ml_processor.ml_interface import get_ml_client def create_single_interpolated_clip(shot_uuid, quality, settings={}, variant_count=1, backlog=False): @@ -68,6 +71,48 @@ def create_single_interpolated_clip(shot_uuid, quality, settings={}, variant_cou st.error("Failed to create interpolated clip") time.sleep(0.5) st.rerun() + +def upscale_video(shot_uuid, styling_model, upscaler_type, upscale_factor, upscale_strength, promote_to_main_variant): + from ui_components.methods.common_methods import process_inference_output + from shared.constants import QUEUE_INFERENCE_QUERIES + + data_repo = DataRepo() + shot = data_repo.get_shot_from_uuid(shot_uuid) + + query_obj = MLQueryObject( + timing_uuid=None, + model_uuid=None, + guidance_scale=8, + seed=-1, + num_inference_steps=25, + strength=0.5, + adapter_type=None, + prompt="upscale", + negative_prompt="", + height=512, # these are dummy values, not used + width=512, + data={ + "file_video": shot.main_clip.uuid, + "model": styling_model, + "upscaler_type": upscaler_type, + "upscale_factor": upscale_factor, + "upscale_strength": upscale_strength + } + ) + + ml_client = get_ml_client() + output, log = ml_client.predict_model_output_standardized(ML_MODEL.video_upscaler, query_obj, queue_inference=QUEUE_INFERENCE_QUERIES) + if log: + inference_data = { + "inference_type": InferenceType.FRAME_INTERPOLATION.value, + "output": output, + "log_uuid": log.uuid, + "settings": {"promote_to_main_variant": promote_to_main_variant}, + "shot_uuid": str(shot_uuid) + } + + process_inference_output(**inference_data) + def update_speed_of_video_clip(video_file: InternalFileObject, duration) -> InternalFileObject: from ui_components.methods.file_methods import generate_temp_file, convert_bytes_to_file diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index f20146a4..7c60f173 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -10,7 +10,7 @@ from ui_components.methods.animation_style_methods import get_generation_settings_from_log, load_shot_settings from ui_components.methods.common_methods import promote_image_variant, promote_video_variant from ui_components.methods.file_methods import create_duplicate_file -from ui_components.methods.video_methods import sync_audio_and_duration +from ui_components.methods.video_methods import sync_audio_and_duration, upscale_video from ui_components.widgets.shot_view import create_video_download_button from ui_components.models import InternalAIModelObject, InternalFileObject from ui_components.widgets.add_key_frame_element import add_key_frame @@ -77,10 +77,16 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): if stage == CreativeProcessType.MOTION.value: st.video(variants[current_variant].location, format='mp4', start_time=0) if (current_variant != -1 and variants[current_variant]) else st.error("No video present") with st.expander("Upscale settings", expanded=False): - styling_model, type_of_upscaler, upscale_by, strength_of_upscale, set_upscaled_to_main_variant = upscale_settings() + styling_model, upscaler_type, upscale_factor, upscale_strength, promote_to_main_variant = upscale_settings() if 
st.button("Upscale Main Variant", key=f"upscale_main_variant_{shot_uuid}", help="Upscale the main variant with the selected settings", use_container_width=True): - st.success("Upscaling started") - + upscale_video( + shot_uuid, + styling_model, + upscaler_type, + upscale_factor, + upscale_strength, + promote_to_main_variant + ) create_video_download_button(variants[current_variant].location, tag="var_compare") variant_inference_detail_element(variants[current_variant], stage, shot_uuid, timing_list, tag="var_compare") diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index c0d5335b..d9448160 100644 --- a/utils/ml_processor/comfy_data_transform.py +++ b/utils/ml_processor/comfy_data_transform.py @@ -23,7 +23,8 @@ ComfyWorkflow.IP_ADAPTER_PLUS: {"workflow_path": 'comfy_workflows/ipadapter_plus_api.json', "output_node_id": 29}, ComfyWorkflow.IP_ADAPTER_FACE: {"workflow_path": 'comfy_workflows/ipadapter_face_api.json', "output_node_id": 29}, ComfyWorkflow.IP_ADAPTER_FACE_PLUS: {"workflow_path": 'comfy_workflows/ipadapter_face_plus_api.json', "output_node_id": 29}, - ComfyWorkflow.STEERABLE_MOTION: {"workflow_path": 'comfy_workflows/steerable_motion_api.json', "output_node_id": 281} + ComfyWorkflow.STEERABLE_MOTION: {"workflow_path": 'comfy_workflows/steerable_motion_api.json', "output_node_id": 281}, + ComfyWorkflow.UPSCALER: {"workflow_path": 'comfy_workflows/video_upscaler_api.json', "output_node_id": 243} } @@ -368,6 +369,62 @@ def update_json_with_loras(json_data, loras): ignore_list = sm_data.get("lora_data", []) return json.dumps(workflow), output_node_ids, [], ignore_list + + @staticmethod + def transform_video_upscaler_workflow(query: MLQueryObject): + data_repo = DataRepo() + workflow, output_node_ids = ComfyDataTransform.get_workflow_json(ComfyWorkflow.UPSCALER) + data = query.data.get("data", {}) + video_uuid = data.get("file_video", None) + video = data_repo.get_file_from_uuid(video_uuid) + model = data.get("model", None) + upscaler_type = data.get("upscaler_type", None) + upscale_factor = data.get("upscale_factor", None) + upscale_strength = data.get("upscale_strength", None) + + workflow["302"]["inputs"]["video"] = os.path.basename(video.filename) + workflow["244"]["inputs"]["ckpt_name"] = model + workflow["241"]["inputs"]["upscale_by"] = upscale_factor + + extra_models_list = [ + { + "filename": "controlnet_checkpoint.ckpt", + "url": "https://huggingface.co/crishhh/animatediff_controlnet/resolve/main/controlnet_checkpoint.ckpt", + "dest": "./ComfyUI/models/controlnet/" + }, + { + "filename": "AnimateLCM_sd15_t2v_lora.safetensors", + "url": "https://huggingface.co/wangfuyun/AnimateLCM/resolve/main/AnimateLCM_sd15_t2v_lora.safetensors?download=true", + "dest": "./ComfyUI/models/loras/" + }, + { + "filename": "AnimateLCM_sd15_t2v.ckpt", + "url": "https://huggingface.co/wangfuyun/AnimateLCM/resolve/main/AnimateLCM_sd15_t2v.ckpt?download=true", + "dest": "./ComfyUI/models/animatediff_models/" + }, + { + "filename": "4x_RealisticRescaler_100000_G.pth", + "url": "https://huggingface.co/holwech/realistic-rescaler-real-esrgan/resolve/main/4x_RealisticRescaler_100000_G.pth?download=true", + "dest": "./ComfyUI/models/upscale_models/" + }, + { + "filename": "4xLexicaHAT.pth", + "url": "https://github.com/Phhofm/models/raw/main/4xLexicaHAT/4xLexicaHAT.pth", + "dest": "./ComfyUI/models/upscale_models/" + }, + { + "filename": "2x_AstroManLite_266k.pth", + "url": 
"https://huggingface.co/lone682/upscaler_models/resolve/main/2x_AstroManLite_266k.pth?download=true", + "dest": "./ComfyUI/models/upscale_models/" + }, + { + "filename": "4x_IllustrationJaNai_V1_ESRGAN_135k.pth", + "url": "https://huggingface.co/lone682/upscaler_models/resolve/main/4x_IllustrationJaNai_V1_ESRGAN_135k.pth?download=true", + "dest": "./ComfyUI/models/upscale_models/" + } + ] + + return json.dumps(workflow), output_node_ids, extra_models_list, [] # NOTE: only populating with models currently in use @@ -381,7 +438,8 @@ def update_json_with_loras(json_data, loras): ML_MODEL.ipadapter_face.workflow_name: ComfyDataTransform.transform_ipadaptor_face_workflow, ML_MODEL.ipadapter_face_plus.workflow_name: ComfyDataTransform.transform_ipadaptor_face_plus_workflow, ML_MODEL.ad_interpolation.workflow_name: ComfyDataTransform.transform_steerable_motion_workflow, - ML_MODEL.sdxl_img2img.workflow_name: ComfyDataTransform.transform_sdxl_img2img_workflow + ML_MODEL.sdxl_img2img.workflow_name: ComfyDataTransform.transform_sdxl_img2img_workflow, + ML_MODEL.video_upscaler.workflow_name: ComfyDataTransform.transform_video_upscaler_workflow } # returns stringified json of the workflow diff --git a/utils/ml_processor/comfy_workflows/video_upscaler_api.json b/utils/ml_processor/comfy_workflows/video_upscaler_api.json new file mode 100644 index 00000000..a9eb43e5 --- /dev/null +++ b/utils/ml_processor/comfy_workflows/video_upscaler_api.json @@ -0,0 +1,253 @@ +{ + "241": { + "inputs": { + "upscale_by": 2, + "seed": 44, + "steps": 4, + "cfg": 1, + "sampler_name": "lcm", + "scheduler": "sgm_uniform", + "denoise": 0.25, + "mode_type": "Linear", + "tile_width": 640, + "tile_height": 640, + "mask_blur": 32, + "tile_padding": 64, + "seam_fix_mode": "None", + "seam_fix_denoise": 0.75, + "seam_fix_width": 64, + "seam_fix_mask_blur": 16, + "seam_fix_padding": 32, + "force_uniform_tiles": true, + "tiled_decode": false, + "image": [ + "302", + 0 + ], + "model": [ + "301", + 0 + ], + "positive": [ + "253", + 0 + ], + "negative": [ + "253", + 1 + ], + "vae": [ + "244", + 4 + ], + "upscale_model": [ + "242", + 0 + ] + }, + "class_type": "UltimateSDUpscale", + "_meta": { + "title": "Ultimate SD Upscale" + } + }, + "242": { + "inputs": { + "model_name": "4xLexicaHAT.pth" + }, + "class_type": "Upscale Model Loader", + "_meta": { + "title": "Upscale Model Loader" + } + }, + "243": { + "inputs": { + "frame_rate": 15, + "loop_count": 0, + "filename_prefix": "hr_ult", + "format": "video/ProRes", + "pingpong": false, + "save_output": true, + "images": [ + "241", + 0 + ] + }, + "class_type": "VHS_VideoCombine", + "_meta": { + "title": "Video Combine 🎥🅥🅗🅢" + } + }, + "244": { + "inputs": { + "ckpt_name": "Realistic_Vision_V5.0.safetensors", + "vae_name": "Baked VAE", + "clip_skip": -1, + "lora_name": "AnimateLCM_sd15_t2v_lora.safetensors", + "lora_model_strength": 1, + "lora_clip_strength": 1, + "positive": [ + "307", + 0 + ], + "negative": "watermark, text, signature, blurry", + "token_normalization": "none", + "weight_interpretation": "comfy", + "empty_latent_width": 320, + "empty_latent_height": 576, + "batch_size": 64 + }, + "class_type": "Efficient Loader", + "_meta": { + "title": "Efficient Loader" + } + }, + "245": { + "inputs": { + "model_name": "AnimateLCM_sd15_t2v.ckpt", + "beta_schedule": "lcm >> sqrt_linear", + "motion_scale": 1, + "apply_v2_models_properly": false, + "model": [ + "244", + 0 + ], + "context_options": [ + "246", + 0 + ] + }, + "class_type": "ADE_AnimateDiffLoaderWithContext", + "_meta": { + 
"title": "AnimateDiff Loader [Legacy] 🎭🅐🅓①" + } + }, + "246": { + "inputs": { + "context_length": 16, + "context_stride": 1, + "context_overlap": 4, + "closed_loop": false, + "fuse_method": "pyramid", + "use_on_equal_length": false, + "start_percent": 0, + "guarantee_steps": 1 + }, + "class_type": "ADE_LoopedUniformContextOptions", + "_meta": { + "title": "Context Options◆Looped Uniform 🎭🅐🅓" + } + }, + "250": { + "inputs": { + "ipadapter_file": "ip-adapter-plus_sd15.bin" + }, + "class_type": "IPAdapterModelLoader", + "_meta": { + "title": "Load IPAdapter Model" + } + }, + "253": { + "inputs": { + "strength": 0.8, + "start_percent": 0, + "end_percent": 0.751, + "positive": [ + "244", + 1 + ], + "negative": [ + "244", + 2 + ], + "control_net": [ + "254", + 0 + ], + "image": [ + "302", + 0 + ] + }, + "class_type": "ACN_AdvancedControlNetApply", + "_meta": { + "title": "Apply Advanced ControlNet 🛂🅐🅒🅝" + } + }, + "254": { + "inputs": { + "control_net_name": "controlnet_checkpoint.ckpt" + }, + "class_type": "ControlNetLoaderAdvanced", + "_meta": { + "title": "Load Advanced ControlNet Model 🛂🅐🅒🅝" + } + }, + "301": { + "inputs": { + "weight": 1, + "noise": 0.3, + "weight_type": "original", + "start_at": 0, + "end_at": 1, + "unfold_batch": true, + "ipadapter": [ + "250", + 0 + ], + "clip_vision": [ + "303", + 0 + ], + "image": [ + "302", + 0 + ], + "model": [ + "245", + 0 + ] + }, + "class_type": "IPAdapterApply", + "_meta": { + "title": "Apply IPAdapter" + } + }, + "302": { + "inputs": { + "video": "94568767-6b1e-450d-8dfb-cb8f08365d6f.mp4", + "force_rate": 0, + "force_size": "Disabled", + "custom_width": 512, + "custom_height": 512, + "frame_load_cap": 0, + "skip_first_frames": 0, + "select_every_nth": 1 + }, + "class_type": "VHS_LoadVideo", + "_meta": { + "title": "Load Video (Upload) 🎥🅥🅗🅢" + } + }, + "303": { + "inputs": { + "clip_name": "SD1.5/pytorch_model.bin" + }, + "class_type": "CLIPVisionLoader", + "_meta": { + "title": "Load CLIP Vision" + } + }, + "307": { + "inputs": { + "action": "append", + "tidy_tags": "yes", + "text_a": "", + "text_b": "", + "result": "" + }, + "class_type": "StringFunction|pysssss", + "_meta": { + "title": "String Function 🐍" + } + } + } \ No newline at end of file diff --git a/utils/ml_processor/constants.py b/utils/ml_processor/constants.py index d34a65f8..42c36127 100644 --- a/utils/ml_processor/constants.py +++ b/utils/ml_processor/constants.py @@ -14,6 +14,7 @@ class ComfyWorkflow(ExtendedEnum): SDXL_INPAINTING = "sdxl-inpainting" STEERABLE_MOTION = "steerable_motion" SDXL_IMG2IMG = "sdxl_img2img" + UPSCALER = "upscale" @dataclass class MLModel: @@ -91,6 +92,8 @@ class ML_MODEL: ipadapter_plus = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.IP_ADAPTER_PLUS) ipadapter_face = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.IP_ADAPTER_FACE) ipadapter_face_plus = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.IP_ADAPTER_FACE_PLUS) + video_upscaler = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.UPSCALER) + @staticmethod def get_model_by_db_obj(model_db_obj): From 99175a0a56c1ef51f2d4fe5b62dd3bd4696c5179 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Fri, 15 Mar 2024 23:43:27 +0000 Subject: [PATCH 15/43] handling large video files --- .../components/timeline_view_page.py | 2 +- .../components/video_rendering_page.py | 3 +- ui_components/methods/file_methods.py | 40 ++++++++++++++---- ui_components/widgets/display_element.py | 12 ++++++ 
ui_components/widgets/shot_view.py | 42 ++++++++++++------- ui_components/widgets/sidebar_logger.py | 3 +- ui_components/widgets/timeline_view.py | 3 +- .../widgets/variant_comparison_grid.py | 13 ++++-- 8 files changed, 88 insertions(+), 30 deletions(-) create mode 100644 ui_components/widgets/display_element.py diff --git a/ui_components/components/timeline_view_page.py b/ui_components/components/timeline_view_page.py index 5576cfa2..864d4aa3 100644 --- a/ui_components/components/timeline_view_page.py +++ b/ui_components/components/timeline_view_page.py @@ -106,7 +106,7 @@ def upscale_settings(): model_files = [file for file in all_files if file.endswith('.safetensors') or file.endswith('.ckpt')] # drop all files that contain xl model_files = [file for file in model_files if "xl" not in file] - model_files.insert(0, "None") # Add "None" option at the beginning + # model_files.insert(0, "None") # Add "None" option at the beginning styling_model = st.selectbox("Styling model", model_files, key="styling_model") type_of_upscaler = st.selectbox("Type of upscaler", ["Dreamy", "Realistic", "Anime", "Cartoon"], key="type_of_upscaler") diff --git a/ui_components/components/video_rendering_page.py b/ui_components/components/video_rendering_page.py index b6875bbe..0ae111a8 100644 --- a/ui_components/components/video_rendering_page.py +++ b/ui_components/components/video_rendering_page.py @@ -10,6 +10,7 @@ from ui_components.models import InternalFileObject, InternalFrameTimingObject from ui_components.widgets.attach_audio_element import attach_audio_element +from ui_components.widgets.display_element import individual_video_display_element from utils.data_repo.data_repo import DataRepo @@ -51,7 +52,7 @@ def video_rendering_page(project_uuid): except Exception as e: st.write(datetime.datetime.strptime(video.created_on, '%Y-%m-%dT%H:%M:%S.%fZ')) - st.video(video.location) + individual_video_display_element(video) col1, col2 = st.columns(2) diff --git a/ui_components/methods/file_methods.py b/ui_components/methods/file_methods.py index de84187b..6f8f6332 100644 --- a/ui_components/methods/file_methods.py +++ b/ui_components/methods/file_methods.py @@ -358,14 +358,22 @@ def list_files_in_folder(folder_path): files.append(file) return files -def get_file_bytes_and_extension(url): +def get_file_bytes_and_extension(path_or_url): try: - response = requests.get(url) - response.raise_for_status() # non-2xx responses - file_bytes = response.content - parsed_url = urlparse(url) - filename, file_extension = os.path.splitext(parsed_url.path) - file_extension = file_extension.lstrip('.') + if urlparse(path_or_url).scheme: + # URL + response = requests.get(path_or_url) + response.raise_for_status() # non-2xx responses + file_bytes = response.content + parsed_url = urlparse(path_or_url) + filename, file_extension = os.path.splitext(parsed_url.path) + file_extension = file_extension.lstrip('.') + else: + # Local file path + with open(path_or_url, 'rb') as file: + file_bytes = file.read() + filename, file_extension = os.path.splitext(path_or_url) + file_extension = file_extension.lstrip('.') return file_bytes, file_extension except Exception as e: @@ -410,4 +418,20 @@ def detect_and_draw_contour(image): img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB) output_image = Image.fromarray(img_rgb) - return output_image \ No newline at end of file + return output_image + +def get_file_size(file_path): + file_size = 0 + if file_path.startswith('http://') or file_path.startswith('https://'): + response = 
requests.head(file_path) + if response.status_code == 200: + file_size = int(response.headers.get('content-length', 0)) + else: + print("Failed to fetch file from URL:", file_path) + else: + if os.path.exists(file_path): + file_size = os.path.getsize(file_path) + else: + print("File does not exist:", file_path) + + return int(file_size / (1024 * 1024)) \ No newline at end of file diff --git a/ui_components/widgets/display_element.py b/ui_components/widgets/display_element.py new file mode 100644 index 00000000..1e2bcac6 --- /dev/null +++ b/ui_components/widgets/display_element.py @@ -0,0 +1,12 @@ +from typing import Union +import streamlit as st +from ui_components.methods.file_methods import get_file_size +from ui_components.models import InternalFileObject + + +def individual_video_display_element(file: Union[InternalFileObject, str]): + file_location = file.location if file and not isinstance(file, str) and file.location else file + if file_location: + st.video(file_location, format='mp4', start_time=0) if get_file_size(file_location) < 5 else st.info("Video file too large to display") + else: + st.error("No video present") \ No newline at end of file diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 58f1175b..752a5359 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -1,3 +1,4 @@ +import base64 import json import time from typing import List @@ -13,7 +14,7 @@ import random from shared.constants import AppSubPage, InferenceParamType from ui_components.constants import WorkflowStageType -from ui_components.methods.file_methods import generate_pil_image +from ui_components.methods.file_methods import generate_pil_image, get_file_bytes_and_extension, get_file_size from streamlit_option_menu import option_menu from shared.constants import InternalFileType from ui_components.models import InternalFrameTimingObject, InternalShotObject @@ -349,26 +350,39 @@ def create_video_download_button(video_location, tag="temp"): # Extract the file name from the video location file_name = os.path.basename(video_location) - if video_location.startswith('http'): # cloud file - response = requests.get(video_location) - st.download_button( - label="Download video", - data=response.content, - file_name=file_name, - mime='video/mp4', - key=tag + str(file_name), - use_container_width=True - ) - else: # local file - with open(video_location, 'rb') as file: + if get_file_size(video_location) > 5: + if st.button("Prepare video for download", use_container_width=True): + file_bytes, file_ext = get_file_bytes_and_extension(video_location) + # file_bytes = base64.b64encode(file_bytes).decode('utf-8') st.download_button( label="Download video", - data=file, + data=file_bytes, file_name=file_name, mime='video/mp4', key=tag + str(file_name), use_container_width=True ) + else: + if video_location.startswith('http'): # cloud file + response = requests.get(video_location) + st.download_button( + label="Download video", + data=response.content, + file_name=file_name, + mime='video/mp4', + key=tag + str(file_name), + use_container_width=True + ) + else: # local file + with open(video_location, 'rb') as file: + st.download_button( + label="Download video", + data=file, + file_name=file_name, + mime='video/mp4', + key=tag + str(file_name), + use_container_width=True + ) def shot_adjustment_button(shot, show_label=False): button_label = "Shot Adjustment 🔧" if show_label else "🔧" diff --git a/ui_components/widgets/sidebar_logger.py 
b/ui_components/widgets/sidebar_logger.py index 7ea047eb..abe316da 100644 --- a/ui_components/widgets/sidebar_logger.py +++ b/ui_components/widgets/sidebar_logger.py @@ -2,6 +2,7 @@ import streamlit as st from shared.constants import InferenceParamType, InferenceStatus, InternalFileTag, InternalFileType +from ui_components.widgets.display_element import individual_video_display_element from ui_components.widgets.frame_movement_widgets import jump_to_single_frame_view_button import json import math @@ -130,7 +131,7 @@ def sidebar_logger(shot_uuid): if output_url.endswith('png') or output_url.endswith('jpg') or output_url.endswith('jpeg') or output_url.endswith('gif'): st.image(output_url) elif output_url.endswith('mp4'): - st.video(output_url, format='mp4', start_time=0) + individual_video_display_element(output_url) else: st.info("No data to display") diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 62420d9a..61d2a3be 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -10,6 +10,7 @@ from ui_components.methods.common_methods import add_new_shot from ui_components.models import InternalFrameTimingObject, InternalShotObject from ui_components.widgets.common_element import duplicate_shot_button +from ui_components.widgets.display_element import individual_video_display_element from ui_components.widgets.shot_view import shot_keyframe_element, shot_adjustment_button, shot_animation_button, update_shot_name, update_shot_duration, move_shot_buttons, delete_shot_button, create_video_download_button from utils.data_repo.data_repo import DataRepo from utils import st_memory @@ -36,7 +37,7 @@ def timeline_view(shot_uuid, stage): with grid[idx % items_per_row]: st.info(f"##### {shot.name}") if shot.main_clip and shot.main_clip.location: - st.video(shot.main_clip.location) + individual_video_display_element(shot.main_clip) else: for i in range(0, len(timing_list), items_per_row): if i % items_per_row == 0: diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index 7c60f173..5676d7f7 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -11,6 +11,7 @@ from ui_components.methods.common_methods import promote_image_variant, promote_video_variant from ui_components.methods.file_methods import create_duplicate_file from ui_components.methods.video_methods import sync_audio_and_duration, upscale_video +from ui_components.widgets.display_element import individual_video_display_element from ui_components.widgets.shot_view import create_video_download_button from ui_components.models import InternalAIModelObject, InternalFileObject from ui_components.widgets.add_key_frame_element import add_key_frame @@ -75,7 +76,8 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): st.success("**Main variant**") # Display the main variant if stage == CreativeProcessType.MOTION.value: - st.video(variants[current_variant].location, format='mp4', start_time=0) if (current_variant != -1 and variants[current_variant]) else st.error("No video present") + if current_variant != -1 and variants[current_variant]: + individual_video_display_element(variants[current_variant]) with st.expander("Upscale settings", expanded=False): styling_model, upscaler_type, upscale_factor, upscale_strength, promote_to_main_variant = upscale_settings() if st.button("Upscale Main Variant", 
key=f"upscale_main_variant_{shot_uuid}", help="Upscale the main variant with the selected settings", use_container_width=True): @@ -116,8 +118,11 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): promote_image_variant(timing.uuid, variant_index) st.rerun() - if stage == CreativeProcessType.MOTION.value: - st.video(variants[variant_index].location, format='mp4', start_time=0) if variants[variant_index] else st.error("No video present") + if stage == CreativeProcessType.MOTION.value: + if variants[variant_index]: + individual_video_display_element(variants[variant_index]) + else: + st.error("No video present") create_video_download_button(variants[variant_index].location, tag="var_details") variant_inference_detail_element(variants[variant_index], stage, shot_uuid, timing_list, tag="var_details") @@ -292,7 +297,7 @@ def upscale_settings(): model_files = [file for file in all_files if file.endswith('.safetensors') or file.endswith('.ckpt')] # drop all files that contain xl model_files = [file for file in model_files if "xl" not in file] - model_files.insert(0, "None") # Add "None" option at the beginning + # model_files.insert(0, "None") # Add "None" option at the beginning styling_model = st.selectbox("Styling model", model_files, key="styling_model") type_of_upscaler = st.selectbox("Type of upscaler", ["Dreamy", "Realistic", "Anime", "Cartoon"], key="type_of_upscaler") From b65c8fe3409c7b49b2cadf43f0ee5787464ac08b Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Sat, 16 Mar 2024 00:46:36 +0000 Subject: [PATCH 16/43] minor fix --- ui_components/widgets/shot_view.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 752a5359..44189322 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -351,7 +351,7 @@ def create_video_download_button(video_location, tag="temp"): file_name = os.path.basename(video_location) if get_file_size(video_location) > 5: - if st.button("Prepare video for download", use_container_width=True): + if st.button("Prepare video for download", use_container_width=True, key=tag + str(file_name)): file_bytes, file_ext = get_file_bytes_and_extension(video_location) # file_bytes = base64.b64encode(file_bytes).decode('utf-8') st.download_button( From db65d98c0c9b0974c3758bca7fc6d99ddf5414d0 Mon Sep 17 00:00:00 2001 From: peteromallet Date: Sat, 16 Mar 2024 05:39:26 +0100 Subject: [PATCH 17/43] Update --- ui_components/widgets/animation_style_element.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index f4ba6274..6e8d3f38 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -68,7 +68,7 @@ def animation_style_element(shot_uuid): st.session_state[f'lora_data_{shot.uuid}'] = [] if f'strength_of_adherence_value_{shot.uuid}' not in st.session_state: - st.session_state[f'strength_of_adherence_value_{shot.uuid}'] = 0.15 + st.session_state[f'strength_of_adherence_value_{shot.uuid}'] = 0.10 if f'type_of_motion_context_index_{shot.uuid}' not in st.session_state: st.session_state[f'type_of_motion_context_index_{shot.uuid}'] = 1 @@ -137,7 +137,7 @@ def animation_style_element(shot_uuid): distances_to_next_frames.append(distance_to_next_frame/2) speed_of_transition = st.slider("Speed of transition:", min_value=0.45, max_value=0.7, step=0.01, 
key=f"speed_of_transition_widget_{idx}_{timing.uuid}", value=st.session_state[f'speed_of_transition_{shot.uuid}_{idx}']) speeds_of_transitions.append(speed_of_transition) - freedom_between_frames = st.slider("Freedom between frames:", min_value=0.05, max_value=0.95, step=0.01, key=f"freedom_between_frames_widget_{idx}_{timing.uuid}", value=st.session_state[f'freedom_between_frames_{shot.uuid}_{idx}']) + freedom_between_frames = st.slider("Freedom between frames:", min_value=0.15, max_value=0.85, step=0.01, key=f"freedom_between_frames_widget_{idx}_{timing.uuid}", value=st.session_state[f'freedom_between_frames_{shot.uuid}_{idx}']) freedoms_between_frames.append(freedom_between_frames) if (i < len(timing_list) - 1) or (len(timing_list) % items_per_row != 0): @@ -670,7 +670,7 @@ def update_prompt(): elif editable_entity == "Speed of transitions": entity_new_val = st.slider("What would you like to change it to?", min_value=0.45, max_value=0.7, step=0.01, value=0.6, key="entity_new_val") elif editable_entity == "Freedom between frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.05, max_value=0.95, step=0.01, value=0.5, key="entity_new_val") + entity_new_val = st.slider("What would you like to change it to?", min_value=0.15, max_value=0.85, step=0.01, value=0.5, key="entity_new_val") elif editable_entity == "Motion during frames": entity_new_val = st.slider("What would you like to change it to?", min_value=0.5, max_value=1.5, step=0.01, value=1.3, key="entity_new_val") From 24d755682f8bfb5efdcd884ea29435429669168c Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Sat, 16 Mar 2024 12:36:01 +0000 Subject: [PATCH 18/43] video disappearance bug fix --- ui_components/widgets/shot_view.py | 45 ++++++++---------------------- 1 file changed, 12 insertions(+), 33 deletions(-) diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 44189322..39ff2bf7 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -350,39 +350,18 @@ def create_video_download_button(video_location, tag="temp"): # Extract the file name from the video location file_name = os.path.basename(video_location) - if get_file_size(video_location) > 5: - if st.button("Prepare video for download", use_container_width=True, key=tag + str(file_name)): - file_bytes, file_ext = get_file_bytes_and_extension(video_location) - # file_bytes = base64.b64encode(file_bytes).decode('utf-8') - st.download_button( - label="Download video", - data=file_bytes, - file_name=file_name, - mime='video/mp4', - key=tag + str(file_name), - use_container_width=True - ) - else: - if video_location.startswith('http'): # cloud file - response = requests.get(video_location) - st.download_button( - label="Download video", - data=response.content, - file_name=file_name, - mime='video/mp4', - key=tag + str(file_name), - use_container_width=True - ) - else: # local file - with open(video_location, 'rb') as file: - st.download_button( - label="Download video", - data=file, - file_name=file_name, - mime='video/mp4', - key=tag + str(file_name), - use_container_width=True - ) + # if get_file_size(video_location) > 5: + if st.button("Prepare video for download", use_container_width=True, key=tag + str(file_name)): + file_bytes, file_ext = get_file_bytes_and_extension(video_location) + # file_bytes = base64.b64encode(file_bytes).decode('utf-8') + st.download_button( + label="Download video", + data=file_bytes, + file_name=file_name, + mime='video/mp4', + key=tag + 
str(file_name) + "_download_gen", + use_container_width=True + ) def shot_adjustment_button(shot, show_label=False): button_label = "Shot Adjustment 🔧" if show_label else "🔧" From c6c99165652a389f03fb2d3152847c3138ea0fbf Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Sat, 16 Mar 2024 23:08:27 -0700 Subject: [PATCH 19/43] windows multiple tab + infinite loop fix --- app.py | 4 +++- banodoco_runner.py | 2 +- utils/common_utils.py | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/app.py b/app.py index d4f952e2..d99e0934 100644 --- a/app.py +++ b/app.py @@ -50,8 +50,10 @@ def start_runner(): app_logger.info("Starting runner") python_executable = sys.executable _ = subprocess.Popen([python_executable, "banodoco_runner.py"]) - while not is_process_active(RUNNER_PROCESS_NAME, RUNNER_PROCESS_PORT): + max_retries = 6 + while not is_process_active(RUNNER_PROCESS_NAME, RUNNER_PROCESS_PORT) and max_retries: time.sleep(0.1) + max_retries -= 1 else: # app_logger.debug("Runner already running") pass diff --git a/banodoco_runner.py b/banodoco_runner.py index a3a51169..97bdeec6 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -75,7 +75,7 @@ def main(): server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind(("localhost", RUNNER_PROCESS_PORT)) - server_socket.listen(1) + server_socket.listen(100) # hacky fix print('runner running') while True: diff --git a/utils/common_utils.py b/utils/common_utils.py index cd3b8b26..0292f618 100644 --- a/utils/common_utils.py +++ b/utils/common_utils.py @@ -230,8 +230,10 @@ def is_process_active(custom_process_name, custom_process_port): client_socket = socket.create_connection(("localhost", custom_process_port)) client_socket.close() res = True + # print("----------------- process is active") except ConnectionRefusedError: res = False + # print("----------------- process is NOT active") else: # Use 'ps' for Unix/Linux ps_output = subprocess.check_output(["ps", "aux"]).decode("utf-8") From ee358e0dd95c716fdd569896c3cf383320a5ce97 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Sun, 17 Mar 2024 00:39:21 +0000 Subject: [PATCH 20/43] inpainting fix --- ui_components/components/explorer_page.py | 9 ++++++++- ui_components/widgets/inpainting_element.py | 10 +++++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/ui_components/components/explorer_page.py b/ui_components/components/explorer_page.py index e2d23af5..a5b526fc 100644 --- a/ui_components/components/explorer_page.py +++ b/ui_components/components/explorer_page.py @@ -2,6 +2,7 @@ import streamlit as st from ui_components.constants import GalleryImageViewType from ui_components.methods.common_methods import get_canny_img, process_inference_output,add_new_shot, save_new_image +from ui_components.methods.file_methods import zoom_and_crop from ui_components.widgets.add_key_frame_element import add_key_frame from ui_components.widgets.inpainting_element import inpainting_image_input from utils.common_utils import refresh_app @@ -14,6 +15,7 @@ from utils.ml_processor.ml_interface import get_ml_client from utils.ml_processor.constants import ML_MODEL import numpy as np +from PIL import Image from utils import st_memory @@ -89,7 +91,12 @@ def handle_image_input(column, type_of_generation, output_value_name, data_repo= st.session_state[f"uploaded_image_{output_value_name}"] = f"0_{output_value_name}" if source_of_starting_image == "Upload": - st.session_state['uploaded_image'] = 
st.file_uploader("Upload a starting image", type=["png", "jpg", "jpeg"], key=st.session_state[f"uploaded_image_{output_value_name}"], help="This will be the base image for the generation.") + uploaded_image = st.file_uploader("Upload a starting image", type=["png", "jpg", "jpeg"], key=st.session_state[f"uploaded_image_{output_value_name}"], help="This will be the base image for the generation.") + if uploaded_image: + uploaded_image = Image.open(uploaded_image) if not isinstance(uploaded_image, Image.Image) else uploaded_image + uploaded_image = zoom_and_crop(uploaded_image, project_settings.width, project_settings.height) + + st.session_state['uploaded_image'] = uploaded_image else: # taking image from shots diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index 6d83c4bd..5f0afcad 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -1,3 +1,4 @@ +import io import uuid import numpy as np from PIL import Image, ImageOps, ImageDraw @@ -5,7 +6,7 @@ from streamlit_drawable_canvas import st_canvas from shared.constants import QUEUE_INFERENCE_QUERIES, InferenceType from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, TEMP_MASK_FILE, DefaultProjectSettingParams, WorkflowStageType -from ui_components.methods.file_methods import add_temp_file_to_project, detect_and_draw_contour, save_or_host_file +from ui_components.methods.file_methods import add_temp_file_to_project, detect_and_draw_contour, save_or_host_file, zoom_and_crop from utils.constants import MLQueryObject from utils.data_repo.data_repo import DataRepo @@ -38,7 +39,7 @@ def inpainting_element(options_width, image, position="explorer"): st.rerun() else: with main_col_1: - canvas_image = Image.open(image) + canvas_image = image if isinstance(image, Image.Image) else Image.open(image) if 'drawing_input' not in st.session_state: st.session_state['drawing_input'] = 'Magic shapes 🪄' @@ -100,7 +101,7 @@ def inpainting_element(options_width, image, position="explorer"): im_rgb.paste(im, mask=im.split()[3]) # Paste the mask onto the RGB image im = im_rgb im = ImageOps.invert(im) # Inverting for sdxl inpainting - st.session_state['editing_image'] = image + st.session_state['editing_image'] = image if isinstance(image, Image.Image) else Image.open(image) mask_file_path = "videos/temp/" + str(uuid.uuid4()) + ".png" mask_file_path = save_or_host_file(im, mask_file_path) or mask_file_path st.session_state['mask_to_use'] = mask_file_path @@ -119,6 +120,7 @@ def inpainting_element(options_width, image, position="explorer"): def inpainting_image_input(project_uuid, position="explorer"): data_repo = DataRepo() options_width, canvas_width = st.columns([1.2, 3]) + project_settings: InternalSettingObject = data_repo.get_project_setting(project_uuid) if not ('uploaded_image' in st.session_state and st.session_state["uploaded_image"]): st.session_state['uploaded_image'] = "" with options_width: @@ -128,6 +130,8 @@ def inpainting_image_input(project_uuid, position="explorer"): uploaded_image = st.file_uploader("Upload a starting image", type=["png", "jpg", "jpeg"], key=f"uploaded_image_{position}", help="This will be the base image for the generation.") if uploaded_image: if st.button("Select as base image", key=f"inpainting_base_image_{position}"): + uploaded_image = Image.open(uploaded_image) + uploaded_image = zoom_and_crop(uploaded_image, project_settings.width, project_settings.height) st.session_state['uploaded_image'] 
= uploaded_image else: # taking image from shots From 45a5ad6cd4599055195d95022be65ad2c149f89f Mon Sep 17 00:00:00 2001 From: peteromallet Date: Sun, 17 Mar 2024 15:45:35 +0100 Subject: [PATCH 21/43] Adding IPA composition --- ui_components/components/explorer_page.py | 18 +- utils/ml_processor/comfy_data_transform.py | 32 +++- .../ipa_composition_workflow_api.json | 174 ++++++++++++++++++ 3 files changed, 213 insertions(+), 11 deletions(-) create mode 100644 utils/ml_processor/comfy_workflows/ipa_composition_workflow_api.json diff --git a/ui_components/components/explorer_page.py b/ui_components/components/explorer_page.py index a5b526fc..7acfcc7d 100644 --- a/ui_components/components/explorer_page.py +++ b/ui_components/components/explorer_page.py @@ -22,7 +22,7 @@ class InputImageStyling(ExtendedEnum): TEXT2IMAGE = "Text to Image" IMAGE2IMAGE = "Image to Image" - CONTROLNET_CANNY = "ControlNet Canny" + IPADAPTER_COMPOSITION = "IP-Adapter Composition" IPADAPTER_FACE = "IP-Adapter Face" IPADAPTER_PLUS = "IP-Adapter Plus" IPADPTER_FACE_AND_PLUS = "IP-Adapter Face & Plus" @@ -207,29 +207,27 @@ def handle_image_input(column, type_of_generation, output_value_name, data_repo= output, log = ml_client.predict_model_output_standardized(ML_MODEL.sdxl_img2img, query_obj, queue_inference=QUEUE_INFERENCE_QUERIES) - elif generation_method == InputImageStyling.CONTROLNET_CANNY.value: - edge_pil_img = get_canny_img(st.session_state["input_image_1"], low_threshold=50, high_threshold=150) # redundant incase of local inference - input_img = edge_pil_img if not GPU_INFERENCE_ENABLED else st.session_state["input_image_1"] + elif generation_method == InputImageStyling.IPADAPTER_COMPOSITION.value: + + input_img = st.session_state["input_image_1"] input_image_file = save_new_image(input_img, project_uuid) query_obj = MLQueryObject( timing_uuid=None, model_uuid=None, image_uuid=input_image_file.uuid, - guidance_scale=8, + guidance_scale=5, seed=-1, num_inference_steps=30, strength=strength_of_image/100, adapter_type=None, - prompt=prompt, - low_threshold=0.2, - high_threshold=0.7, + prompt=prompt, negative_prompt=negative_prompt, height=project_settings.height, width=project_settings.width, data={'condition_scale': 1, "shot_uuid": shot_uuid} ) - output, log = ml_client.predict_model_output_standardized(ML_MODEL.sdxl_controlnet, query_obj, queue_inference=QUEUE_INFERENCE_QUERIES) + output, log = ml_client.predict_model_output_standardized(ML_MODEL.IPADAPTER_COMPOSITION, query_obj, queue_inference=QUEUE_INFERENCE_QUERIES) elif generation_method == InputImageStyling.IPADAPTER_FACE.value: # validation @@ -349,7 +347,7 @@ def handle_image_input(column, type_of_generation, output_value_name, data_repo= st.button("Generate images", key="generate_images", use_container_width=True, type="primary", disabled=True, help="Please enter a prompt to generate images") elif type_of_generation == InputImageStyling.IMAGE2IMAGE.value and st.session_state["input_image_1"] is None: st.button("Generate images", key="generate_images", use_container_width=True, type="primary", disabled=True, help="Please upload an image") - elif type_of_generation == InputImageStyling.CONTROLNET_CANNY.value and st.session_state["input_image_1"] is None: + elif type_of_generation == InputImageStyling.IPADAPTER_COMPOSITION.value and st.session_state["input_image_1"] is None: st.button("Generate images", key="generate_images", use_container_width=True, type="primary", disabled=True, help="Please upload an image") elif type_of_generation == 
InputImageStyling.IPADAPTER_FACE.value and st.session_state["input_image_1"] is None: st.button("Generate images", key="generate_images", use_container_width=True, type="primary", disabled=True, help="Please upload an image") diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index d9448160..3d1b7d1b 100644 --- a/utils/ml_processor/comfy_data_transform.py +++ b/utils/ml_processor/comfy_data_transform.py @@ -17,6 +17,7 @@ ComfyWorkflow.SDXL: {"workflow_path": 'comfy_workflows/sdxl_workflow_api.json', "output_node_id": 19}, ComfyWorkflow.SDXL_IMG2IMG: {"workflow_path": 'comfy_workflows/sdxl_img2img_workflow_api.json', "output_node_id": 31}, ComfyWorkflow.SDXL_CONTROLNET: {"workflow_path": 'comfy_workflows/sdxl_controlnet_workflow_api.json', "output_node_id": 9}, + ComfyWorkflow.IPADAPTER_COMPOSITION: {"workflow_path": 'comfy_workflows/ipa_composition_workflow_api.json', "output_node_id": 27}, ComfyWorkflow.SDXL_CONTROLNET_OPENPOSE: {"workflow_path": 'comfy_workflows/sdxl_openpose_workflow_api.json', "output_node_id": 9}, ComfyWorkflow.LLAMA_2_7B: {"workflow_path": 'comfy_workflows/llama_workflow_api.json', "output_node_id": 14}, ComfyWorkflow.SDXL_INPAINTING: {"workflow_path": 'comfy_workflows/sdxl_inpainting_workflow_api.json', "output_node_id": 56}, @@ -82,7 +83,8 @@ def transform_sdxl_img2img_workflow(query: MLQueryObject): workflow["42:2"]["inputs"]["seed"] = random_seed() return json.dumps(workflow), output_node_ids, [], [] - + + @staticmethod def transform_sdxl_controlnet_workflow(query: MLQueryObject): data_repo = DataRepo() @@ -108,6 +110,33 @@ def transform_sdxl_controlnet_workflow(query: MLQueryObject): return json.dumps(workflow), output_node_ids, [], [] + + @staticmethod + def transform_ipadaptor_composition_workflow(query: MLQueryObject): + data_repo = DataRepo() + workflow, output_node_ids = ComfyDataTransform.get_workflow_json(ComfyWorkflow.IPADAPTER_COMPOSITION) + + # workflow params + width, height = query.width, query.height + # width, height = determine_dimensions_for_sdxl(width, height) + positive_prompt, negative_prompt = query.prompt, query.negative_prompt + steps, cfg = query.num_inference_steps, query.guidance_scale + # low_threshold, high_threshold = query.low_threshold, query.high_threshold + image = data_repo.get_file_from_uuid(query.image_uuid) + image_name = image.filename + + # updating params + workflow["9"]["inputs"]["seed"] = random_seed() + workflow["10"]["width"], workflow["10"]["height"] = width, height + # workflow["17"]["width"], workflow["17"]["height"] = width, height + workflow["7"]["inputs"]["text"], workflow["8"]["inputs"]["text"] = positive_prompt, negative_prompt + # workflow["12"]["inputs"]["low_threshold"], workflow["12"]["inputs"]["high_threshold"] = low_threshold, high_threshold + workflow["9"]["inputs"]["steps"], workflow["9"]["inputs"]["cfg"] = steps, cfg + workflow["6"]["inputs"]["image"] = image_name + workflow["28"]["inputs"]["weight"] = query.strength + + return json.dumps(workflow), output_node_ids, [], [] + @staticmethod def transform_sdxl_controlnet_openpose_workflow(query: MLQueryObject): data_repo = DataRepo() @@ -432,6 +461,7 @@ def transform_video_upscaler_workflow(query: MLQueryObject): ML_MODEL.sdxl.workflow_name: ComfyDataTransform.transform_sdxl_workflow, ML_MODEL.sdxl_controlnet.workflow_name: ComfyDataTransform.transform_sdxl_controlnet_workflow, ML_MODEL.sdxl_controlnet_openpose.workflow_name: ComfyDataTransform.transform_sdxl_controlnet_openpose_workflow, + 
ML_MODEL.ipaadapter_compotion.workflow_name: ComfyDataTransform.transform_ipadaptor_composition_workflow, ML_MODEL.llama_2_7b.workflow_name: ComfyDataTransform.transform_llama_2_7b_workflow, ML_MODEL.sdxl_inpainting.workflow_name: ComfyDataTransform.transform_sdxl_inpainting_workflow, ML_MODEL.ipadapter_plus.workflow_name: ComfyDataTransform.transform_ipadaptor_plus_workflow, diff --git a/utils/ml_processor/comfy_workflows/ipa_composition_workflow_api.json b/utils/ml_processor/comfy_workflows/ipa_composition_workflow_api.json new file mode 100644 index 00000000..a94fd389 --- /dev/null +++ b/utils/ml_processor/comfy_workflows/ipa_composition_workflow_api.json @@ -0,0 +1,174 @@ +{ + "1": { + "inputs": { + "ckpt_name": "Realistic_Vision_V5.0.safetensors" + }, + "class_type": "CheckpointLoaderSimple", + "_meta": { + "title": "Load Checkpoint" + } + }, + "2": { + "inputs": { + "vae_name": "vae-ft-mse-840000-ema-pruned.safetensors" + }, + "class_type": "VAELoader", + "_meta": { + "title": "Load VAE" + } + }, + "3": { + "inputs": { + "ipadapter_file": "ip_plus_composition_sd15.safetensors" + }, + "class_type": "IPAdapterModelLoader", + "_meta": { + "title": "Load IPAdapter Model" + } + }, + "4": { + "inputs": { + "clip_name": "SD1.5/pytorch_model.bin" + }, + "class_type": "CLIPVisionLoader", + "_meta": { + "title": "Load CLIP Vision" + } + }, + "6": { + "inputs": { + "image": "Hulk_Hogan.jpg", + "upload": "image" + }, + "class_type": "LoadImage", + "_meta": { + "title": "Load Image" + } + }, + "7": { + "inputs": { + "text": "hulk hogan", + "clip": [ + "1", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } + }, + "8": { + "inputs": { + "text": "blurry, photo, malformed", + "clip": [ + "1", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } + }, + "9": { + "inputs": { + "seed": 16, + "steps": 30, + "cfg": 5, + "sampler_name": "dpmpp_2m_sde", + "scheduler": "exponential", + "denoise": 1, + "model": [ + "28", + 0 + ], + "positive": [ + "7", + 0 + ], + "negative": [ + "8", + 0 + ], + "latent_image": [ + "10", + 0 + ] + }, + "class_type": "KSampler", + "_meta": { + "title": "KSampler" + } + }, + "10": { + "inputs": { + "width": 512, + "height": 512, + "batch_size": 1 + }, + "class_type": "EmptyLatentImage", + "_meta": { + "title": "Empty Latent Image" + } + }, + "11": { + "inputs": { + "samples": [ + "9", + 0 + ], + "vae": [ + "2", + 0 + ] + }, + "class_type": "VAEDecode", + "_meta": { + "title": "VAE Decode" + } + }, + "27": { + "inputs": { + "filename_prefix": "ComfyUI", + "images": [ + "11", + 0 + ] + }, + "class_type": "SaveImage", + "_meta": { + "title": "Save Image" + } + }, + "28": { + "inputs": { + "weight": 1, + "noise": 0, + "weight_type": "original", + "start_at": 0, + "end_at": 1, + "unfold_batch": false, + "ipadapter": [ + "3", + 0 + ], + "clip_vision": [ + "4", + 0 + ], + "image": [ + "6", + 0 + ], + "model": [ + "1", + 0 + ] + }, + "class_type": "IPAdapterApply", + "_meta": { + "title": "Apply IPAdapter" + } + } +} \ No newline at end of file From 0bf995625998f92195fe641a152e672a0876a281 Mon Sep 17 00:00:00 2001 From: peteromallet Date: Sun, 17 Mar 2024 15:56:26 +0100 Subject: [PATCH 22/43] Fix --- utils/ml_processor/comfy_data_transform.py | 6 +++--- ...low_api.json => ipadapter_composition_workflow_api.json} | 0 2 files changed, 3 insertions(+), 3 deletions(-) rename utils/ml_processor/comfy_workflows/{ipa_composition_workflow_api.json => ipadapter_composition_workflow_api.json} 
(100%) diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index 3d1b7d1b..d7862f4b 100644 --- a/utils/ml_processor/comfy_data_transform.py +++ b/utils/ml_processor/comfy_data_transform.py @@ -17,7 +17,7 @@ ComfyWorkflow.SDXL: {"workflow_path": 'comfy_workflows/sdxl_workflow_api.json', "output_node_id": 19}, ComfyWorkflow.SDXL_IMG2IMG: {"workflow_path": 'comfy_workflows/sdxl_img2img_workflow_api.json', "output_node_id": 31}, ComfyWorkflow.SDXL_CONTROLNET: {"workflow_path": 'comfy_workflows/sdxl_controlnet_workflow_api.json', "output_node_id": 9}, - ComfyWorkflow.IPADAPTER_COMPOSITION: {"workflow_path": 'comfy_workflows/ipa_composition_workflow_api.json', "output_node_id": 27}, + ComfyWorkflow.IPADAPTER_COMPOSITION: {"workflow_path": 'comfy_workflows/ipadapter_composition_workflow_api.json', "output_node_id": 27}, ComfyWorkflow.SDXL_CONTROLNET_OPENPOSE: {"workflow_path": 'comfy_workflows/sdxl_openpose_workflow_api.json', "output_node_id": 9}, ComfyWorkflow.LLAMA_2_7B: {"workflow_path": 'comfy_workflows/llama_workflow_api.json', "output_node_id": 14}, ComfyWorkflow.SDXL_INPAINTING: {"workflow_path": 'comfy_workflows/sdxl_inpainting_workflow_api.json', "output_node_id": 56}, @@ -112,7 +112,7 @@ def transform_sdxl_controlnet_workflow(query: MLQueryObject): @staticmethod - def transform_ipadaptor_composition_workflow(query: MLQueryObject): + def transform_ipadapter_composition_workflow(query: MLQueryObject): data_repo = DataRepo() workflow, output_node_ids = ComfyDataTransform.get_workflow_json(ComfyWorkflow.IPADAPTER_COMPOSITION) @@ -461,7 +461,7 @@ def transform_video_upscaler_workflow(query: MLQueryObject): ML_MODEL.sdxl.workflow_name: ComfyDataTransform.transform_sdxl_workflow, ML_MODEL.sdxl_controlnet.workflow_name: ComfyDataTransform.transform_sdxl_controlnet_workflow, ML_MODEL.sdxl_controlnet_openpose.workflow_name: ComfyDataTransform.transform_sdxl_controlnet_openpose_workflow, - ML_MODEL.ipaadapter_compotion.workflow_name: ComfyDataTransform.transform_ipadaptor_composition_workflow, + ML_MODEL.ipadapter_composition.workflow_name: ComfyDataTransform.transform_ipadapter_composition_workflow, ML_MODEL.llama_2_7b.workflow_name: ComfyDataTransform.transform_llama_2_7b_workflow, ML_MODEL.sdxl_inpainting.workflow_name: ComfyDataTransform.transform_sdxl_inpainting_workflow, ML_MODEL.ipadapter_plus.workflow_name: ComfyDataTransform.transform_ipadaptor_plus_workflow, diff --git a/utils/ml_processor/comfy_workflows/ipa_composition_workflow_api.json b/utils/ml_processor/comfy_workflows/ipadapter_composition_workflow_api.json similarity index 100% rename from utils/ml_processor/comfy_workflows/ipa_composition_workflow_api.json rename to utils/ml_processor/comfy_workflows/ipadapter_composition_workflow_api.json From 0a361c7f4816cd0d05b7e2f7ff6717190cdd6859 Mon Sep 17 00:00:00 2001 From: peteromallet Date: Sun, 17 Mar 2024 16:10:19 +0100 Subject: [PATCH 23/43] Updating constants --- utils/ml_processor/constants.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/utils/ml_processor/constants.py b/utils/ml_processor/constants.py index 42c36127..3113abc8 100644 --- a/utils/ml_processor/constants.py +++ b/utils/ml_processor/constants.py @@ -15,6 +15,7 @@ class ComfyWorkflow(ExtendedEnum): STEERABLE_MOTION = "steerable_motion" SDXL_IMG2IMG = "sdxl_img2img" UPSCALER = "upscale" + IPADAPTER_COMPOSITION = "ipadapter_composition" @dataclass class MLModel: @@ -93,6 +94,9 @@ class ML_MODEL: ipadapter_face = MLModel(ComfyRunnerModel.name, 
ComfyRunnerModel.version, ComfyWorkflow.IP_ADAPTER_FACE) ipadapter_face_plus = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.IP_ADAPTER_FACE_PLUS) video_upscaler = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.UPSCALER) + ipadapter_composition = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.IPADAPTER_COMPOSITION) + + @staticmethod @@ -112,7 +116,8 @@ def get_model_by_db_obj(model_db_obj): ML_MODEL.ad_interpolation, ML_MODEL.ipadapter_face, ML_MODEL.ipadapter_face_plus, - ML_MODEL.ipadapter_plus + ML_MODEL.ipadapter_plus, + ML_MODEL.padapter_composition, ] DEFAULT_LORA_MODEL_URL = "https://replicate.delivery/pbxt/nWm6eP9ojwVvBCaWoWZVawOKRfgxPJmkVk13ES7PX36Y66kQA/tmpxuz6k_k2datazip.safetensors" From 1d98172cb94ce157a44a36be9734cdb4b828d798 Mon Sep 17 00:00:00 2001 From: peteromallet Date: Sun, 17 Mar 2024 16:12:31 +0100 Subject: [PATCH 24/43] fix --- utils/ml_processor/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/ml_processor/constants.py b/utils/ml_processor/constants.py index 3113abc8..266546d3 100644 --- a/utils/ml_processor/constants.py +++ b/utils/ml_processor/constants.py @@ -117,7 +117,7 @@ def get_model_by_db_obj(model_db_obj): ML_MODEL.ipadapter_face, ML_MODEL.ipadapter_face_plus, ML_MODEL.ipadapter_plus, - ML_MODEL.padapter_composition, + ML_MODEL.ipadapter_composition, ] DEFAULT_LORA_MODEL_URL = "https://replicate.delivery/pbxt/nWm6eP9ojwVvBCaWoWZVawOKRfgxPJmkVk13ES7PX36Y66kQA/tmpxuz6k_k2datazip.safetensors" From 1fa33e47a6638a56b07ba0506439ddf212ab4130 Mon Sep 17 00:00:00 2001 From: peteromallet Date: Sun, 17 Mar 2024 16:19:37 +0100 Subject: [PATCH 25/43] Tiny fix --- ui_components/components/explorer_page.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui_components/components/explorer_page.py b/ui_components/components/explorer_page.py index 7acfcc7d..6335f2af 100644 --- a/ui_components/components/explorer_page.py +++ b/ui_components/components/explorer_page.py @@ -227,7 +227,7 @@ def handle_image_input(column, type_of_generation, output_value_name, data_repo= data={'condition_scale': 1, "shot_uuid": shot_uuid} ) - output, log = ml_client.predict_model_output_standardized(ML_MODEL.IPADAPTER_COMPOSITION, query_obj, queue_inference=QUEUE_INFERENCE_QUERIES) + output, log = ml_client.predict_model_output_standardized(ML_MODEL.ipadapter_composition, query_obj, queue_inference=QUEUE_INFERENCE_QUERIES) elif generation_method == InputImageStyling.IPADAPTER_FACE.value: # validation From 44980208e0bba5ecf1570b11aeaf863a2ef2c4df Mon Sep 17 00:00:00 2001 From: peteromallet Date: Sun, 17 Mar 2024 16:25:57 +0100 Subject: [PATCH 26/43] fixing model --- .../comfy_workflows/ipadapter_composition_workflow_api.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/ml_processor/comfy_workflows/ipadapter_composition_workflow_api.json b/utils/ml_processor/comfy_workflows/ipadapter_composition_workflow_api.json index a94fd389..cc5a4cea 100644 --- a/utils/ml_processor/comfy_workflows/ipadapter_composition_workflow_api.json +++ b/utils/ml_processor/comfy_workflows/ipadapter_composition_workflow_api.json @@ -1,7 +1,7 @@ { "1": { "inputs": { - "ckpt_name": "Realistic_Vision_V5.0.safetensors" + "ckpt_name": "Realistic_Vision_V5.1.safetensors" }, "class_type": "CheckpointLoaderSimple", "_meta": { From f3bd2f8291b409297c24fe51b8adb2c7afed1b8e Mon Sep 17 00:00:00 2001 From: peteromallet Date: Mon, 18 Mar 2024 23:34:39 +0100 Subject: [PATCH 27/43] Adding 
dynamicrafter --- .../widgets/animation_style_element.py | 1297 +++++++++-------- utils/media_processor/interpolator.py | 89 +- utils/ml_processor/comfy_data_transform.py | 25 +- .../comfy_workflows/dynamicrafter_api.json | 109 ++ utils/ml_processor/constants.py | 3 + 5 files changed, 859 insertions(+), 664 deletions(-) create mode 100644 utils/ml_processor/comfy_workflows/dynamicrafter_api.json diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 6e8d3f38..6e0d0aa8 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -41,667 +41,740 @@ def animation_style_element(shot_uuid): st.markdown("### 🎥 Generate animations") st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") - with st.container(): - advanced1, advanced2, advanced3 = st.columns([1.0,1.5, 1.0]) + type_of_animation = st.radio("What type of animation would you like to generate?", options=["Batch Creative Interpolation", "2-Image Realistic Interpolation"], key="type_of_animation",horizontal=True, help="**Batch Creative Interpolaton** lets you input multple images and control the motion and style of each frame - resulting in a fluid, surreal and highly-controllable motion. \n\n **2-Image Realistic Interpolation** is a simpler way to generate animations - it generates a video by interpolating between two images, and is best for realistic motion.") - with advanced1: - st.markdown("##### Individual frame settings") - - items_per_row = 3 - strength_of_frames = [] - distances_to_next_frames = [] - speeds_of_transitions = [] - freedoms_between_frames = [] - individual_prompts = [] - individual_negative_prompts = [] - motions_during_frames = [] - shot_meta_data = {} + if type_of_animation == "Batch Creative Interpolation": - if len(timing_list) <= 1: - st.warning("You need at least two frames to generate a video.") - st.stop() + with st.container(): + advanced1, advanced2, advanced3 = st.columns([1.0,1.5, 1.0]) - open_advanced_settings = st_memory.toggle("Open all advanced settings", key="advanced_settings", value=False) + with advanced1: + st.markdown("##### Individual frame settings") + + items_per_row = 3 + strength_of_frames = [] + distances_to_next_frames = [] + speeds_of_transitions = [] + freedoms_between_frames = [] + individual_prompts = [] + individual_negative_prompts = [] + motions_during_frames = [] + shot_meta_data = {} - # setting default values to main shot settings - if f'lora_data_{shot.uuid}' not in st.session_state: - st.session_state[f'lora_data_{shot.uuid}'] = [] + if len(timing_list) <= 1: + st.warning("You need at least two frames to generate a video.") + st.stop() - if f'strength_of_adherence_value_{shot.uuid}' not in st.session_state: - st.session_state[f'strength_of_adherence_value_{shot.uuid}'] = 0.10 + open_advanced_settings = st_memory.toggle("Open all advanced settings", key="advanced_settings", value=False) - if f'type_of_motion_context_index_{shot.uuid}' not in st.session_state: - st.session_state[f'type_of_motion_context_index_{shot.uuid}'] = 1 + # setting default values to main shot settings + if f'lora_data_{shot.uuid}' not in st.session_state: + st.session_state[f'lora_data_{shot.uuid}'] = [] - if f'positive_prompt_video_{shot.uuid}' not in st.session_state: - st.session_state[f"positive_prompt_video_{shot.uuid}"] = "" + if f'strength_of_adherence_value_{shot.uuid}' not in st.session_state: + st.session_state[f'strength_of_adherence_value_{shot.uuid}'] = 0.10 - 
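                # Editor's illustrative sketch (hedged, not part of this patch): the per-shot defaults
                # above and below all follow the same "if key not in st.session_state: set default"
                # pattern; an equivalent loop over a defaults dict is shown here. The _shot_defaults
                # name is an assumption introduced for illustration; the values are the ones used in
                # the surrounding code.
                _shot_defaults = {
                    "strength_of_adherence_value": 0.10,
                    "type_of_motion_context_index": 1,
                    "positive_prompt_video": "",
                    "negative_prompt_video": "",
                    "ckpt": "",
                    "amount_of_motion": 1.3,
                }
                for _key, _default in _shot_defaults.items():
                    if f"{_key}_{shot.uuid}" not in st.session_state:
                        st.session_state[f"{_key}_{shot.uuid}"] = _default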
if f'negative_prompt_video_{shot.uuid}' not in st.session_state: - st.session_state[f"negative_prompt_video_{shot.uuid}"] = "" + if f'type_of_motion_context_index_{shot.uuid}' not in st.session_state: + st.session_state[f'type_of_motion_context_index_{shot.uuid}'] = 1 - if f'ckpt_{shot.uuid}' not in st.session_state: - st.session_state[f'ckpt_{shot.uuid}'] = "" - - if f"amount_of_motion_{shot.uuid}" not in st.session_state: - st.session_state[f"amount_of_motion_{shot.uuid}"] = 1.3 - - # loading settings of the last shot (if this shot is being loaded for the first time) - if f'strength_of_frame_{shot_uuid}_0' not in st.session_state: - load_shot_settings(shot.uuid) + if f'positive_prompt_video_{shot.uuid}' not in st.session_state: + st.session_state[f"positive_prompt_video_{shot.uuid}"] = "" + + if f'negative_prompt_video_{shot.uuid}' not in st.session_state: + st.session_state[f"negative_prompt_video_{shot.uuid}"] = "" + + if f'ckpt_{shot.uuid}' not in st.session_state: + st.session_state[f'ckpt_{shot.uuid}'] = "" - # ------------- Timing Frame and their settings ------------------- - for i in range(0, len(timing_list) , items_per_row): - with st.container(): - grid = st.columns([2 if j%2==0 else 1 for j in range(2*items_per_row)]) # Adjust the column widths - for j in range(items_per_row): - - idx = i + j - if idx < len(timing_list): - with grid[2*j]: # Adjust the index for image column - timing = timing_list[idx] - if timing.primary_image and timing.primary_image.location: + if f"amount_of_motion_{shot.uuid}" not in st.session_state: + st.session_state[f"amount_of_motion_{shot.uuid}"] = 1.3 + + # loading settings of the last shot (if this shot is being loaded for the first time) + if f'strength_of_frame_{shot_uuid}_0' not in st.session_state: + load_shot_settings(shot.uuid) + + # ------------- Timing Frame and their settings ------------------- + for i in range(0, len(timing_list) , items_per_row): + with st.container(): + grid = st.columns([2 if j%2==0 else 1 for j in range(2*items_per_row)]) # Adjust the column widths + for j in range(items_per_row): + + idx = i + j + if idx < len(timing_list): + with grid[2*j]: # Adjust the index for image column + timing = timing_list[idx] + if timing.primary_image and timing.primary_image.location: - st.info(f"**Frame {idx + 1}**") - - st.image(timing.primary_image.location, use_column_width=True) - - # settings control - with st.expander("Advanced settings:", expanded=open_advanced_settings): - # checking for newly added frames - if f'individual_prompt_{shot.uuid}_{idx}' not in st.session_state: - for k, v in DEFAULT_SHOT_MOTION_VALUES.items(): - st.session_state[f"{k}_{shot_uuid}_{idx}"] = v + st.info(f"**Frame {idx + 1}**") - individual_prompt = st.text_input("What to include:", key=f"individual_prompt_widget_{idx}_{timing.uuid}", value=st.session_state[f'individual_prompt_{shot.uuid}_{idx}'], help="Use this sparingly, as it can have a large impact on the video and cause weird distortions.") - individual_prompts.append(individual_prompt) - individual_negative_prompt = st.text_input("What to avoid:", key=f"negative_prompt_widget_{idx}_{timing.uuid}", value=st.session_state[f'individual_negative_prompt_{shot.uuid}_{idx}'],help="Use this sparingly, as it can have a large impact on the video and cause weird distortions.") - individual_negative_prompts.append(individual_negative_prompt) - strength1, strength2 = st.columns([1, 1]) - with strength1: - strength_of_frame = st.slider("Strength of current frame:", min_value=0.25, max_value=1.0, step=0.01, 
key=f"strength_of_frame_widget_{shot.uuid}_{idx}", value=st.session_state[f'strength_of_frame_{shot.uuid}_{idx}']) - strength_of_frames.append(strength_of_frame) - with strength2: - motion_during_frame = st.slider("Motion during frame:", min_value=0.5, max_value=1.5, step=0.01, key=f"motion_during_frame_widget_{idx}_{timing.uuid}", value=st.session_state[f'motion_during_frame_{shot.uuid}_{idx}']) - motions_during_frames.append(motion_during_frame) - else: - st.warning("No primary image present.") - - # distance, speed and freedom settings (also aggregates them into arrays) - with grid[2*j+1]: # Add the new column after the image column - if idx < len(timing_list) - 1: + st.image(timing.primary_image.location, use_column_width=True) + + # settings control + with st.expander("Advanced settings:", expanded=open_advanced_settings): + # checking for newly added frames + if f'individual_prompt_{shot.uuid}_{idx}' not in st.session_state: + for k, v in DEFAULT_SHOT_MOTION_VALUES.items(): + st.session_state[f"{k}_{shot_uuid}_{idx}"] = v + + individual_prompt = st.text_input("What to include:", key=f"individual_prompt_widget_{idx}_{timing.uuid}", value=st.session_state[f'individual_prompt_{shot.uuid}_{idx}'], help="Use this sparingly, as it can have a large impact on the video and cause weird distortions.") + individual_prompts.append(individual_prompt) + individual_negative_prompt = st.text_input("What to avoid:", key=f"negative_prompt_widget_{idx}_{timing.uuid}", value=st.session_state[f'individual_negative_prompt_{shot.uuid}_{idx}'],help="Use this sparingly, as it can have a large impact on the video and cause weird distortions.") + individual_negative_prompts.append(individual_negative_prompt) + strength1, strength2 = st.columns([1, 1]) + with strength1: + strength_of_frame = st.slider("Strength of current frame:", min_value=0.25, max_value=1.0, step=0.01, key=f"strength_of_frame_widget_{shot.uuid}_{idx}", value=st.session_state[f'strength_of_frame_{shot.uuid}_{idx}']) + strength_of_frames.append(strength_of_frame) + with strength2: + motion_during_frame = st.slider("Motion during frame:", min_value=0.5, max_value=1.5, step=0.01, key=f"motion_during_frame_widget_{idx}_{timing.uuid}", value=st.session_state[f'motion_during_frame_{shot.uuid}_{idx}']) + motions_during_frames.append(motion_during_frame) + else: + st.warning("No primary image present.") + + # distance, speed and freedom settings (also aggregates them into arrays) + with grid[2*j+1]: # Add the new column after the image column + if idx < len(timing_list) - 1: + + # if st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] is a int, make it a float + if isinstance(st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'], int): + st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] = float(st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}']) + distance_to_next_frame = st.slider("Seconds to next frame:", min_value=0.25, max_value=6.00, step=0.25, key=f"distance_to_next_frame_widget_{idx}_{timing.uuid}", value=st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}']) + distances_to_next_frames.append(distance_to_next_frame/2) + speed_of_transition = st.slider("Speed of transition:", min_value=0.45, max_value=0.7, step=0.01, key=f"speed_of_transition_widget_{idx}_{timing.uuid}", value=st.session_state[f'speed_of_transition_{shot.uuid}_{idx}']) + speeds_of_transitions.append(speed_of_transition) + freedom_between_frames = st.slider("Freedom between frames:", min_value=0.15, max_value=0.85, step=0.01, 
key=f"freedom_between_frames_widget_{idx}_{timing.uuid}", value=st.session_state[f'freedom_between_frames_{shot.uuid}_{idx}']) + freedoms_between_frames.append(freedom_between_frames) + + if (i < len(timing_list) - 1) or (len(timing_list) % items_per_row != 0): + st.markdown("***") + - # if st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] is a int, make it a float - if isinstance(st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'], int): - st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] = float(st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}']) - distance_to_next_frame = st.slider("Seconds to next frame:", min_value=0.25, max_value=6.00, step=0.25, key=f"distance_to_next_frame_widget_{idx}_{timing.uuid}", value=st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}']) - distances_to_next_frames.append(distance_to_next_frame/2) - speed_of_transition = st.slider("Speed of transition:", min_value=0.45, max_value=0.7, step=0.01, key=f"speed_of_transition_widget_{idx}_{timing.uuid}", value=st.session_state[f'speed_of_transition_{shot.uuid}_{idx}']) - speeds_of_transitions.append(speed_of_transition) - freedom_between_frames = st.slider("Freedom between frames:", min_value=0.15, max_value=0.85, step=0.01, key=f"freedom_between_frames_widget_{idx}_{timing.uuid}", value=st.session_state[f'freedom_between_frames_{shot.uuid}_{idx}']) - freedoms_between_frames.append(freedom_between_frames) - - if (i < len(timing_list) - 1) or (len(timing_list) % items_per_row != 0): - st.markdown("***") - + st.markdown("##### Style model") + tab1, tab2 = st.tabs(["Choose Model","Download Models"]) - st.markdown("##### Style model") - tab1, tab2 = st.tabs(["Choose Model","Download Models"]) - - checkpoints_dir = "ComfyUI/models/checkpoints" - all_files = os.listdir(checkpoints_dir) - if len(all_files) == 0: - model_files = [default_model] - - else: - # Filter files to only include those with .safetensors and .ckpt extensions - model_files = [file for file in all_files if file.endswith('.safetensors') or file.endswith('.ckpt')] - # drop all files that contain xl - model_files = [file for file in model_files if "xl" not in file] - - # Mapping of model names to their download URLs - sd_model_dict = { - "Anything V3 FP16 Pruned": { - "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/anything-v3-fp16-pruned.safetensors.tar", - "filename": "anything-v3-fp16-pruned.safetensors.tar" - }, - "Deliberate V2": { - "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/Deliberate_v2.safetensors.tar", - "filename": "Deliberate_v2.safetensors.tar" - }, - "Dreamshaper 8": { - "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/dreamshaper_8.safetensors.tar", - "filename": "dreamshaper_8.safetensors.tar" - }, - "epicrealism_pureEvolutionV5": { - "url": "https://civitai.com/api/download/models/134065", - "filename": "epicrealism_pureEvolutionv5.safetensors" - }, - "majicmixRealistic_v6": { - "url": "https://civitai.com/api/download/models/94640", - "filename": "majicmixRealistic_v6.safetensors" - }, - } - - cur_model = st.session_state[f'ckpt_{shot.uuid}'] - current_model_index = model_files.index(cur_model) if (cur_model and cur_model in model_files) else 0 - # st.session_state['sd_model_video'] = current_model_index - # ---------------- SELECT CKPT -------------- - with tab1: - model1, model2 = st.columns([1, 1]) - with model1: - sd_model = "" - def update_model(): - global sd_model - sd_model = checkpoints_dir + "/" + 
st.session_state['sd_model_video'] - - if model_files and len(model_files): - sd_model = st.selectbox( - label="Which model would you like to use?", - options=model_files, - key="sd_model_video", - index=current_model_index, - on_change=update_model - ) - else: - st.write("") - st.info("Default model Deliberate V2 would be selected") - with model2: - if len(all_files) == 0: - st.write("") - st.info("This is the default model - to download more, go to the Download Models tab.") - else: - st.write("") - st.info("To download more models, go to the Download Models tab.") - - # if it's in sd_model-list, just pass the name. If not, stick checkpoints_dir in front of it - # sd_model = checkpoints_dir + "/" + sd_model - - # ---------------- ADD CKPT --------------- - with tab2: - where_to_get_model = st.radio("Where would you like to get the model from?", options=["Our list", "Upload a model", "From a URL"], key="where_to_get_model") + checkpoints_dir = "ComfyUI/models/checkpoints" + all_files = os.listdir(checkpoints_dir) + if len(all_files) == 0: + model_files = [default_model] - if where_to_get_model == "Our list": - # Use the keys (model names) for the selection box - model_name_selected = st.selectbox("Which model would you like to download?", options=list(sd_model_dict.keys()), key="model_to_download") - - if st.button("Download Model", key="download_model"): - with st.spinner("Downloading model..."): - download_bar = st.progress(0, text="") - save_directory = "ComfyUI/models/checkpoints" - os.makedirs(save_directory, exist_ok=True) # Create the directory if it doesn't exist - - # Retrieve the URL using the selected model name - model_url = sd_model_dict[model_name_selected]["url"] + else: + # Filter files to only include those with .safetensors and .ckpt extensions + model_files = [file for file in all_files if file.endswith('.safetensors') or file.endswith('.ckpt')] + # drop all files that contain xl + model_files = [file for file in model_files if "xl" not in file] + + # Mapping of model names to their download URLs + sd_model_dict = { + "Anything V3 FP16 Pruned": { + "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/anything-v3-fp16-pruned.safetensors.tar", + "filename": "anything-v3-fp16-pruned.safetensors.tar" + }, + "Deliberate V2": { + "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/Deliberate_v2.safetensors.tar", + "filename": "Deliberate_v2.safetensors.tar" + }, + "Dreamshaper 8": { + "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/dreamshaper_8.safetensors.tar", + "filename": "dreamshaper_8.safetensors.tar" + }, + "epicrealism_pureEvolutionV5": { + "url": "https://civitai.com/api/download/models/134065", + "filename": "epicrealism_pureEvolutionv5.safetensors" + }, + "majicmixRealistic_v6": { + "url": "https://civitai.com/api/download/models/94640", + "filename": "majicmixRealistic_v6.safetensors" + }, + } + + cur_model = st.session_state[f'ckpt_{shot.uuid}'] + current_model_index = model_files.index(cur_model) if (cur_model and cur_model in model_files) else 0 + # st.session_state['sd_model_video'] = current_model_index + # ---------------- SELECT CKPT -------------- + with tab1: + model1, model2 = st.columns([1, 1]) + with model1: + sd_model = "" + def update_model(): + global sd_model + sd_model = checkpoints_dir + "/" + st.session_state['sd_model_video'] - # Download the model and save it to the directory - response = requests.get(model_url, stream=True) - zip_filename = 
sd_model_dict[model_name_selected]["filename"] - filepath = os.path.join(save_directory, zip_filename) - print("filepath: ", filepath) - if response.status_code == 200: - total_size = int(response.headers.get('content-length', 0)) - - with open(filepath, 'wb') as f: - received_bytes = 0 - - for data in response.iter_content(chunk_size=8192): - f.write(data) - received_bytes += len(data) - progress = received_bytes / total_size - download_bar.progress(progress) - - st.success(f"Downloaded {model_name_selected} to {save_directory}") - download_bar.empty() - - if model_url.endswith(".zip") or model_url.endswith(".tar"): - st.success("Extracting the zip file. Please wait...") - new_filepath = filepath.replace(zip_filename, "") - if model_url.endswith(".zip"): - with zipfile.ZipFile(f"{filepath}", "r") as zip_ref: - zip_ref.extractall(new_filepath) - else: - with tarfile.open(f"{filepath}", "r") as tar_ref: - tar_ref.extractall(new_filepath) - - os.remove(filepath) - st.rerun() + if model_files and len(model_files): + sd_model = st.selectbox( + label="Which model would you like to use?", + options=model_files, + key="sd_model_video", + index=current_model_index, + on_change=update_model + ) + else: + st.write("") + st.info("Default model Deliberate V2 would be selected") + with model2: + if len(all_files) == 0: + st.write("") + st.info("This is the default model - to download more, go to the Download Models tab.") + else: + st.write("") + st.info("To download more models, go to the Download Models tab.") - elif where_to_get_model == "Upload a model": - st.info("It's simpler to just drop this into the ComfyUI/models/checkpoints directory.") + # if it's in sd_model-list, just pass the name. If not, stick checkpoints_dir in front of it + # sd_model = checkpoints_dir + "/" + sd_model - elif where_to_get_model == "From a URL": - text1, text2 = st.columns([1, 1]) - with text1: + # ---------------- ADD CKPT --------------- + with tab2: + where_to_get_model = st.radio("Where would you like to get the model from?", options=["Our list", "Upload a model", "From a URL"], key="where_to_get_model") - text_input = st.text_input("Enter the URL of the model", key="text_input") - with text2: - st.info("Make sure to get the download url of the model. \n\n For example, from Civit, this should look like this: https://civitai.com/api/download/models/179446. 
\n\n While from Hugging Face, it should look like this: https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors") - if st.button("Download Model", key="download_model"): - with st.spinner("Downloading model..."): - save_directory = "ComfyUI/models/checkpoints" - os.makedirs(save_directory, exist_ok=True) - response = requests.get(text_input) - if response.status_code == 200: - with open(os.path.join(save_directory, text_input.split("/")[-1]), 'wb') as f: - f.write(response.content) - st.success(f"Downloaded model to {save_directory}") - else: - st.error("Failed to download model") - - # if it's in local DEVELOPMENT ENVIRONMENT - st.markdown("***") - st.markdown("##### Motion guidance") - tab1, tab2, tab3 = st.tabs(["Apply LoRAs","Download LoRAs","Train LoRAs"]) - - lora_data = [] - lora_file_dest = "ComfyUI/models/animatediff_motion_lora" - - # ---------------- ADD LORA ----------------- - with tab1: - # Initialize a single list to hold dictionaries for LoRA data - # Check if the directory exists and list files, or use a default list - if os.path.exists(lora_file_dest): - files = os.listdir(lora_file_dest) - # remove files that start with a dot - files = [file for file in files if not file.startswith(".")] - else: - files = [] - - # Iterate through each current LoRA in session state - if len(files) == 0: - st.error("No LoRAs found in the directory - go to Explore to download some, or drop them into ComfyUI/models/animatediff_motion_lora") - if st.button("Check again", key="check_again"): - st.rerun() - else: - # cleaning empty lora vals - for idx, lora in enumerate(st.session_state[f"lora_data_{shot.uuid}"]): - if not lora: - st.session_state[f"lora_data_{shot.uuid}"].pop(idx) - - for idx, lora in enumerate(st.session_state[f"lora_data_{shot.uuid}"]): - if not lora: - continue - h1, h2, h3, h4 = st.columns([1, 1, 1, 0.5]) - with h1: - file_idx = files.index(lora["filename"]) - which_lora = st.selectbox("Which LoRA would you like to use?", options=files, key=f"which_lora_{idx}", index=file_idx) + if where_to_get_model == "Our list": + # Use the keys (model names) for the selection box + model_name_selected = st.selectbox("Which model would you like to download?", options=list(sd_model_dict.keys()), key="model_to_download") - with h2: - strength_of_lora = st.slider("How strong would you like the LoRA to be?", min_value=0.0, max_value=1.0, value=lora["lora_strength"], step=0.01, key=f"strength_of_lora_{idx}") - lora_data.append({"filename": which_lora, "lora_strength": strength_of_lora, "filepath": lora_file_dest + "/" + which_lora}) - - with h3: - when_to_apply_lora = st.slider("When to apply the LoRA?", min_value=0, max_value=100, value=(0,100), step=1, key=f"when_to_apply_lora_{idx}",disabled=True,help="This feature is not yet available.") - - with h4: - st.write("") - if st.button("Remove", key=f"remove_lora_{idx}"): - st.session_state[f"lora_data_{shot.uuid}"].pop(idx) - st.rerun() - - if len(st.session_state[f"lora_data_{shot.uuid}"]) == 0: - text = "Add a LoRA" - else: - text = "Add another LoRA" - if st.button(text, key="add_motion_guidance"): - if files and len(files): - st.session_state[f"lora_data_{shot.uuid}"].append({ - "filename": files[0], - "lora_strength": 0.5, - "filepath": lora_file_dest + "/" + files[0] - }) - st.rerun() - # ---------------- DOWNLOAD LORA --------------- - with tab2: - text1, text2 = st.columns([1, 1]) - with text1: - where_to_download_from = st.radio("Where would you like to get the LoRA 
from?", options=["Our list", "From a URL","Upload a LoRA"], key="where_to_download_from", horizontal=True) - - if where_to_download_from == "Our list": - with text1: - file_links = [ - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_tony_stark_r64_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_train_r128_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/300_car_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_desert_48_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_jeep_driving_r32_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_man_running_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_rotation_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/750_jeep_driving_r32_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/300_zooming_in_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_cat_walking_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_playing_banjo_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_woman_dancing_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_zooming_out_temporal_unet.safetensors" - ] - - selected_lora_optn = st.selectbox("Which LoRA would you like to download?", options=[a.split("/")[-1] for a in file_links], key="selected_lora") - if st.button("Download LoRA", key="download_lora"): - with st.spinner("Downloading LoRA..."): - save_directory = "ComfyUI/models/animatediff_motion_lora" + if st.button("Download Model", key="download_model"): + with st.spinner("Downloading model..."): + download_bar = st.progress(0, text="") + save_directory = "ComfyUI/models/checkpoints" os.makedirs(save_directory, exist_ok=True) # Create the directory if it doesn't exist - # Extract the filename from the URL - selected_lora = next((ele for idx, ele in enumerate(file_links) if selected_lora_optn in ele), None) - filename = selected_lora.split("/")[-1] - save_path = os.path.join(save_directory, filename) + # Retrieve the URL using the selected model name + model_url = sd_model_dict[model_name_selected]["url"] - # Download the file - download_lora_bar = st.progress(0, text="") - response = requests.get(selected_lora, stream=True) + # Download the model and save it to the directory + response = requests.get(model_url, stream=True) + zip_filename = sd_model_dict[model_name_selected]["filename"] + filepath = os.path.join(save_directory, zip_filename) + print("filepath: ", filepath) if response.status_code == 200: total_size = int(response.headers.get('content-length', 0)) - with open(save_path, 'wb') as f: + + with open(filepath, 'wb') as f: received_bytes = 0 - + for data in response.iter_content(chunk_size=8192): f.write(data) received_bytes += 
len(data) progress = received_bytes / total_size - download_lora_bar.progress(progress) - - st.success(f"Downloaded LoRA to {save_path}") - download_lora_bar.empty() - st.rerun() - else: - st.error("Failed to download LoRA") - - elif where_to_download_from == "From a URL": + download_bar.progress(progress) + + st.success(f"Downloaded {model_name_selected} to {save_directory}") + download_bar.empty() + + if model_url.endswith(".zip") or model_url.endswith(".tar"): + st.success("Extracting the zip file. Please wait...") + new_filepath = filepath.replace(zip_filename, "") + if model_url.endswith(".zip"): + with zipfile.ZipFile(f"{filepath}", "r") as zip_ref: + zip_ref.extractall(new_filepath) + else: + with tarfile.open(f"{filepath}", "r") as tar_ref: + tar_ref.extractall(new_filepath) + + os.remove(filepath) + st.rerun() + + elif where_to_get_model == "Upload a model": + st.info("It's simpler to just drop this into the ComfyUI/models/checkpoints directory.") - with text1: - text_input = st.text_input("Enter the URL of the LoRA", key="text_input_lora") - with text2: - st.write("") - st.write("") - st.write("") - st.info("Make sure to get the download url of the LoRA. \n\n For example, from Hugging Face, it should look like this: https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors") - with text1: - if st.button("Download LoRA", key="download_lora"): - with st.spinner("Downloading LoRA..."): - save_directory = "ComfyUI/models/animatediff_motion_lora" + elif where_to_get_model == "From a URL": + text1, text2 = st.columns([1, 1]) + with text1: + + text_input = st.text_input("Enter the URL of the model", key="text_input") + with text2: + st.info("Make sure to get the download url of the model. \n\n For example, from Civit, this should look like this: https://civitai.com/api/download/models/179446. 
\n\n While from Hugging Face, it should look like this: https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors") + if st.button("Download Model", key="download_model"): + with st.spinner("Downloading model..."): + save_directory = "ComfyUI/models/checkpoints" os.makedirs(save_directory, exist_ok=True) response = requests.get(text_input) if response.status_code == 200: with open(os.path.join(save_directory, text_input.split("/")[-1]), 'wb') as f: f.write(response.content) - st.success(f"Downloaded LoRA to {save_directory}") + st.success(f"Downloaded model to {save_directory}") else: - st.error("Failed to download LoRA") - elif where_to_download_from == "Upload a LoRA": - st.info("It's simpler to just drop this into the ComfyUI/models/animatediff_motion_lora directory.") - # ---------------- TRAIN LORA -------------- - with tab3: - b1, b2 = st.columns([1, 1]) - with b1: - st.error("This feature is not yet available.") - name_this_lora = st.text_input("Name this LoRA", key="name_this_lora") - describe_the_motion = st.text_area("Describe the motion", key="describe_the_motion") - training_video = st.file_uploader("Upload a video to train a new LoRA", type=["mp4"]) - - if st.button("Train LoRA", key="train_lora", use_container_width=True): - st.write("Training LoRA") - - st.markdown("***") - st.markdown("##### Overall style settings") - - e1, e2, e3 = st.columns([1, 1,1]) - with e1: - strength_of_adherence = st.slider("How much would you like to force adherence to the input images?", min_value=0.0, max_value=1.0, step=0.01, key="strength_of_adherence", value=st.session_state[f"strength_of_adherence_value_{shot.uuid}"]) - with e2: - st.info("Higher values may cause flickering and sudden changes in the video. Lower values may cause the video to be less influenced by the input images but can lead to smoother motion and better colours.") - - f1, f2, f3 = st.columns([1, 1, 1]) - with f1: - overall_positive_prompt = "" - def update_prompt(): - global overall_positive_prompt - overall_positive_prompt = st.session_state[f"positive_prompt_video_{shot.uuid}"] - - overall_positive_prompt = st.text_area( - "What would you like to see in the videos?", - key="overall_positive_prompt", - value=st.session_state[f"positive_prompt_video_{shot.uuid}"], - on_change=update_prompt - ) - with f2: - overall_negative_prompt = st.text_area( - "What would you like to avoid in the videos?", - key="overall_negative_prompt", - value=st.session_state[f"negative_prompt_video_{shot.uuid}"] - ) - - with f3: - st.write("") - st.write("") - st.info("Use these sparingly, as they can have a large impact on the video. You can also edit them for individual frames in the advanced settings above.") - - st.markdown("***") - st.markdown("##### Overall motion settings") - h1, h2, h3 = st.columns([0.5, 1.5, 1]) - with h1: - # will fix this later - if f"type_of_motion_context_index_{shot.uuid}" in st.session_state and isinstance(st.session_state[f"type_of_motion_context_index_{shot.uuid}"], str): - st.session_state[f"type_of_motion_context_index_{shot.uuid}"] = ["Low", "Standard", "High"].index(st.session_state[f"type_of_motion_context_index_{shot.uuid}"]) - type_of_motion_context = st.radio("Type of motion context:", options=["Low", "Standard", "High"], key="type_of_motion_context", horizontal=False, index=st.session_state[f"type_of_motion_context_index_{shot.uuid}"]) - - with h2: - st.info("This is how much the motion will be informed by the previous and next frames. 
'High' can make it smoother but increase artifacts - while 'Low' make the motion less smooth but removes artifacts. Naturally, we recommend Standard.") - st.write("") - i1, i3,_ = st.columns([1,2,1]) - with i1: - amount_of_motion = st.slider("Amount of motion:", min_value=0.5, max_value=1.5, step=0.01, key="amount_of_motion", value=st.session_state[f"amount_of_motion_{shot.uuid}"]) - st.write("") - if st.button("Bulk update amount of motion", key="update_motion", help="This will update this value in all the frames"): - for idx, timing in enumerate(timing_list): - st.session_state[f'motion_during_frame_{shot.uuid}_{idx}'] = amount_of_motion - st.success("Updated amount of motion") - time.sleep(0.3) - st.rerun() - with i3: - st.write("") - st.write("") - st.info("This actually updates the motion during frames in the advanced settings above - but we put it here because it has a big impact on the video. You can scroll up to see the changes and tweak for individual frames.") - - type_of_frame_distribution = "dynamic" - type_of_key_frame_influence = "dynamic" - type_of_strength_distribution = "dynamic" - linear_frame_distribution_value = 16 - linear_key_frame_influence_value = 1.0 - linear_cn_strength_value = 1.0 - relative_ipadapter_strength = 1.0 - relative_cn_strength = 0.0 - project_settings = data_repo.get_project_setting(shot.project.uuid) - width = project_settings.width - height = project_settings.height - img_dimension = f"{width}x{height}" - motion_scale = 1.3 - interpolation_style = 'ease-in-out' - buffer = 4 - - - (dynamic_strength_values, dynamic_key_frame_influence_values, dynamic_frame_distribution_values, - context_length, context_stride, context_overlap, multipled_base_end_percent, multipled_base_adapter_strength, - prompt_travel, negative_prompt_travel, motion_scales) = transform_data(strength_of_frames, - freedoms_between_frames, speeds_of_transitions, distances_to_next_frames, type_of_motion_context, - strength_of_adherence,individual_prompts, individual_negative_prompts, buffer, motions_during_frames) - - - settings.update( - ckpt=sd_model, - width=width, - height=height, - buffer=4, - motion_scale=motion_scale, - motion_scales=motion_scales, - image_dimension=img_dimension, - output_format="video/h264-mp4", - prompt=overall_positive_prompt, - negative_prompt=overall_negative_prompt, - interpolation_type=interpolation_style, - stmfnet_multiplier=2, - relative_ipadapter_strength=relative_ipadapter_strength, - relative_cn_strength=relative_cn_strength, - type_of_strength_distribution=type_of_strength_distribution, - linear_strength_value=str(linear_cn_strength_value), - dynamic_strength_values=str(dynamic_strength_values), - linear_frame_distribution_value=linear_frame_distribution_value, - dynamic_frame_distribution_values=dynamic_frame_distribution_values, - type_of_frame_distribution=type_of_frame_distribution, - type_of_key_frame_influence=type_of_key_frame_influence, - linear_key_frame_influence_value=float(linear_key_frame_influence_value), - dynamic_key_frame_influence_values=dynamic_key_frame_influence_values, - normalise_speed=True, - ipadapter_noise=0.3, - animation_style=AnimationStyleType.CREATIVE_INTERPOLATION.value, - context_length=context_length, - context_stride=context_stride, - context_overlap=context_overlap, - multipled_base_end_percent=multipled_base_end_percent, - multipled_base_adapter_strength=multipled_base_adapter_strength, - individual_prompts=prompt_travel, - individual_negative_prompts=negative_prompt_travel, - 
animation_stype=AnimationStyleType.CREATIVE_INTERPOLATION.value, - max_frames=str(dynamic_frame_distribution_values[-1]), - lora_data=lora_data, - shot_data=shot_meta_data - ) - - position = "generate_vid" - st.markdown("***") - st.markdown("##### Generation Settings") + st.error("Failed to download model") + + # if it's in local DEVELOPMENT ENVIRONMENT + st.markdown("***") + st.markdown("##### Motion guidance") + tab1, tab2, tab3 = st.tabs(["Apply LoRAs","Download LoRAs","Train LoRAs"]) - animate_col_1, animate_col_2, _ = st.columns([3, 1, 1]) - with animate_col_1: - variant_count = st.number_input("How many variants?", min_value=1, max_value=5, value=1, step=1, key="variant_count") + lora_data = [] + lora_file_dest = "ComfyUI/models/animatediff_motion_lora" - if "generate_vid_generate_inference" in st.session_state and st.session_state["generate_vid_generate_inference"]: - # last keyframe position * 16 - duration = float(dynamic_frame_distribution_values[-1] / 16) - data_repo.update_shot(uuid=shot.uuid, duration=duration) - shot_data = update_session_state_with_animation_details( - shot.uuid, - timing_list, - strength_of_frames, - distances_to_next_frames, - speeds_of_transitions, - freedoms_between_frames, - motions_during_frames, - individual_prompts, - individual_negative_prompts, - lora_data - ) - settings.update(shot_data=shot_data) - vid_quality = "full" # TODO: add this if video_resolution == "Full Resolution" else "preview" - st.success("Generating clip - see status in the Generation Log in the sidebar. Press 'Refresh log' to update.") - - positive_prompt = "" - append_to_prompt = "" # TODO: add this - for idx, timing in enumerate(timing_list): - if timing.primary_image and timing.primary_image.location: - b = timing.primary_image.inference_params - prompt = b.get("prompt", "") if b else "" - prompt += append_to_prompt - frame_prompt = f"{idx * linear_frame_distribution_value}_" + prompt - positive_prompt += ":" + frame_prompt if positive_prompt else frame_prompt - else: - st.error("Please generate primary images") - time.sleep(0.7) + # ---------------- ADD LORA ----------------- + with tab1: + # Initialize a single list to hold dictionaries for LoRA data + # Check if the directory exists and list files, or use a default list + if os.path.exists(lora_file_dest): + files = os.listdir(lora_file_dest) + # remove files that start with a dot + files = [file for file in files if not file.startswith(".")] + else: + files = [] + + # Iterate through each current LoRA in session state + if len(files) == 0: + st.error("No LoRAs found in the directory - go to Explore to download some, or drop them into ComfyUI/models/animatediff_motion_lora") + if st.button("Check again", key="check_again"): st.rerun() + else: + # cleaning empty lora vals + for idx, lora in enumerate(st.session_state[f"lora_data_{shot.uuid}"]): + if not lora: + st.session_state[f"lora_data_{shot.uuid}"].pop(idx) + + for idx, lora in enumerate(st.session_state[f"lora_data_{shot.uuid}"]): + if not lora: + continue + h1, h2, h3, h4 = st.columns([1, 1, 1, 0.5]) + with h1: + file_idx = files.index(lora["filename"]) + which_lora = st.selectbox("Which LoRA would you like to use?", options=files, key=f"which_lora_{idx}", index=file_idx) - if f'{shot_uuid}_backlog_enabled' not in st.session_state: - st.session_state[f'{shot_uuid}_backlog_enabled'] = False - - create_single_interpolated_clip( - shot_uuid, - vid_quality, - settings, - variant_count, - st.session_state[f'{shot_uuid}_backlog_enabled'] - ) + with h2: + strength_of_lora = 
st.slider("How strong would you like the LoRA to be?", min_value=0.0, max_value=1.0, value=lora["lora_strength"], step=0.01, key=f"strength_of_lora_{idx}") + lora_data.append({"filename": which_lora, "lora_strength": strength_of_lora, "filepath": lora_file_dest + "/" + which_lora}) + + with h3: + when_to_apply_lora = st.slider("When to apply the LoRA?", min_value=0, max_value=100, value=(0,100), step=1, key=f"when_to_apply_lora_{idx}",disabled=True,help="This feature is not yet available.") + + with h4: + st.write("") + if st.button("Remove", key=f"remove_lora_{idx}"): + st.session_state[f"lora_data_{shot.uuid}"].pop(idx) + st.rerun() + + if len(st.session_state[f"lora_data_{shot.uuid}"]) == 0: + text = "Add a LoRA" + else: + text = "Add another LoRA" + if st.button(text, key="add_motion_guidance"): + if files and len(files): + st.session_state[f"lora_data_{shot.uuid}"].append({ + "filename": files[0], + "lora_strength": 0.5, + "filepath": lora_file_dest + "/" + files[0] + }) + st.rerun() + # ---------------- DOWNLOAD LORA --------------- + with tab2: + text1, text2 = st.columns([1, 1]) + with text1: + where_to_download_from = st.radio("Where would you like to get the LoRA from?", options=["Our list", "From a URL","Upload a LoRA"], key="where_to_download_from", horizontal=True) + + if where_to_download_from == "Our list": + with text1: + file_links = [ + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_tony_stark_r64_temporal_unet.safetensors", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_train_r128_temporal_unet.safetensors", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/300_car_temporal_unet.safetensors", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_desert_48_temporal_unet.safetensors", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_temporal_unet.safetensors", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_jeep_driving_r32_temporal_unet.safetensors", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_man_running_temporal_unet.safetensors", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_rotation_temporal_unet.safetensors", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/750_jeep_driving_r32_temporal_unet.safetensors", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/300_zooming_in_temporal_unet.safetensors", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_cat_walking_temporal_unet.safetensors", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_playing_banjo_temporal_unet.safetensors", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_woman_dancing_temporal_unet.safetensors", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_zooming_out_temporal_unet.safetensors" + ] + + selected_lora_optn = st.selectbox("Which LoRA would you like to download?", options=[a.split("/")[-1] for a in file_links], key="selected_lora") + if st.button("Download LoRA", key="download_lora"): + with st.spinner("Downloading LoRA..."): + save_directory = "ComfyUI/models/animatediff_motion_lora" + os.makedirs(save_directory, exist_ok=True) # Create the directory if it 
doesn't exist + + # Extract the filename from the URL + selected_lora = next((ele for idx, ele in enumerate(file_links) if selected_lora_optn in ele), None) + filename = selected_lora.split("/")[-1] + save_path = os.path.join(save_directory, filename) + + # Download the file + download_lora_bar = st.progress(0, text="") + response = requests.get(selected_lora, stream=True) + if response.status_code == 200: + total_size = int(response.headers.get('content-length', 0)) + with open(save_path, 'wb') as f: + received_bytes = 0 + + for data in response.iter_content(chunk_size=8192): + f.write(data) + received_bytes += len(data) + progress = received_bytes / total_size + download_lora_bar.progress(progress) + + st.success(f"Downloaded LoRA to {save_path}") + download_lora_bar.empty() + st.rerun() + else: + st.error("Failed to download LoRA") - backlog_update = {f'{shot_uuid}_backlog_enabled': False} - toggle_generate_inference(position, **backlog_update) - st.rerun() + elif where_to_download_from == "From a URL": + + with text1: + text_input = st.text_input("Enter the URL of the LoRA", key="text_input_lora") + with text2: + st.write("") + st.write("") + st.write("") + st.info("Make sure to get the download url of the LoRA. \n\n For example, from Hugging Face, it should look like this: https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors") + with text1: + if st.button("Download LoRA", key="download_lora"): + with st.spinner("Downloading LoRA..."): + save_directory = "ComfyUI/models/animatediff_motion_lora" + os.makedirs(save_directory, exist_ok=True) + response = requests.get(text_input) + if response.status_code == 200: + with open(os.path.join(save_directory, text_input.split("/")[-1]), 'wb') as f: + f.write(response.content) + st.success(f"Downloaded LoRA to {save_directory}") + else: + st.error("Failed to download LoRA") + elif where_to_download_from == "Upload a LoRA": + st.info("It's simpler to just drop this into the ComfyUI/models/animatediff_motion_lora directory.") + # ---------------- TRAIN LORA -------------- + with tab3: + b1, b2 = st.columns([1, 1]) + with b1: + st.error("This feature is not yet available.") + name_this_lora = st.text_input("Name this LoRA", key="name_this_lora") + describe_the_motion = st.text_area("Describe the motion", key="describe_the_motion") + training_video = st.file_uploader("Upload a video to train a new LoRA", type=["mp4"]) + + if st.button("Train LoRA", key="train_lora", use_container_width=True): + st.write("Training LoRA") + + st.markdown("***") + st.markdown("##### Overall style settings") + + e1, e2, e3 = st.columns([1, 1,1]) + with e1: + strength_of_adherence = st.slider("How much would you like to force adherence to the input images?", min_value=0.0, max_value=1.0, step=0.01, key="strength_of_adherence", value=st.session_state[f"strength_of_adherence_value_{shot.uuid}"]) + with e2: + st.info("Higher values may cause flickering and sudden changes in the video. 
Lower values may cause the video to be less influenced by the input images but can lead to smoother motion and better colours.") + + f1, f2, f3 = st.columns([1, 1, 1]) + with f1: + overall_positive_prompt = "" + def update_prompt(): + global overall_positive_prompt + overall_positive_prompt = st.session_state[f"positive_prompt_video_{shot.uuid}"] + + overall_positive_prompt = st.text_area( + "What would you like to see in the videos?", + key="overall_positive_prompt", + value=st.session_state[f"positive_prompt_video_{shot.uuid}"], + on_change=update_prompt + ) + with f2: + overall_negative_prompt = st.text_area( + "What would you like to avoid in the videos?", + key="overall_negative_prompt", + value=st.session_state[f"negative_prompt_video_{shot.uuid}"] + ) - btn1, btn2, btn3 = st.columns([1, 1, 1]) - backlog_no_update = {f'{shot_uuid}_backlog_enabled': False} - with btn1: - st.button("Add to queue", key="generate_animation_clip", disabled=disable_generate, help=help, on_click=lambda: toggle_generate_inference(position, **backlog_no_update),type="primary",use_container_width=True) + with f3: + st.write("") + st.write("") + st.info("Use these sparingly, as they can have a large impact on the video. You can also edit them for individual frames in the advanced settings above.") + + st.markdown("***") + st.markdown("##### Overall motion settings") + h1, h2, h3 = st.columns([0.5, 1.5, 1]) + with h1: + # will fix this later + if f"type_of_motion_context_index_{shot.uuid}" in st.session_state and isinstance(st.session_state[f"type_of_motion_context_index_{shot.uuid}"], str): + st.session_state[f"type_of_motion_context_index_{shot.uuid}"] = ["Low", "Standard", "High"].index(st.session_state[f"type_of_motion_context_index_{shot.uuid}"]) + type_of_motion_context = st.radio("Type of motion context:", options=["Low", "Standard", "High"], key="type_of_motion_context", horizontal=False, index=st.session_state[f"type_of_motion_context_index_{shot.uuid}"]) + + with h2: + st.info("This is how much the motion will be informed by the previous and next frames. 'High' can make it smoother but increase artifacts - while 'Low' make the motion less smooth but removes artifacts. Naturally, we recommend Standard.") + st.write("") + i1, i3,_ = st.columns([1,2,1]) + with i1: + amount_of_motion = st.slider("Amount of motion:", min_value=0.5, max_value=1.5, step=0.01, key="amount_of_motion", value=st.session_state[f"amount_of_motion_{shot.uuid}"]) + st.write("") + if st.button("Bulk update amount of motion", key="update_motion", help="This will update this value in all the frames"): + for idx, timing in enumerate(timing_list): + st.session_state[f'motion_during_frame_{shot.uuid}_{idx}'] = amount_of_motion + st.success("Updated amount of motion") + time.sleep(0.3) + st.rerun() + with i3: + st.write("") + st.write("") + st.info("This actually updates the motion during frames in the advanced settings above - but we put it here because it has a big impact on the video. 
You can scroll up to see the changes and tweak for individual frames.") + + type_of_frame_distribution = "dynamic" + type_of_key_frame_influence = "dynamic" + type_of_strength_distribution = "dynamic" + linear_frame_distribution_value = 16 + linear_key_frame_influence_value = 1.0 + linear_cn_strength_value = 1.0 + relative_ipadapter_strength = 1.0 + relative_cn_strength = 0.0 + project_settings = data_repo.get_project_setting(shot.project.uuid) + width = project_settings.width + height = project_settings.height + img_dimension = f"{width}x{height}" + motion_scale = 1.3 + interpolation_style = 'ease-in-out' + buffer = 4 + + + (dynamic_strength_values, dynamic_key_frame_influence_values, dynamic_frame_distribution_values, + context_length, context_stride, context_overlap, multipled_base_end_percent, multipled_base_adapter_strength, + prompt_travel, negative_prompt_travel, motion_scales) = transform_data(strength_of_frames, + freedoms_between_frames, speeds_of_transitions, distances_to_next_frames, type_of_motion_context, + strength_of_adherence,individual_prompts, individual_negative_prompts, buffer, motions_during_frames) + + + + settings.update( + ckpt=sd_model, + width=width, + height=height, + buffer=4, + motion_scale=motion_scale, + motion_scales=motion_scales, + image_dimension=img_dimension, + output_format="video/h264-mp4", + prompt=overall_positive_prompt, + negative_prompt=overall_negative_prompt, + interpolation_type=interpolation_style, + stmfnet_multiplier=2, + relative_ipadapter_strength=relative_ipadapter_strength, + relative_cn_strength=relative_cn_strength, + type_of_strength_distribution=type_of_strength_distribution, + linear_strength_value=str(linear_cn_strength_value), + dynamic_strength_values=str(dynamic_strength_values), + linear_frame_distribution_value=linear_frame_distribution_value, + dynamic_frame_distribution_values=dynamic_frame_distribution_values, + type_of_frame_distribution=type_of_frame_distribution, + type_of_key_frame_influence=type_of_key_frame_influence, + linear_key_frame_influence_value=float(linear_key_frame_influence_value), + dynamic_key_frame_influence_values=dynamic_key_frame_influence_values, + normalise_speed=True, + ipadapter_noise=0.3, + animation_style=AnimationStyleType.CREATIVE_INTERPOLATION.value, + context_length=context_length, + context_stride=context_stride, + context_overlap=context_overlap, + multipled_base_end_percent=multipled_base_end_percent, + multipled_base_adapter_strength=multipled_base_adapter_strength, + individual_prompts=prompt_travel, + individual_negative_prompts=negative_prompt_travel, + animation_stype=AnimationStyleType.CREATIVE_INTERPOLATION.value, + max_frames=str(dynamic_frame_distribution_values[-1]), + lora_data=lora_data, + shot_data=shot_meta_data + ) - backlog_update = {f'{shot_uuid}_backlog_enabled': True} - with btn2: - st.button("Add to backlog", key="generate_animation_clip_backlog", disabled=disable_generate, help=backlog_help, on_click=lambda: toggle_generate_inference(position, **backlog_update),type="secondary") - - - with st.sidebar: - with st.expander("⚙️ Animation settings", expanded=True): - if st_memory.toggle("Open", key="open_motion_data"): - - st.markdown("### Visualisation of current motion") - keyframe_positions = get_keyframe_positions(type_of_frame_distribution, dynamic_frame_distribution_values, timing_list, linear_frame_distribution_value) - keyframe_positions = [int(kf * 16) for kf in keyframe_positions] - last_key_frame_position = (keyframe_positions[-1]) - strength_values = 
extract_strength_values(type_of_strength_distribution, dynamic_strength_values, keyframe_positions, linear_cn_strength_value) - key_frame_influence_values = extract_influence_values(type_of_key_frame_influence, dynamic_key_frame_influence_values, keyframe_positions, linear_key_frame_influence_value) - weights_list, frame_numbers_list = calculate_weights(keyframe_positions, strength_values, 4, key_frame_influence_values,last_key_frame_position) - plot_weights(weights_list, frame_numbers_list) + position = "generate_vid" + st.markdown("***") + st.markdown("##### Generation Settings") + + animate_col_1, animate_col_2, _ = st.columns([3, 1, 1]) + with animate_col_1: + variant_count = st.number_input("How many variants?", min_value=1, max_value=5, value=1, step=1, key="variant_count") + + if "generate_vid_generate_inference" in st.session_state and st.session_state["generate_vid_generate_inference"]: + + st.success("Generating clip - see status in the Generation Log in the sidebar. Press 'Refresh log' to update.") + # last keyframe position * 16 + duration = float(dynamic_frame_distribution_values[-1] / 16) + data_repo.update_shot(uuid=shot.uuid, duration=duration) + shot_data = update_session_state_with_animation_details( + shot.uuid, + timing_list, + strength_of_frames, + distances_to_next_frames, + speeds_of_transitions, + freedoms_between_frames, + motions_during_frames, + individual_prompts, + individual_negative_prompts, + lora_data + ) + settings.update(shot_data=shot_data) + vid_quality = "full" # TODO: add this if video_resolution == "Full Resolution" else "preview" - st.markdown("***") - bulk1, bulk2 = st.columns([1, 1]) - with bulk1: - st.markdown("### Bulk edit frame settings") - with bulk2: - if st.button("Reset to Default", use_container_width=True, key="reset_to_default"): - for idx, timing in enumerate(timing_list): - for k, v in DEFAULT_SHOT_MOTION_VALUES.items(): - st.session_state[f'{k}_{shot.uuid}_{idx}'] = v - - st.success("All frames have been reset to default values.") - st.rerun() - - editable_entity = st.selectbox("What would you like to edit?", options=["Seconds to next frames", "Speed of transitions", "Freedom between frames","Strength of frames","Motion during frames"], key="editable_entity") - if editable_entity == "Seconds to next frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=6.00, step=0.25, value=1.0, key="entity_new_val") - if editable_entity == "Strength of frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=1.0, step=0.01, value=0.5, key="entity_new_val") - elif editable_entity == "Speed of transitions": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.45, max_value=0.7, step=0.01, value=0.6, key="entity_new_val") - elif editable_entity == "Freedom between frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.15, max_value=0.85, step=0.01, value=0.5, key="entity_new_val") - elif editable_entity == "Motion during frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.5, max_value=1.5, step=0.01, value=1.3, key="entity_new_val") - - bulk1, bulk2 = st.columns([1, 1]) - with bulk1: - if st.button("Bulk edit", key="bulk_edit", use_container_width=True): - if editable_entity == "Strength of frames": - for idx, timing in enumerate(timing_list): - st.session_state[f'strength_of_frame_{shot.uuid}_{idx}'] = entity_new_val - elif editable_entity == "Seconds to 
next frames": - for idx, timing in enumerate(timing_list): - st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] = entity_new_val - elif editable_entity == "Speed of transitions": - for idx, timing in enumerate(timing_list): - st.session_state[f'speed_of_transition_{shot.uuid}_{idx}'] = entity_new_val - elif editable_entity == "Freedom between frames": - for idx, timing in enumerate(timing_list): - st.session_state[f'freedom_between_frames_{shot.uuid}_{idx}'] = entity_new_val - elif editable_entity == "Motion during frames": - for idx, timing in enumerate(timing_list): - st.session_state[f'motion_during_frame_{shot.uuid}_{idx}'] = entity_new_val - st.rerun() - - st.markdown("***") - st.markdown("### Save current settings") - if st.button("Save current settings", key="save_current_settings",use_container_width=True,help="Settings will also be saved when you generate the animation."): - update_session_state_with_animation_details(shot.uuid, timing_list, strength_of_frames, distances_to_next_frames, speeds_of_transitions, freedoms_between_frames, motions_during_frames, individual_prompts, individual_negative_prompts) - st.success("Settings saved successfully.") + positive_prompt = "" + append_to_prompt = "" # TODO: add this + for idx, timing in enumerate(timing_list): + if timing.primary_image and timing.primary_image.location: + b = timing.primary_image.inference_params + prompt = b.get("prompt", "") if b else "" + prompt += append_to_prompt + frame_prompt = f"{idx * linear_frame_distribution_value}_" + prompt + positive_prompt += ":" + frame_prompt if positive_prompt else frame_prompt + else: + st.error("Please generate primary images") time.sleep(0.7) st.rerun() + + if f'{shot_uuid}_backlog_enabled' not in st.session_state: + st.session_state[f'{shot_uuid}_backlog_enabled'] = False + + create_single_interpolated_clip( + shot_uuid, + vid_quality, + settings, + variant_count, + st.session_state[f'{shot_uuid}_backlog_enabled'] + ) + + backlog_update = {f'{shot_uuid}_backlog_enabled': False} + toggle_generate_inference(position, **backlog_update) + st.rerun() + + btn1, btn2, btn3 = st.columns([1, 1, 1]) + backlog_no_update = {f'{shot_uuid}_backlog_enabled': False} + with btn1: + st.button("Add to queue", key="generate_animation_clip", disabled=disable_generate, help=help, on_click=lambda: toggle_generate_inference(position, **backlog_no_update),type="primary",use_container_width=True) + + backlog_update = {f'{shot_uuid}_backlog_enabled': True} + with btn2: + st.button("Add to backlog", key="generate_animation_clip_backlog", disabled=disable_generate, help=backlog_help, on_click=lambda: toggle_generate_inference(position, **backlog_update),type="secondary") + + + with st.sidebar: + with st.expander("⚙️ Animation settings", expanded=True): + if st_memory.toggle("Open", key="open_motion_data"): + + st.markdown("### Visualisation of current motion") + keyframe_positions = get_keyframe_positions(type_of_frame_distribution, dynamic_frame_distribution_values, timing_list, linear_frame_distribution_value) + keyframe_positions = [int(kf * 16) for kf in keyframe_positions] + last_key_frame_position = (keyframe_positions[-1]) + strength_values = extract_strength_values(type_of_strength_distribution, dynamic_strength_values, keyframe_positions, linear_cn_strength_value) + key_frame_influence_values = extract_influence_values(type_of_key_frame_influence, dynamic_key_frame_influence_values, keyframe_positions, linear_key_frame_influence_value) + weights_list, frame_numbers_list = 
calculate_weights(keyframe_positions, strength_values, 4, key_frame_influence_values,last_key_frame_position) + plot_weights(weights_list, frame_numbers_list) + + st.markdown("***") + + bulk1, bulk2 = st.columns([1, 1]) + with bulk1: + st.markdown("### Bulk edit frame settings") + with bulk2: + if st.button("Reset to Default", use_container_width=True, key="reset_to_default"): + for idx, timing in enumerate(timing_list): + for k, v in DEFAULT_SHOT_MOTION_VALUES.items(): + st.session_state[f'{k}_{shot.uuid}_{idx}'] = v + + st.success("All frames have been reset to default values.") + st.rerun() + + editable_entity = st.selectbox("What would you like to edit?", options=["Seconds to next frames", "Speed of transitions", "Freedom between frames","Strength of frames","Motion during frames"], key="editable_entity") + if editable_entity == "Seconds to next frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=6.00, step=0.25, value=1.0, key="entity_new_val") + if editable_entity == "Strength of frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=1.0, step=0.01, value=0.5, key="entity_new_val") + elif editable_entity == "Speed of transitions": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.45, max_value=0.7, step=0.01, value=0.6, key="entity_new_val") + elif editable_entity == "Freedom between frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.15, max_value=0.85, step=0.01, value=0.5, key="entity_new_val") + elif editable_entity == "Motion during frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.5, max_value=1.5, step=0.01, value=1.3, key="entity_new_val") + + bulk1, bulk2 = st.columns([1, 1]) + with bulk1: + if st.button("Bulk edit", key="bulk_edit", use_container_width=True): + if editable_entity == "Strength of frames": + for idx, timing in enumerate(timing_list): + st.session_state[f'strength_of_frame_{shot.uuid}_{idx}'] = entity_new_val + elif editable_entity == "Seconds to next frames": + for idx, timing in enumerate(timing_list): + st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] = entity_new_val + elif editable_entity == "Speed of transitions": + for idx, timing in enumerate(timing_list): + st.session_state[f'speed_of_transition_{shot.uuid}_{idx}'] = entity_new_val + elif editable_entity == "Freedom between frames": + for idx, timing in enumerate(timing_list): + st.session_state[f'freedom_between_frames_{shot.uuid}_{idx}'] = entity_new_val + elif editable_entity == "Motion during frames": + for idx, timing in enumerate(timing_list): + st.session_state[f'motion_during_frame_{shot.uuid}_{idx}'] = entity_new_val + st.rerun() + + st.markdown("***") + st.markdown("### Save current settings") + if st.button("Save current settings", key="save_current_settings",use_container_width=True,help="Settings will also be saved when you generate the animation."): + update_session_state_with_animation_details(shot.uuid, timing_list, strength_of_frames, distances_to_next_frames, speeds_of_transitions, freedoms_between_frames, motions_during_frames, individual_prompts, individual_negative_prompts) + st.success("Settings saved successfully.") + time.sleep(0.7) + st.rerun() + + elif type_of_animation == "2-Image Realistic Interpolation": + + col1, col2, col3 = st.columns([1, 1, 1]) + for i in range(0, len(timing_list), 2): # Iterate two items at a time + if i < len(timing_list): + 
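For reference, all of the per-frame values edited in bulk above live in Streamlit session state under keys of the form f"{setting}_{shot_uuid}_{idx}". A minimal sketch of that pattern, using a plain dict and a placeholder shot id in place of st.session_state and the real timing_list:

    # Sketch only: mirrors the bulk-edit loops above with placeholder values.
    def bulk_set_frame_setting(session_state: dict, shot_uuid: str, frame_count: int,
                               setting: str, value: float) -> None:
        for idx in range(frame_count):
            # e.g. "strength_of_frame_<shot_uuid>_0", "motion_during_frame_<shot_uuid>_2", ...
            session_state[f"{setting}_{shot_uuid}_{idx}"] = value

    state = {}
    bulk_set_frame_setting(state, "demo-shot", 3, "motion_during_frame", 1.3)
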
timing_first = timing_list[i] + if timing_first.primary_image and timing_first.primary_image.location: + with col1: + st.image(timing_first.primary_image.location, use_column_width=True) + + if i + 1 < len(timing_list): + timing_second = timing_list[i + 1] + if timing_second.primary_image and timing_second.primary_image.location: + with col3: + st.image(timing_second.primary_image.location, use_column_width=True) + + with col2: + description_of_motion = st.text_area("Describe the motion you want between the frames:", key="description_of_motion") + st.info("This is very important and will likely require some iteration.") + + variant_count = 1 # Assuming a default value for variant_count, adjust as necessary + vid_quality = "full" # Assuming full quality, adjust as necessary based on your requirements + + if "generate_vid_generate_inference" in st.session_state and st.session_state["generate_vid_generate_inference"]: + + st.success("Generating clip - see status in the Generation Log in the sidebar. Press 'Refresh log' to update.") + # Assuming the logic to generate the clip based on two images, the described motion, and fixed duration + duration = 4 # Fixed duration of 4 seconds + data_repo.update_shot(uuid=shot.uuid, duration=duration) + + project_settings = data_repo.get_project_setting(shot.project.uuid) + + settings.update( + duration= duration, + animation_style=AnimationStyleType.DIRECT_MORPHING.value, + output_format="video/h264-mp4", + width=project_settings.width, + height=project_settings.height, + prompt=description_of_motion + ) + + create_single_interpolated_clip( + shot_uuid, + vid_quality, + settings, + variant_count, + st.session_state[f'{shot_uuid}_backlog_enabled'] + ) + + backlog_update = {f'{shot_uuid}_backlog_enabled': False} + toggle_generate_inference(position, **backlog_update) + # settings.update(shot_data=shot_data) # Save compiled shot_data into settings + st.rerun() + + + # Placeholder for the logic to generate the clip and update session state as needed + # This should include calling the function that handles the interpolation process with the updated settings + + # Buttons for adding to queue or backlog, assuming these are still relevant + btn1, btn2, btn3 = st.columns([1, 1, 1]) + with btn1: + st.button("Add to queue", key="generate_2_image_interpolation_clip", disabled=False, help="Generate the interpolation clip based on the two images and described motion.", on_click=lambda: toggle_generate_inference("generate_vid"), type="primary", use_container_width=True) + backlog_update = {f'{shot_uuid}_backlog_enabled': True} + with btn2: + st.button("Add to backlog", key="generate_2_image_interpolation_clip_backlog", disabled=False, help="Add the 2-Image Realistic Interpolation to the backlog.", on_click=lambda: toggle_generate_inference("generate_vid", **backlog_update), type="secondary") # --------------------- METHODS ----------------------- def toggle_generate_inference(position, **kwargs): for k,v in kwargs.items(): diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index 57198fdd..deb1bfb0 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -54,6 +54,8 @@ def create_interpolated_clip(img_location_list, animation_style, settings, varia return VideoInterpolator.video_through_direct_morphing( img_location_list, settings, + variant_count, + queue_inference, backlog ) @@ -62,26 +64,10 @@ def create_interpolated_clip(img_location_list, animation_style, settings, varia @staticmethod 
def video_through_frame_interpolation(img_location_list, settings, variant_count, queue_inference=False, backlog=False): ml_client = get_ml_client() - # zip_filename = zip_images(img_location_list) - # zip_url = ml_client.upload_training_data(zip_filename, delete_after_upload=True) - # print("zipped file url: ", zip_url) - # animation_tool = settings['animation_tool'] if 'animation_tool' in settings else AnimationToolType.G_FILM.value final_res = [] for _ in range(variant_count): - # if animation_tool == AnimationToolType.G_FILM.value: - # res = ml_client.predict_model_output( - # ML_MODEL.google_frame_interpolation, - # frame1=img1, - # frame2=img2, - # times_to_interpolate=settings['interpolation_steps'], - # queue_inference=queue_inference - # ) - - # since workflows can have multiple input params it's not standardized yet - # elif animation_tool == AnimationToolType.ANIMATEDIFF.value: - # defaulting to animatediff interpolation if True: # NOTE: @Peter these are all the settings you passed in from the UI sm_data = { @@ -155,37 +141,42 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count @staticmethod - def video_through_direct_morphing(img_location_list, settings, backlog=False): - def load_image(image_path_or_url): - if image_path_or_url.startswith("http"): - response = r.get(image_path_or_url) - image = np.asarray(bytearray(response.content), dtype="uint8") - image = cv2.imdecode(image, cv2.IMREAD_COLOR) - else: - image = cv2.imread(image_path_or_url) - - return image - - img1 = load_image(img_location_list[0]) - img2 = load_image(img_location_list[1]) + def video_through_direct_morphing(img_location_list, settings, variant_count, queue_inference=False, backlog=False): + ml_client = get_ml_client() + + final_res = [] + for _ in range(variant_count): + + if True: + # NOTE: @Peter these are all the settings you passed in from the UI + sm_data = { + "width": settings['width'] + } + + for idx, img_uuid in enumerate(settings['file_uuid_list']): + sm_data[f"file_image_{padded_integer(idx+1)}" + "_uuid"] = img_uuid + + ml_query_object = MLQueryObject( + prompt="SM", # hackish fix + timing_uuid=None, + model_uuid=None, + guidance_scale=None, + seed=None, + num_inference_steps=None, + strength=None, + adapter_type=None, + negative_prompt="", + height=512, + width=512, + image_uuid=None, + mask_uuid=None, + data=sm_data + ) + res = ml_client.predict_model_output_standardized(ML_MODEL.dynamicrafter, ml_query_object, QUEUE_INFERENCE_QUERIES, backlog) + + final_res.append(res) + + return final_res + + - if img1 is None or img2 is None: - raise ValueError("Could not read one or both of the images.") - - num_frames = settings['interpolation_steps'] # Number of frames in the video - video_frames = [] - - for alpha in np.linspace(0, 1, num_frames): - morphed_image = cv2.addWeighted(img1, alpha, img2, 1 - alpha, 0) - video_frames.append(morphed_image) - - fourcc = cv2.VideoWriter_fourcc(*"avc1") - video_bytes = [] - for frame in video_frames: - ret, frame_bytes = cv2.imencode('.mp4', frame, fourcc) - if not ret: - raise ValueError("Failed to encode video frame") - video_bytes.append(frame_bytes.tobytes()) - - video_data = b''.join(video_bytes) - return [(video_data, InferenceLogObject({}))] # returning None for inference log \ No newline at end of file diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index d7862f4b..06d07621 100644 --- a/utils/ml_processor/comfy_data_transform.py +++ 
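The rewritten video_through_direct_morphing above queues a DynamiCrafter job by packing the keyframe image UUIDs into the query's data payload. A minimal sketch of that payload shape, assuming a zero-padded four-digit index (consistent with the file_image_0001_uuid lookups used later in comfy_data_transform.py); padded_integer is re-implemented here only so the snippet runs standalone:

    # Sketch of the sm_data payload built in video_through_direct_morphing above.
    def padded_integer(i: int, width: int = 4) -> str:
        # stand-in for the project's own helper
        return str(i).zfill(width)

    def build_sm_data(width: int, file_uuid_list: list) -> dict:
        sm_data = {"width": width}
        for idx, img_uuid in enumerate(file_uuid_list):
            # produces keys like "file_image_0001_uuid", "file_image_0002_uuid", ...
            sm_data[f"file_image_{padded_integer(idx + 1)}_uuid"] = img_uuid
        return sm_data

    # Example: two keyframe image UUIDs, as used by the 2-image interpolation flow.
    print(build_sm_data(512, ["uuid-a", "uuid-b"]))
    # {'width': 512, 'file_image_0001_uuid': 'uuid-a', 'file_image_0002_uuid': 'uuid-b'}
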
b/utils/ml_processor/comfy_data_transform.py @@ -25,7 +25,8 @@ ComfyWorkflow.IP_ADAPTER_FACE: {"workflow_path": 'comfy_workflows/ipadapter_face_api.json', "output_node_id": 29}, ComfyWorkflow.IP_ADAPTER_FACE_PLUS: {"workflow_path": 'comfy_workflows/ipadapter_face_plus_api.json', "output_node_id": 29}, ComfyWorkflow.STEERABLE_MOTION: {"workflow_path": 'comfy_workflows/steerable_motion_api.json', "output_node_id": 281}, - ComfyWorkflow.UPSCALER: {"workflow_path": 'comfy_workflows/video_upscaler_api.json', "output_node_id": 243} + ComfyWorkflow.UPSCALER: {"workflow_path": 'comfy_workflows/video_upscaler_api.json', "output_node_id": 243}, + ComfyWorkflow.DYNAMICRAFTER: {"workflow_path": 'comfy_workflows/dynamicrafter_api.json', "output_node_id": 2} } @@ -348,8 +349,7 @@ def update_json_with_loras(json_data, loras): sm_data = query.data.get('data', {}) workflow, output_node_ids = ComfyDataTransform.get_workflow_json(ComfyWorkflow.STEERABLE_MOTION) workflow = update_json_with_loras(workflow, sm_data.get('lora_data')) - - print(sm_data) + workflow['464']['inputs']['height'] = sm_data.get('height') workflow['464']['inputs']['width'] = sm_data.get('width') @@ -399,6 +399,24 @@ def update_json_with_loras(json_data, loras): ignore_list = sm_data.get("lora_data", []) return json.dumps(workflow), output_node_ids, [], ignore_list + + @staticmethod + def transform_dynamicrafter_workflow(query: MLQueryObject): + data_repo = DataRepo() + workflow, output_node_ids = ComfyDataTransform.get_workflow_json(ComfyWorkflow.DYNAMICRAFTER) + sm_data = query.data.get('data', {}) + # get the first images from settings - it was put there in a list like this: settings.update(file_uuid_list=[t.primary_image.uuid for t in timing_list]) + import streamlit as st + st.write(sm_data) + # wriet file_image_0001_uuid to file_image_0005_uuid + st.write(sm_data.get('file_image_0001_uuid')) + image_1 = data_repo.get_file_from_uuid(sm_data.get('file_image_0001_uuid')) + image_2 = data_repo.get_file_from_uuid(sm_data.get('file_image_0002_uuid')) + + + + + @staticmethod def transform_video_upscaler_workflow(query: MLQueryObject): data_repo = DataRepo() @@ -468,6 +486,7 @@ def transform_video_upscaler_workflow(query: MLQueryObject): ML_MODEL.ipadapter_face.workflow_name: ComfyDataTransform.transform_ipadaptor_face_workflow, ML_MODEL.ipadapter_face_plus.workflow_name: ComfyDataTransform.transform_ipadaptor_face_plus_workflow, ML_MODEL.ad_interpolation.workflow_name: ComfyDataTransform.transform_steerable_motion_workflow, + ML_MODEL.dynamicrafter.workflow_name: ComfyDataTransform.transform_dynamicrafter_workflow, ML_MODEL.sdxl_img2img.workflow_name: ComfyDataTransform.transform_sdxl_img2img_workflow, ML_MODEL.video_upscaler.workflow_name: ComfyDataTransform.transform_video_upscaler_workflow } diff --git a/utils/ml_processor/comfy_workflows/dynamicrafter_api.json b/utils/ml_processor/comfy_workflows/dynamicrafter_api.json new file mode 100644 index 00000000..d35ca185 --- /dev/null +++ b/utils/ml_processor/comfy_workflows/dynamicrafter_api.json @@ -0,0 +1,109 @@ +{ + "2": { + "inputs": { + "frame_rate": 12, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": true, + "images": [ + "34", + 0 + ] + }, + "class_type": "VHS_VideoCombine", + "_meta": { + "title": "Video Combine 🎥🅥🅗🅢" + } + }, + "11": { + "inputs": { + "ckpt_name": "dynamicrafter_512_interp_v1.ckpt", + "dtype": "auto" + }, + "class_type": 
"DynamiCrafterModelLoader", + "_meta": { + "title": "DynamiCrafterModelLoader" + } + }, + "12": { + "inputs": { + "steps": 50, + "cfg": 4, + "eta": 1, + "frames": 16, + "prompt": "dolly zoom", + "seed": 898857724114134, + "fs": 10, + "keep_model_loaded": true, + "vae_dtype": "auto", + "model": [ + "11", + 0 + ], + "images": [ + "15", + 0 + ] + }, + "class_type": "DynamiCrafterBatchInterpolation", + "_meta": { + "title": "DynamiCrafterBatchInterpolation" + } + }, + "15": { + "inputs": { + "image1": [ + "16", + 0 + ], + "image2": [ + "17", + 0 + ] + }, + "class_type": "ImageBatch", + "_meta": { + "title": "Batch Images" + } + }, + "16": { + "inputs": { + "image": "ComfyUI_temp_fshjc_00001_.png", + "upload": "image" + }, + "class_type": "LoadImage", + "_meta": { + "title": "Load Image" + } + }, + "17": { + "inputs": { + "image": "ComfyUI_temp_fshjc_00002_ (1).png", + "upload": "image" + }, + "class_type": "LoadImage", + "_meta": { + "title": "Load Image" + } + }, + "34": { + "inputs": { + "ckpt_name": "film_net_fp32.pt", + "clear_cache_after_n_frames": 10, + "multiplier": 3, + "frames": [ + "12", + 0 + ] + }, + "class_type": "FILM VFI", + "_meta": { + "title": "FILM VFI" + } + } +} \ No newline at end of file diff --git a/utils/ml_processor/constants.py b/utils/ml_processor/constants.py index 266546d3..0c11016e 100644 --- a/utils/ml_processor/constants.py +++ b/utils/ml_processor/constants.py @@ -16,6 +16,7 @@ class ComfyWorkflow(ExtendedEnum): SDXL_IMG2IMG = "sdxl_img2img" UPSCALER = "upscale" IPADAPTER_COMPOSITION = "ipadapter_composition" + DYNAMICRAFTER = "dynamicrafter" @dataclass class MLModel: @@ -82,6 +83,7 @@ class ML_MODEL: sdxl_controlnet = MLModel("lucataco/sdxl-controlnet", "db2ffdbdc7f6cb4d6dab512434679ee3366ae7ab84f89750f8947d5594b79a47", ComfyWorkflow.SDXL_CONTROLNET) realistic_vision_v5_img2img = MLModel("lucataco/realistic-vision-v5-img2img", "82bbb4595458d6be142450fc6d8c4d79c936b92bd184dd2d6dd71d0796159819") ad_interpolation = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.STEERABLE_MOTION) + dynamicrafter = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.DYNAMICRAFTER) # addition 17/10/2023 llama_2_7b = MLModel("meta/llama-2-7b", "527827021d8756c7ab79fde0abbfaac885c37a3ed5fe23c7465093f0878d55ef", ComfyWorkflow.LLAMA_2_7B) @@ -114,6 +116,7 @@ def get_model_by_db_obj(model_db_obj): ML_MODEL.sdxl_img2img, ML_MODEL.sdxl_inpainting, ML_MODEL.ad_interpolation, + ML_MODEL.dynamicrafter, ML_MODEL.ipadapter_face, ML_MODEL.ipadapter_face_plus, ML_MODEL.ipadapter_plus, From e7d9106dbd76fc61417c060da517f864f9e06f93 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Mon, 18 Mar 2024 11:11:54 +0000 Subject: [PATCH 28/43] motion lora training added --- banodoco_runner.py | 24 +- shared/constants.py | 1 + ui_components/methods/common_methods.py | 55 +++ ui_components/methods/file_methods.py | 70 ++- ui_components/methods/ml_methods.py | 47 +- .../widgets/animation_style_element.py | 69 ++- utils/local_storage/local_storage.py | 65 ++- utils/ml_processor/comfy_data_transform.py | 64 ++- .../comfy_workflows/motion_lora_api.json | 459 ++++++++++++++++++ .../comfy_workflows/motion_lora_test_api.json | 270 +++++++++++ utils/ml_processor/constants.py | 2 + 11 files changed, 1032 insertions(+), 94 deletions(-) create mode 100644 utils/ml_processor/comfy_workflows/motion_lora_api.json create mode 100644 utils/ml_processor/comfy_workflows/motion_lora_test_api.json diff --git a/banodoco_runner.py b/banodoco_runner.py index 97bdeec6..26a08636 100644 --- 
a/banodoco_runner.py +++ b/banodoco_runner.py @@ -21,7 +21,7 @@ from ui_components.methods.file_methods import get_file_bytes_and_extension, load_from_env, save_or_host_file_bytes, save_to_env from utils.common_utils import acquire_lock, release_lock from utils.data_repo.data_repo import DataRepo -from utils.ml_processor.constants import replicate_status_map +from utils.ml_processor.constants import ComfyWorkflow, replicate_status_map from utils.constants import RUNNER_PROCESS_NAME, RUNNER_PROCESS_PORT, AUTH_TOKEN, REFRESH_AUTH_TOKEN from utils.ml_processor.gpu.utils import is_comfy_runner_present, predict_gpu_output, setup_comfy_runner @@ -176,13 +176,19 @@ def find_process_by_port(port): return pid -def stop_server(self, port): +def stop_server(port): pid = find_process_by_port(port) if pid: app_logger.log(LoggingType.DEBUG, "comfy server stopped") process = psutil.Process(pid) process.terminate() process.wait() + +def format_model_output(output, model_display_name): + if model_display_name and model_display_name == ComfyWorkflow.MOTION_LORA.value: + return output + else: + return [output[-1]] def check_and_update_db(): # print("updating logs") @@ -308,11 +314,15 @@ def check_and_update_db(): data['output_node_ids'], data.get("extra_model_list", []), data.get("ignore_model_list", [])) end_time = time.time() - output = output[-1] # TODO: different models can have different logic - destination_path = "./videos/temp/" + str(uuid.uuid4()) + "." + output.split(".")[-1] - shutil.copy2("./output/" + output, destination_path) + res_output = format_model_output(output, log.model_name) + destination_path_list = [] + for output in res_output: + destination_path = "./videos/temp/" + str(uuid.uuid4()) + "." + output.split(".")[-1] + shutil.copy2("./output/" + output, destination_path) + destination_path_list.append(destination_path) + output_details = json.loads(log.output_details) - output_details['output'] = destination_path + output_details['output'] = destination_path_list[0] if len(destination_path_list) == 1 else destination_path_list update_data = { "status" : InferenceStatus.COMPLETED.value, "output_details" : json.dumps(output_details), @@ -321,7 +331,7 @@ def check_and_update_db(): InferenceLog.objects.filter(id=log.id).update(**update_data) origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, {}) - origin_data['output'] = destination_path + origin_data['output'] = destination_path_list[0] if len(destination_path_list) == 1 else destination_path_list origin_data['log_uuid'] = log.uuid print("processing inference output") diff --git a/shared/constants.py b/shared/constants.py index 3a105436..e05d373c 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -82,6 +82,7 @@ class InferenceType(ExtendedEnum): FRAME_INTERPOLATION = "frame_interpolation" # for generating single/multiple interpolated videos GALLERY_IMAGE_GENERATION = "gallery_image_generation" # for generating gallery images FRAME_INPAINTING = "frame_inpainting" # for generating inpainted frames + MOTION_LORA_TRAINING = "motion_lora_training" # for training new motion loras class InferenceStatus(ExtendedEnum): QUEUED = "queued" diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index aeb4a39a..f47b1f78 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -3,6 +3,7 @@ from typing import List import os from PIL import Image, ImageDraw, ImageFilter +from utils.local_storage.local_storage import 
write_to_motion_lora_local_db from moviepy.editor import * import cv2 import requests as r @@ -824,12 +825,66 @@ def process_inference_output(**kwargs): del kwargs['log_uuid'] data_repo.update_inference_log_origin_data(log_uuid, **kwargs) + # --------------------- MOTION LORA TRAINING -------------------------- + elif inference_type == InferenceType.MOTION_LORA_TRAINING.value: + output = kwargs.get('output') + log_uuid = kwargs.get('log_uuid') + + if output and len(output): + # output is a list of generated videos + # we store video_url <--> motion_lora map in a json file + + # NOTE: need to convert 'lora_trainer' into a separate module if it needs to work on hosted version + # fetching the current generated loras + spatial_lora_path = os.path.join('ComfyUI', 'models', 'loras', 'trained_spatial') + temporal_lora_path = os.path.join('ComfyUI', 'models', 'animatediff_motion_lora') + lora_path = temporal_lora_path + _, latest_trained_files = get_latest_project_files(lora_path) + + cur_idx, data = 0, {} + for vid in output: + if vid.endswith(".gif"): + data[latest_trained_files[cur_idx]] = vid + cur_idx += 1 + + write_to_motion_lora_local_db(data) + else: + del kwargs['log_uuid'] + data_repo.update_inference_log_origin_data(log_uuid, **kwargs) + if inference_time: credits_used = round(inference_time * 0.004, 3) # make this more granular for different models data_repo.update_usage_credits(-credits_used, log_uuid) return True +def get_latest_project_files(parent_directory): + latest_project = None + latest_time = 0 + + for date_folder in os.listdir(parent_directory): + date_folder_path = os.path.join(parent_directory, date_folder) + + if os.path.isdir(date_folder_path): + for time_folder in os.listdir(date_folder_path): + time_folder_path = os.path.join(date_folder_path, time_folder) + + if os.path.isdir(time_folder_path): + for project_name_folder in os.listdir(time_folder_path): + project_folder_path = os.path.join(time_folder_path, project_name_folder) + + if os.path.isdir(project_folder_path): + creation_time = os.path.getctime(project_folder_path) + + if creation_time > latest_time: + latest_time = creation_time + latest_project = project_folder_path + + if latest_project: + latest_files = sorted(os.listdir(latest_project)) + return latest_project, latest_files + else: + return None, None def check_project_meta_data(project_uuid): ''' diff --git a/ui_components/methods/file_methods.py b/ui_components/methods/file_methods.py index 6f8f6332..b27b94ce 100644 --- a/ui_components/methods/file_methods.py +++ b/ui_components/methods/file_methods.py @@ -15,6 +15,7 @@ import cv2 import numpy as np import uuid +from moviepy.editor import VideoFileClip from dotenv import set_key, get_key import requests import streamlit as st @@ -27,29 +28,35 @@ def save_or_host_file(file, path, mime_type='image/png', dim=None): data_repo = DataRepo() uploaded_url = None + file_type, file_ext = mime_type.split("/") try: - # TODO: fix session state management, remove direct access outside the main code - if dim: - width, height = dim[0], dim[1] - elif 'project_uuid' in st.session_state and st.session_state['project_uuid']: - project_setting = data_repo.get_project_setting(st.session_state['project_uuid']) - width, height = project_setting.width, project_setting.height - else: - # Default dimensions for new project - width, height = 512, 512 - # Apply zoom and crop based on determined dimensions - file = zoom_and_crop(file, width, height) - # download PIL image to bytee + if file_type == "image": + # TODO: fix session 
state management, remove direct access outside the main code + if dim: + width, height = dim[0], dim[1] + elif 'project_uuid' in st.session_state and st.session_state['project_uuid']: + project_setting = data_repo.get_project_setting(st.session_state['project_uuid']) + width, height = project_setting.width, project_setting.height + else: + # Default dimensions for new project + width, height = 512, 512 + # Apply zoom and crop based on determined dimensions + file = zoom_and_crop(file, width, height) if SERVER != ServerType.DEVELOPMENT.value: - image_bytes = BytesIO() - file.save(image_bytes, format=mime_type.split('/')[1]) - image_bytes.seek(0) + file_bytes = BytesIO() + file.save(file_bytes, format=file_ext) + file_bytes.seek(0) - uploaded_url = data_repo.upload_file(image_bytes, '.png') + uploaded_url = data_repo.upload_file(file_bytes, '.' + file_ext) else: os.makedirs(os.path.dirname(path), exist_ok=True) - file.save(path) + if file_type == "image": + file.save(path) + else: + with open(path, "wb") as f: + f.write(file.read()) + except Exception as e: # Log the error. You can replace 'print' with logging to a file or external logging service. print(f"Error saving or hosting file: {e}") @@ -434,4 +441,31 @@ def get_file_size(file_path): else: print("File does not exist:", file_path) - return int(file_size / (1024 * 1024)) \ No newline at end of file + return int(file_size / (1024 * 1024)) + +def get_media_dimensions(media_path): + try: + if media_path.endswith(('.mp4', '.avi', '.mov', '.mkv')): + video_clip = VideoFileClip(media_path) + width = video_clip.size[0] + height = video_clip.size[1] + return width, height + else: + with Image.open(media_path) as img: + width, height = img.size + return width, height + except Exception as e: + print(f"Error: {e}") + return None + +# fetches all the files (including subfolders) in a directory +def get_files_in_a_directory(directory, ext_list=[]): + res = [] + + if os.path.exists(directory): + for root, _, files in os.walk(directory): + for file in files: + if ext_list and len(ext_list) and file.split(".")[-1] in ext_list: + res.append(file) #(os.path.join(root, file)) + + return res \ No newline at end of file diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 3609a4af..2c357b23 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -188,12 +188,51 @@ # emotion = (f"neutral expression") # return emotion +# NOTE: don't update max_step, its logic is hardcoded at the moment +def train_motion_lora(input_video: InternalFileObject, lora_prompt: str, lora_name: str, width, height, ckpt, max_step = 500): + query_obj = MLQueryObject( + timing_uuid=None, + model_uuid=None, + guidance_scale=7.5, + seed=-1, + num_inference_steps=25, + strength=0.7, + adapter_type=None, + prompt=lora_prompt, + negative_prompt="", + width=width, + height=height, + low_threshold=100, + high_threshold=200, + image_uuid=None, + mask_uuid=None, + data={ + "file_video": input_video.uuid, + "max_step": max_step, + "lora_name": lora_name, + "ckpt": ckpt + } + ) + + ml_client = get_ml_client() + output, log = ml_client.predict_model_output_standardized( + ML_MODEL.motion_lora_trainer, + query_obj, + QUEUE_INFERENCE_QUERIES + ) + + if log: + inference_data = { + "inference_type": InferenceType.MOTION_LORA_TRAINING.value, + "output": output, + "log_uuid": log.uuid, + "settings": {} + } + + process_inference_output(**inference_data) + def inpainting(input_image: str, prompt, negative_prompt, width, height, 
shot_uuid, project_uuid) -> InternalFileObject: data_repo = DataRepo() - # timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - - - project = data_repo.get_project_from_uuid(project_uuid) mask = st.session_state['mask_to_use'] if not mask.startswith("http"): diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 6e8d3f38..388163cc 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -1,14 +1,20 @@ import json +import random +import string import tarfile import time +import uuid import zipfile +from ui_components.methods.ml_methods import train_motion_lora import streamlit as st from typing import List -from shared.constants import AnimationStyleType, AnimationToolType, InferenceParamType +from shared.constants import AnimationStyleType, AnimationToolType, InferenceParamType, InternalFileType from ui_components.constants import DEFAULT_SHOT_MOTION_VALUES, DefaultProjectSettingParams, ShotMetaData from ui_components.methods.animation_style_methods import load_shot_settings +from ui_components.methods.file_methods import get_files_in_a_directory, get_media_dimensions, save_or_host_file from ui_components.methods.video_methods import create_single_interpolated_clip from utils.data_repo.data_repo import DataRepo +from utils.local_storage.local_storage import read_from_motion_lora_local_db from utils.ml_processor.motion_module import AnimateDiffCheckpoint from ui_components.models import InternalFrameTimingObject, InternalShotObject from utils import st_memory @@ -298,14 +304,7 @@ def update_model(): # ---------------- ADD LORA ----------------- with tab1: - # Initialize a single list to hold dictionaries for LoRA data - # Check if the directory exists and list files, or use a default list - if os.path.exists(lora_file_dest): - files = os.listdir(lora_file_dest) - # remove files that start with a dot - files = [file for file in files if not file.startswith(".")] - else: - files = [] + files = get_files_in_a_directory(lora_file_dest, ['safetensors', 'ckpt']) # Iterate through each current LoRA in session state if len(files) == 0: @@ -313,6 +312,7 @@ def update_model(): if st.button("Check again", key="check_again"): st.rerun() else: + filename_video_dict = read_from_motion_lora_local_db() # cleaning empty lora vals for idx, lora in enumerate(st.session_state[f"lora_data_{shot.uuid}"]): if not lora: @@ -324,11 +324,11 @@ def update_model(): h1, h2, h3, h4 = st.columns([1, 1, 1, 0.5]) with h1: file_idx = files.index(lora["filename"]) - which_lora = st.selectbox("Which LoRA would you like to use?", options=files, key=f"which_lora_{idx}", index=file_idx) + motion_lora = st.selectbox("Which LoRA would you like to use?", options=files, key=f"motion_lora_{idx}", index=file_idx) with h2: strength_of_lora = st.slider("How strong would you like the LoRA to be?", min_value=0.0, max_value=1.0, value=lora["lora_strength"], step=0.01, key=f"strength_of_lora_{idx}") - lora_data.append({"filename": which_lora, "lora_strength": strength_of_lora, "filepath": lora_file_dest + "/" + which_lora}) + lora_data.append({"filename": motion_lora, "lora_strength": strength_of_lora, "filepath": lora_file_dest + "/" + motion_lora}) with h3: when_to_apply_lora = st.slider("When to apply the LoRA?", min_value=0, max_value=100, value=(0,100), step=1, key=f"when_to_apply_lora_{idx}",disabled=True,help="This feature is not yet available.") @@ -338,6 +338,10 @@ def 
update_model(): if st.button("Remove", key=f"remove_lora_{idx}"): st.session_state[f"lora_data_{shot.uuid}"].pop(idx) st.rerun() + + # displaying preview + if motion_lora and motion_lora in filename_video_dict: + st.image(filename_video_dict[motion_lora]) if len(st.session_state[f"lora_data_{shot.uuid}"]) == 0: text = "Add a LoRA" @@ -435,13 +439,48 @@ def update_model(): with tab3: b1, b2 = st.columns([1, 1]) with b1: - st.error("This feature is not yet available.") - name_this_lora = st.text_input("Name this LoRA", key="name_this_lora") - describe_the_motion = st.text_area("Describe the motion", key="describe_the_motion") + lora_name = st.text_input("Name this LoRA", key="lora_name") + if model_files and len(model_files): + base_sd_model = st.selectbox( + label="Select base sd model for training", + options=model_files, + key="base_sd_model_video", + index=current_model_index, + on_change=update_model + ) + else: + base_sd_model = "" + st.info("Default model Deliberate V2 would be selected") + + lora_prompt = st.text_area("Describe the motion", key="lora_prompt") training_video = st.file_uploader("Upload a video to train a new LoRA", type=["mp4"]) if st.button("Train LoRA", key="train_lora", use_container_width=True): - st.write("Training LoRA") + filename = str(uuid.uuid4()) + ".mp4" + hosted_url = save_or_host_file(training_video, "videos/temp/" + filename, "video/mp4") + + file_data = { + "name": filename, + "type": InternalFileType.VIDEO.value, + "project_id": shot.project.uuid, + } + + if hosted_url: + file_data.update({'hosted_url': hosted_url}) + else: + file_data.update({'local_path': "videos/temp/" + filename}) + + video_file = data_repo.create_file(**file_data) + video_width, video_height = get_media_dimensions(video_file.location) + unique_file_tag = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6)) + train_motion_lora( + video_file, + lora_prompt, + lora_name + "_" + unique_file_tag, + video_width, + video_height, + base_sd_model + ) st.markdown("***") st.markdown("##### Overall style settings") diff --git a/utils/local_storage/local_storage.py b/utils/local_storage/local_storage.py index 2d13ced1..643e3770 100644 --- a/utils/local_storage/local_storage.py +++ b/utils/local_storage/local_storage.py @@ -1,47 +1,38 @@ import json import os + +MOTION_LORA_DB = 'data.json' + def is_file_present(filename): script_directory = os.path.dirname(os.path.abspath(__file__)) file_path = os.path.join(script_directory, filename) return os.path.isfile(file_path) -# def get_current_user(): -# logger = AppLogger() -# data_store = 'data.json' - -# if not is_file_present(data_store): -# with open(data_store, 'w') as file: -# json.dump({}, file, indent=4) - - -# data = {} -# try: -# with open(data_store, 'r') as file: -# data = json.loads(file.read()) -# except Exception as e: -# logger.log(LoggingType.ERROR, 'user not found in local storage') - +def write_to_motion_lora_local_db(update_data): + data_store = MOTION_LORA_DB + + data = {} + if os.path.exists(data_store): + try: + with open(data_store, 'r', encoding='utf-8') as file: + data = json.loads(file.read()) + except Exception as e: + pass -# if not ( data and 'current_user' in data): -# from utils.data_repo.data_repo import DataRepo -# data_repo = DataRepo() -# user = data_repo.get_first_active_user() -# data = {} -# data['current_user'] = user.to_json() if user else None + for key, value in update_data.items(): + data[key] = value + + data = json.dumps(data, indent=4) + with open(data_store, 'w', encoding='utf-8') as 
file: + file.write(data) -# with open(data_store, 'w') as file: -# json.dump(data, file, indent=4) - -# with open(data_store, 'r') as file: -# data = json.loads(file.read()) - -# logger.log(LoggingType.DEBUG, 'user found in local storage' + str(data)) -# return json.loads(data['current_user']) if data['current_user'] else None - -# def get_current_user_uuid(): -# current_user = get_current_user() -# if current_user and 'uuid' in current_user: -# return current_user['uuid'] -# else: -# return None \ No newline at end of file +def read_from_motion_lora_local_db(key = None): + data_store = MOTION_LORA_DB + + data = {} + if os.path.exists(data_store): + with open(data_store, 'r', encoding='utf-8') as file: + data = json.loads(file.read()) + + return data[key] if key in data else data \ No newline at end of file diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index d9448160..1a2107ee 100644 --- a/utils/ml_processor/comfy_data_transform.py +++ b/utils/ml_processor/comfy_data_transform.py @@ -14,17 +14,19 @@ MODEL_PATH_DICT = { - ComfyWorkflow.SDXL: {"workflow_path": 'comfy_workflows/sdxl_workflow_api.json', "output_node_id": 19}, - ComfyWorkflow.SDXL_IMG2IMG: {"workflow_path": 'comfy_workflows/sdxl_img2img_workflow_api.json', "output_node_id": 31}, - ComfyWorkflow.SDXL_CONTROLNET: {"workflow_path": 'comfy_workflows/sdxl_controlnet_workflow_api.json', "output_node_id": 9}, - ComfyWorkflow.SDXL_CONTROLNET_OPENPOSE: {"workflow_path": 'comfy_workflows/sdxl_openpose_workflow_api.json', "output_node_id": 9}, - ComfyWorkflow.LLAMA_2_7B: {"workflow_path": 'comfy_workflows/llama_workflow_api.json', "output_node_id": 14}, - ComfyWorkflow.SDXL_INPAINTING: {"workflow_path": 'comfy_workflows/sdxl_inpainting_workflow_api.json', "output_node_id": 56}, - ComfyWorkflow.IP_ADAPTER_PLUS: {"workflow_path": 'comfy_workflows/ipadapter_plus_api.json', "output_node_id": 29}, - ComfyWorkflow.IP_ADAPTER_FACE: {"workflow_path": 'comfy_workflows/ipadapter_face_api.json', "output_node_id": 29}, - ComfyWorkflow.IP_ADAPTER_FACE_PLUS: {"workflow_path": 'comfy_workflows/ipadapter_face_plus_api.json', "output_node_id": 29}, - ComfyWorkflow.STEERABLE_MOTION: {"workflow_path": 'comfy_workflows/steerable_motion_api.json', "output_node_id": 281}, - ComfyWorkflow.UPSCALER: {"workflow_path": 'comfy_workflows/video_upscaler_api.json', "output_node_id": 243} + ComfyWorkflow.SDXL: {"workflow_path": 'comfy_workflows/sdxl_workflow_api.json', "output_node_id": [19]}, + ComfyWorkflow.SDXL_IMG2IMG: {"workflow_path": 'comfy_workflows/sdxl_img2img_workflow_api.json', "output_node_id": [31]}, + ComfyWorkflow.SDXL_CONTROLNET: {"workflow_path": 'comfy_workflows/sdxl_controlnet_workflow_api.json', "output_node_id": [9]}, + ComfyWorkflow.SDXL_CONTROLNET_OPENPOSE: {"workflow_path": 'comfy_workflows/sdxl_openpose_workflow_api.json', "output_node_id": [9]}, + ComfyWorkflow.LLAMA_2_7B: {"workflow_path": 'comfy_workflows/llama_workflow_api.json', "output_node_id": [14]}, + ComfyWorkflow.SDXL_INPAINTING: {"workflow_path": 'comfy_workflows/sdxl_inpainting_workflow_api.json', "output_node_id": [56]}, + ComfyWorkflow.IP_ADAPTER_PLUS: {"workflow_path": 'comfy_workflows/ipadapter_plus_api.json', "output_node_id": [29]}, + ComfyWorkflow.IP_ADAPTER_FACE: {"workflow_path": 'comfy_workflows/ipadapter_face_api.json', "output_node_id": [29]}, + ComfyWorkflow.IP_ADAPTER_FACE_PLUS: {"workflow_path": 'comfy_workflows/ipadapter_face_plus_api.json', "output_node_id": [29]}, + ComfyWorkflow.STEERABLE_MOTION: 
{"workflow_path": 'comfy_workflows/steerable_motion_api.json', "output_node_id": [281]}, + ComfyWorkflow.UPSCALER: {"workflow_path": 'comfy_workflows/video_upscaler_api.json', "output_node_id": [243]}, + ComfyWorkflow.MOTION_LORA: {"workflow_path": 'comfy_workflows/motion_lora_api.json', "output_node_id": [11, 14, 26, 30, 34]}, + # ComfyWorkflow.MOTION_LORA: {"workflow_path": 'comfy_workflows/motion_lora_test_api.json', "output_node_id": [11, 14]}, } @@ -36,7 +38,7 @@ def get_workflow_json(model: ComfyWorkflow): # Specify encoding as 'utf-8' when opening the file with open(json_file_path, 'r', encoding='utf-8') as f: json_data = json.load(f) - return json_data, [MODEL_PATH_DICT[model]['output_node_id']] + return json_data, MODEL_PATH_DICT[model]['output_node_id'] @staticmethod def transform_sdxl_workflow(query: MLQueryObject): @@ -426,6 +428,41 @@ def transform_video_upscaler_workflow(query: MLQueryObject): return json.dumps(workflow), output_node_ids, extra_models_list, [] + @staticmethod + def transform_motion_lora_workflow(query: MLQueryObject): + data_repo = DataRepo() + workflow, output_node_ids = ComfyDataTransform.get_workflow_json(ComfyWorkflow.MOTION_LORA) + data = query.data.get("data", {}) + video_uuid = data.get("file_video", None) + video = data_repo.get_file_from_uuid(video_uuid) + lora_name = data.get("lora_name", "") + + workflow["5"]["inputs"]["video"] = os.path.basename(video.filename) + workflow["5"]["inputs"]["custom_width"] = query.width + workflow["5"]["inputs"]["custom_height"] = query.height + workflow["4"]["inputs"]["lora_name"] = lora_name + workflow["4"]["inputs"]["prompt"] = query.prompt + workflow["15"]["inputs"]["validation_prompt"] = query.prompt + + ckpt = data.get('ckpt') + if "ComfyUI/models/checkpoints/" != ckpt and ckpt: + workflow['1']['inputs']['ckpt_name'] = ckpt + + extra_models_list = [ + { + "filename": "v3_sd15_mm.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v3_sd15_mm.ckpt?download=true", + "dest": "./ComfyUI/models/animatediff_models/" + }, + { + "filename": "v3_sd15_adapter.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v3_sd15_adapter.ckpt?download=true", + "dest": "./ComfyUI/models/loras/" + }, + ] + + return json.dumps(workflow), output_node_ids, extra_models_list, [] + # NOTE: only populating with models currently in use MODEL_WORKFLOW_MAP = { @@ -439,7 +476,8 @@ def transform_video_upscaler_workflow(query: MLQueryObject): ML_MODEL.ipadapter_face_plus.workflow_name: ComfyDataTransform.transform_ipadaptor_face_plus_workflow, ML_MODEL.ad_interpolation.workflow_name: ComfyDataTransform.transform_steerable_motion_workflow, ML_MODEL.sdxl_img2img.workflow_name: ComfyDataTransform.transform_sdxl_img2img_workflow, - ML_MODEL.video_upscaler.workflow_name: ComfyDataTransform.transform_video_upscaler_workflow + ML_MODEL.video_upscaler.workflow_name: ComfyDataTransform.transform_video_upscaler_workflow, + ML_MODEL.motion_lora_trainer.workflow_name: ComfyDataTransform.transform_motion_lora_workflow } # returns stringified json of the workflow diff --git a/utils/ml_processor/comfy_workflows/motion_lora_api.json b/utils/ml_processor/comfy_workflows/motion_lora_api.json new file mode 100644 index 00000000..18ec3737 --- /dev/null +++ b/utils/ml_processor/comfy_workflows/motion_lora_api.json @@ -0,0 +1,459 @@ +{ + "1": { + "inputs": { + "ckpt_name": "Deliberate_v2.safetensors", + "scheduler": "DDIMScheduler", + "use_xformers": false, + "additional_models": [ + "2", + 0 + ] + }, + "class_type": 
"ADMD_CheckpointLoader", + "_meta": { + "title": "ADMD_CheckpointLoader" + } + }, + "2": { + "inputs": { + "motion_module": "v3_sd15_mm.ckpt", + "use_adapter_lora": true, + "optional_adapter_lora": "v3_sd15_adapter.ckpt" + }, + "class_type": "ADMD_AdditionalModelSelect", + "_meta": { + "title": "ADMD_AdditionalModelSelect" + } + }, + "4": { + "inputs": { + "lora_name": "motion_director_lora", + "prompt": "car is driving in desert", + "max_train_steps": 500, + "learning_rate": 0.0005, + "learning_rate_spatial": 0.0001, + "lora_rank": 64, + "seed": 817550656066000, + "optimization_method": "Lion", + "include_resnet": true, + "pipeline": [ + "1", + 0 + ], + "images": [ + "6", + 0 + ] + }, + "class_type": "ADMD_InitializeTraining", + "_meta": { + "title": "ADMD_InitializeTraining" + } + }, + "5": { + "inputs": { + "video": "AD__00003_.mp4", + "force_rate": 0, + "force_size": "Disabled", + "custom_width": 512, + "custom_height": 512, + "frame_load_cap": 16, + "skip_first_frames": 0, + "select_every_nth": 2 + }, + "class_type": "VHS_LoadVideo", + "_meta": { + "title": "Load Video (Upload) 🎥🅥🅗🅢" + } + }, + "6": { + "inputs": { + "width": 512, + "height": 512, + "interpolation": "nearest", + "keep_proportion": false, + "condition": "always", + "multiple_of": 0, + "image": [ + "5", + 0 + ] + }, + "class_type": "ImageResize+", + "_meta": { + "title": "🔧 Image Resize" + } + }, + "7": { + "inputs": { + "frame_rate": 10, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "image/gif", + "pingpong": false, + "save_output": true, + "images": [ + "4", + 0 + ] + }, + "class_type": "VHS_VideoCombine", + "_meta": { + "title": "Video Combine 🎥🅥🅗🅢" + } + }, + "8": { + "inputs": { + "steps": 100, + "admd_pipeline": [ + "4", + 1 + ] + }, + "class_type": "ADMD_TrainLora", + "_meta": { + "title": "ADMD_TrainLora" + } + }, + "9": { + "inputs": { + "validation_settings": [ + "15", + 0 + ], + "admd_pipeline": [ + "8", + 0 + ] + }, + "class_type": "ADMD_ValidationSampler", + "_meta": { + "title": "ADMD_ValidationSampler" + } + }, + "11": { + "inputs": { + "frame_rate": 10, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "image/gif", + "pingpong": false, + "save_output": true, + "images": [ + "9", + 1 + ] + }, + "class_type": "VHS_VideoCombine", + "_meta": { + "title": "Video Combine 🎥🅥🅗🅢" + } + }, + "12": { + "inputs": { + "steps": 100, + "admd_pipeline": [ + "54", + 1 + ] + }, + "class_type": "ADMD_TrainLora", + "_meta": { + "title": "ADMD_TrainLora" + } + }, + "13": { + "inputs": { + "validation_settings": [ + "15", + 0 + ], + "admd_pipeline": [ + "12", + 0 + ] + }, + "class_type": "ADMD_ValidationSampler", + "_meta": { + "title": "ADMD_ValidationSampler" + } + }, + "14": { + "inputs": { + "frame_rate": 10, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "image/gif", + "pingpong": false, + "save_output": true, + "images": [ + "13", + 1 + ] + }, + "class_type": "VHS_VideoCombine", + "_meta": { + "title": "Video Combine 🎥🅥🅗🅢" + } + }, + "15": { + "inputs": { + "seed": 584873287563136, + "inference_steps": 25, + "guidance_scale": 8, + "spatial_scale": 0.5, + "validation_prompt": "a hippo is walking in a jungle" + }, + "class_type": "ADMD_ValidationSettings", + "_meta": { + "title": "ADMD_ValidationSettings" + } + }, + "16": { + "inputs": { + "input": [ + "5", + 1 + ], + "output": "" + }, + "class_type": "Display Int (rgthree)", + "_meta": { + "title": "Display Int (rgthree)" + } + }, + "24": { + "inputs": { + "steps": 100, + "admd_pipeline": [ + "56", + 1 + ] + }, + 
"class_type": "ADMD_TrainLora", + "_meta": { + "title": "ADMD_TrainLora" + } + }, + "25": { + "inputs": { + "validation_settings": [ + "15", + 0 + ], + "admd_pipeline": [ + "24", + 0 + ] + }, + "class_type": "ADMD_ValidationSampler", + "_meta": { + "title": "ADMD_ValidationSampler" + } + }, + "26": { + "inputs": { + "frame_rate": 10, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "image/gif", + "pingpong": false, + "save_output": true, + "images": [ + "25", + 1 + ] + }, + "class_type": "VHS_VideoCombine", + "_meta": { + "title": "Video Combine 🎥🅥🅗🅢" + } + }, + "28": { + "inputs": { + "steps": 100, + "admd_pipeline": [ + "58", + 1 + ] + }, + "class_type": "ADMD_TrainLora", + "_meta": { + "title": "ADMD_TrainLora" + } + }, + "29": { + "inputs": { + "validation_settings": [ + "15", + 0 + ], + "admd_pipeline": [ + "28", + 0 + ] + }, + "class_type": "ADMD_ValidationSampler", + "_meta": { + "title": "ADMD_ValidationSampler" + } + }, + "30": { + "inputs": { + "frame_rate": 10, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "image/gif", + "pingpong": false, + "save_output": true, + "images": [ + "29", + 1 + ] + }, + "class_type": "VHS_VideoCombine", + "_meta": { + "title": "Video Combine 🎥🅥🅗🅢" + } + }, + "32": { + "inputs": { + "steps": 100, + "admd_pipeline": [ + "60", + 1 + ] + }, + "class_type": "ADMD_TrainLora", + "_meta": { + "title": "ADMD_TrainLora" + } + }, + "33": { + "inputs": { + "validation_settings": [ + "15", + 0 + ], + "admd_pipeline": [ + "32", + 0 + ] + }, + "class_type": "ADMD_ValidationSampler", + "_meta": { + "title": "ADMD_ValidationSampler" + } + }, + "34": { + "inputs": { + "frame_rate": 10, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "image/gif", + "pingpong": false, + "save_output": true, + "images": [ + "33", + 1 + ] + }, + "class_type": "VHS_VideoCombine", + "_meta": { + "title": "Video Combine 🎥🅥🅗🅢" + } + }, + "54": { + "inputs": { + "admd_pipeline": [ + "9", + 0 + ], + "lora_info": [ + "4", + 2 + ] + }, + "class_type": "ADMD_SaveLora", + "_meta": { + "title": "ADMD_SaveLora" + } + }, + "56": { + "inputs": { + "admd_pipeline": [ + "13", + 0 + ], + "lora_info": [ + "4", + 2 + ] + }, + "class_type": "ADMD_SaveLora", + "_meta": { + "title": "ADMD_SaveLora" + } + }, + "58": { + "inputs": { + "admd_pipeline": [ + "25", + 0 + ], + "lora_info": [ + "4", + 2 + ] + }, + "class_type": "ADMD_SaveLora", + "_meta": { + "title": "ADMD_SaveLora" + } + }, + "60": { + "inputs": { + "admd_pipeline": [ + "29", + 0 + ], + "lora_info": [ + "4", + 2 + ] + }, + "class_type": "ADMD_SaveLora", + "_meta": { + "title": "ADMD_SaveLora" + } + }, + "62": { + "inputs": { + "admd_pipeline": [ + "33", + 0 + ], + "lora_info": [ + "4", + 2 + ] + }, + "class_type": "ADMD_SaveLora", + "_meta": { + "title": "ADMD_SaveLora" + } + }, + "64": { + "inputs": { + "output": "", + "source": [ + "62", + 0 + ] + }, + "class_type": "Display Any (rgthree)", + "_meta": { + "title": "Display Any (rgthree)" + } + } + } \ No newline at end of file diff --git a/utils/ml_processor/comfy_workflows/motion_lora_test_api.json b/utils/ml_processor/comfy_workflows/motion_lora_test_api.json new file mode 100644 index 00000000..46f6008b --- /dev/null +++ b/utils/ml_processor/comfy_workflows/motion_lora_test_api.json @@ -0,0 +1,270 @@ +{ + "1": { + "inputs": { + "ckpt_name": "Deliberate_v2.safetensors", + "scheduler": "DDIMScheduler", + "use_xformers": false, + "additional_models": [ + "2", + 0 + ] + }, + "class_type": "ADMD_CheckpointLoader", + "_meta": { + "title": 
"ADMD_CheckpointLoader" + } + }, + "2": { + "inputs": { + "motion_module": "v3_sd15_mm.ckpt", + "use_adapter_lora": true, + "optional_adapter_lora": "v3_sd15_adapter.ckpt" + }, + "class_type": "ADMD_AdditionalModelSelect", + "_meta": { + "title": "ADMD_AdditionalModelSelect" + } + }, + "4": { + "inputs": { + "lora_name": "motion_director_lora", + "prompt": "car is driving in desert", + "max_train_steps": 500, + "learning_rate": 0.0005, + "learning_rate_spatial": 0.0001, + "lora_rank": 64, + "seed": 101623484429674, + "optimization_method": "Lion", + "include_resnet": true, + "pipeline": [ + "1", + 0 + ], + "images": [ + "6", + 0 + ] + }, + "class_type": "ADMD_InitializeTraining", + "_meta": { + "title": "ADMD_InitializeTraining" + } + }, + "5": { + "inputs": { + "video": "test_sm.mp4", + "force_rate": 0, + "force_size": "Disabled", + "custom_width": 512, + "custom_height": 512, + "frame_load_cap": 16, + "skip_first_frames": 0, + "select_every_nth": 2 + }, + "class_type": "VHS_LoadVideo", + "_meta": { + "title": "Load Video (Upload) 🎥🅥🅗🅢" + } + }, + "6": { + "inputs": { + "width": 512, + "height": 512, + "interpolation": "nearest", + "keep_proportion": false, + "condition": "always", + "multiple_of": 0, + "image": [ + "5", + 0 + ] + }, + "class_type": "ImageResize+", + "_meta": { + "title": "🔧 Image Resize" + } + }, + "7": { + "inputs": { + "frame_rate": 10, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "image/gif", + "pingpong": false, + "save_output": true, + "images": [ + "4", + 0 + ] + }, + "class_type": "VHS_VideoCombine", + "_meta": { + "title": "Video Combine 🎥🅥🅗🅢" + } + }, + "8": { + "inputs": { + "steps": 5, + "admd_pipeline": [ + "4", + 1 + ] + }, + "class_type": "ADMD_TrainLora", + "_meta": { + "title": "ADMD_TrainLora" + } + }, + "9": { + "inputs": { + "validation_settings": [ + "15", + 0 + ], + "admd_pipeline": [ + "8", + 0 + ] + }, + "class_type": "ADMD_ValidationSampler", + "_meta": { + "title": "ADMD_ValidationSampler" + } + }, + "11": { + "inputs": { + "frame_rate": 10, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "image/gif", + "pingpong": false, + "save_output": true, + "images": [ + "9", + 1 + ] + }, + "class_type": "VHS_VideoCombine", + "_meta": { + "title": "Video Combine 🎥🅥🅗🅢" + } + }, + "12": { + "inputs": { + "steps": 5, + "admd_pipeline": [ + "54", + 1 + ] + }, + "class_type": "ADMD_TrainLora", + "_meta": { + "title": "ADMD_TrainLora" + } + }, + "13": { + "inputs": { + "validation_settings": [ + "15", + 0 + ], + "admd_pipeline": [ + "12", + 0 + ] + }, + "class_type": "ADMD_ValidationSampler", + "_meta": { + "title": "ADMD_ValidationSampler" + } + }, + "14": { + "inputs": { + "frame_rate": 10, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "image/gif", + "pingpong": false, + "save_output": true, + "images": [ + "13", + 1 + ] + }, + "class_type": "VHS_VideoCombine", + "_meta": { + "title": "Video Combine 🎥🅥🅗🅢" + } + }, + "15": { + "inputs": { + "seed": 391486873294625, + "inference_steps": 25, + "guidance_scale": 8, + "spatial_scale": 0.5, + "validation_prompt": "a hippo is walking in a jungle" + }, + "class_type": "ADMD_ValidationSettings", + "_meta": { + "title": "ADMD_ValidationSettings" + } + }, + "16": { + "inputs": { + "input": [ + "5", + 1 + ], + "output": "" + }, + "class_type": "Display Int (rgthree)", + "_meta": { + "title": "Display Int (rgthree)" + } + }, + "54": { + "inputs": { + "admd_pipeline": [ + "9", + 0 + ], + "lora_info": [ + "4", + 2 + ] + }, + "class_type": "ADMD_SaveLora", + 
"_meta": { + "title": "ADMD_SaveLora" + } + }, + "56": { + "inputs": { + "admd_pipeline": [ + "13", + 0 + ], + "lora_info": [ + "4", + 2 + ] + }, + "class_type": "ADMD_SaveLora", + "_meta": { + "title": "ADMD_SaveLora" + } + }, + "64": { + "inputs": { + "output": "", + "source": [ + "56", + 0 + ] + }, + "class_type": "Display Any (rgthree)", + "_meta": { + "title": "Display Any (rgthree)" + } + } +} \ No newline at end of file diff --git a/utils/ml_processor/constants.py b/utils/ml_processor/constants.py index 42c36127..73ea4d1b 100644 --- a/utils/ml_processor/constants.py +++ b/utils/ml_processor/constants.py @@ -15,6 +15,7 @@ class ComfyWorkflow(ExtendedEnum): STEERABLE_MOTION = "steerable_motion" SDXL_IMG2IMG = "sdxl_img2img" UPSCALER = "upscale" + MOTION_LORA = "motion_lora" @dataclass class MLModel: @@ -93,6 +94,7 @@ class ML_MODEL: ipadapter_face = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.IP_ADAPTER_FACE) ipadapter_face_plus = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.IP_ADAPTER_FACE_PLUS) video_upscaler = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.UPSCALER) + motion_lora_trainer = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.MOTION_LORA) @staticmethod From d31443d0726635ad114023a35a1a793b7c6102f7 Mon Sep 17 00:00:00 2001 From: peteromallet Date: Tue, 19 Mar 2024 16:20:08 +0100 Subject: [PATCH 29/43] Adding dynamicrafter --- text.json | 1 + .../widgets/animation_style_element.py | 10 +++++--- utils/media_processor/interpolator.py | 8 +++--- utils/ml_processor/comfy_data_transform.py | 25 +++++++++++++------ 4 files changed, 30 insertions(+), 14 deletions(-) create mode 100644 text.json diff --git a/text.json b/text.json new file mode 100644 index 00000000..03799414 --- /dev/null +++ b/text.json @@ -0,0 +1 @@ +{"2": {"inputs": {"frame_rate": 12, "loop_count": 0, "filename_prefix": "AnimateDiff", "format": "video/h264-mp4", "pix_fmt": "yuv420p", "crf": 19, "save_metadata": true, "pingpong": false, "save_output": true, "images": ["34", 0]}, "class_type": "VHS_VideoCombine", "_meta": {"title": "Video Combine \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62"}}, "11": {"inputs": {"ckpt_name": "dynamicrafter_512_interp_v1.ckpt", "dtype": "auto"}, "class_type": "DynamiCrafterModelLoader", "_meta": {"title": "DynamiCrafterModelLoader"}}, "12": {"inputs": {"steps": 50, "cfg": 4, "eta": 1, "frames": 16, "prompt": "tasdas", "seed": 848590531769060, "fs": 10, "keep_model_loaded": true, "vae_dtype": "auto", "model": ["11", 0], "images": ["15", 0]}, "class_type": "DynamiCrafterBatchInterpolation", "_meta": {"title": "DynamiCrafterBatchInterpolation"}}, "15": {"inputs": {"image1": ["16", 0], "image2": ["17", 0]}, "class_type": "ImageBatch", "_meta": {"title": "Batch Images"}}, "16": {"inputs": {"image": "fe43291a-ae2e-48a5-bc0d-a73ad96c9785.png", "upload": "image"}, "class_type": "LoadImage", "_meta": {"title": "Load Image"}}, "17": {"inputs": {"image": "707dd8bd-23cd-498f-ad5a-09da482a11de.png", "upload": "image"}, "class_type": "LoadImage", "_meta": {"title": "Load Image"}}, "34": {"inputs": {"ckpt_name": "film_net_fp32.pt", "clear_cache_after_n_frames": 10, "multiplier": 3, "frames": ["12", 0]}, "class_type": "FILM VFI", "_meta": {"title": "FILM VFI"}}} \ No newline at end of file diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 6e0d0aa8..0e9dc491 100644 --- a/ui_components/widgets/animation_style_element.py +++ 
b/ui_components/widgets/animation_style_element.py @@ -712,7 +712,7 @@ def update_prompt(): elif type_of_animation == "2-Image Realistic Interpolation": col1, col2, col3 = st.columns([1, 1, 1]) - for i in range(0, len(timing_list), 2): # Iterate two items at a time + for i in range(0, 2, 2): # Iterate two items at a time if i < len(timing_list): timing_first = timing_list[i] if timing_first.primary_image and timing_first.primary_image.location: @@ -731,6 +731,7 @@ def update_prompt(): variant_count = 1 # Assuming a default value for variant_count, adjust as necessary vid_quality = "full" # Assuming full quality, adjust as necessary based on your requirements + position = "generate_vid" if "generate_vid_generate_inference" in st.session_state and st.session_state["generate_vid_generate_inference"]: @@ -769,12 +770,15 @@ def update_prompt(): # Buttons for adding to queue or backlog, assuming these are still relevant btn1, btn2, btn3 = st.columns([1, 1, 1]) + backlog_no_update = {f'{shot_uuid}_backlog_enabled': False} with btn1: - st.button("Add to queue", key="generate_2_image_interpolation_clip", disabled=False, help="Generate the interpolation clip based on the two images and described motion.", on_click=lambda: toggle_generate_inference("generate_vid"), type="primary", use_container_width=True) + st.button("Add to queue", key="generate_animation_clip", disabled=False, help="Generate the interpolation clip based on the two images and described motion.", on_click=lambda: toggle_generate_inference(position, **backlog_no_update), type="primary", use_container_width=True) backlog_update = {f'{shot_uuid}_backlog_enabled': True} with btn2: - st.button("Add to backlog", key="generate_2_image_interpolation_clip_backlog", disabled=False, help="Add the 2-Image Realistic Interpolation to the backlog.", on_click=lambda: toggle_generate_inference("generate_vid", **backlog_update), type="secondary") + st.button("Add to backlog", key="generate_animation_clip_backlog", disabled=False, help="Add the 2-Image Realistic Interpolation to the backlog.", on_click=lambda: toggle_generate_inference(position, **backlog_update), type="secondary") + + # --------------------- METHODS ----------------------- def toggle_generate_inference(position, **kwargs): for k,v in kwargs.items(): diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index deb1bfb0..fb4c568d 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -150,7 +150,9 @@ def video_through_direct_morphing(img_location_list, settings, variant_count, qu if True: # NOTE: @Peter these are all the settings you passed in from the UI sm_data = { - "width": settings['width'] + "width": settings['width'], + "height": settings['height'], + "prompt": settings["prompt"] } for idx, img_uuid in enumerate(settings['file_uuid_list']): @@ -166,8 +168,8 @@ def video_through_direct_morphing(img_location_list, settings, variant_count, qu strength=None, adapter_type=None, negative_prompt="", - height=512, - width=512, + height=settings['height'], + width=settings['width'], image_uuid=None, mask_uuid=None, data=sm_data diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index 06d07621..02e9578c 100644 --- a/utils/ml_processor/comfy_data_transform.py +++ b/utils/ml_processor/comfy_data_transform.py @@ -404,18 +404,27 @@ def update_json_with_loras(json_data, loras): def transform_dynamicrafter_workflow(query: MLQueryObject): data_repo = DataRepo() 
workflow, output_node_ids = ComfyDataTransform.get_workflow_json(ComfyWorkflow.DYNAMICRAFTER) - sm_data = query.data.get('data', {}) - # get the first images from settings - it was put there in a list like this: settings.update(file_uuid_list=[t.primary_image.uuid for t in timing_list]) - import streamlit as st - st.write(sm_data) - # wriet file_image_0001_uuid to file_image_0005_uuid - st.write(sm_data.get('file_image_0001_uuid')) + sm_data = query.data.get('data', {}) + image_1 = data_repo.get_file_from_uuid(sm_data.get('file_image_0001_uuid')) image_2 = data_repo.get_file_from_uuid(sm_data.get('file_image_0002_uuid')) - - + + workflow['16']['inputs']['image'] = image_1.filename + workflow['17']['inputs']['image'] = image_2.filename + workflow['12']['inputs']['seed'] = random_seed() + workflow['12']['inputs']['steps'] = 50 + workflow['12']['inputs']['cfg'] = 4 + workflow['12']['inputs']['prompt'] = sm_data.get('prompt') + extra_models_list = [ + { + "filename": "dynamicrafter_512_interp_v1.ckpt", + "url": "https://huggingface.co/Doubiiu/DynamiCrafter_512_Interp/resolve/main/model.ckpt?download=true", + "dest": "./ComfyUI/models/checkpoints/" + }] + + return json.dumps(workflow), output_node_ids, extra_models_list, [] @staticmethod def transform_video_upscaler_workflow(query: MLQueryObject): From 17650e7beb36e822f3ef3c6faf45c0dc357cd06e Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Tue, 19 Mar 2024 18:21:30 +0000 Subject: [PATCH 30/43] minor ux changes --- ui_components/components/animate_shot_page.py | 9 +--- .../widgets/animation_style_element.py | 50 +++++++++---------- ui_components/widgets/display_element.py | 17 ++++++- ui_components/widgets/sidebar_logger.py | 4 ++ .../widgets/variant_comparison_grid.py | 43 ++++++++++------ 5 files changed, 74 insertions(+), 49 deletions(-) diff --git a/ui_components/components/animate_shot_page.py b/ui_components/components/animate_shot_page.py index 6b6adb82..4e56d10f 100644 --- a/ui_components/components/animate_shot_page.py +++ b/ui_components/components/animate_shot_page.py @@ -8,11 +8,7 @@ def animate_shot_page(shot_uuid: str, h2): data_repo = DataRepo() shot = data_repo.get_shot_from_uuid(shot_uuid) - - - with st.sidebar: - frame_selector_widget(show_frame_selector=False) st.write("") @@ -20,7 +16,6 @@ def animate_shot_page(shot_uuid: str, h2): # if st_memory.toggle("Open", value=True, key="generaton_log_toggle"): sidebar_logger(st.session_state["shot_uuid"]) - st.write("") # frame_view(view='Video',show_current_frames=False) @@ -28,7 +23,7 @@ def animate_shot_page(shot_uuid: str, h2): st.markdown("***") variant_comparison_grid(st.session_state['shot_uuid'], stage="Shots") - animation_style_element(st.session_state['shot_uuid']) - st.markdown("***") \ No newline at end of file + st.markdown("***") + \ No newline at end of file diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 388163cc..448c87d9 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -13,6 +13,7 @@ from ui_components.methods.animation_style_methods import load_shot_settings from ui_components.methods.file_methods import get_files_in_a_directory, get_media_dimensions, save_or_host_file from ui_components.methods.video_methods import create_single_interpolated_clip +from ui_components.widgets.display_element import display_motion_lora from utils.data_repo.data_repo import DataRepo from utils.local_storage.local_storage import 
read_from_motion_lora_local_db from utils.ml_processor.motion_module import AnimateDiffCheckpoint @@ -301,6 +302,23 @@ def update_model(): lora_data = [] lora_file_dest = "ComfyUI/models/animatediff_motion_lora" + lora_file_links = { + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_tony_stark_r64_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_train_r128_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/300_car_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_desert_48_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_jeep_driving_r32_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_man_running_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_rotation_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/750_jeep_driving_r32_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/300_zooming_in_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_cat_walking_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_playing_banjo_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_woman_dancing_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_zooming_out_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif" + } # ---------------- ADD LORA ----------------- with tab1: @@ -312,7 +330,6 @@ def update_model(): if st.button("Check again", key="check_again"): st.rerun() else: - filename_video_dict = read_from_motion_lora_local_db() # cleaning empty lora vals for idx, lora in enumerate(st.session_state[f"lora_data_{shot.uuid}"]): if not lora: @@ -340,8 +357,7 @@ def update_model(): st.rerun() # displaying preview - if 
motion_lora and motion_lora in filename_video_dict: - st.image(filename_video_dict[motion_lora]) + display_motion_lora(motion_lora, lora_file_links) if len(st.session_state[f"lora_data_{shot.uuid}"]) == 0: text = "Add a LoRA" @@ -362,33 +378,18 @@ def update_model(): where_to_download_from = st.radio("Where would you like to get the LoRA from?", options=["Our list", "From a URL","Upload a LoRA"], key="where_to_download_from", horizontal=True) if where_to_download_from == "Our list": - with text1: - file_links = [ - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_tony_stark_r64_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_train_r128_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/300_car_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_desert_48_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_jeep_driving_r32_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_man_running_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_rotation_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/750_jeep_driving_r32_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/300_zooming_in_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_cat_walking_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_playing_banjo_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_woman_dancing_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_zooming_out_temporal_unet.safetensors" - ] - - selected_lora_optn = st.selectbox("Which LoRA would you like to download?", options=[a.split("/")[-1] for a in file_links], key="selected_lora") + with text1: + selected_lora_optn = st.selectbox("Which LoRA would you like to download?", options=[a.split("/")[-1] for a in lora_file_links], key="selected_lora") + # Display selected Lora + display_motion_lora(selected_lora_optn, lora_file_links) + if st.button("Download LoRA", key="download_lora"): with st.spinner("Downloading LoRA..."): save_directory = "ComfyUI/models/animatediff_motion_lora" os.makedirs(save_directory, exist_ok=True) # Create the directory if it doesn't exist # Extract the filename from the URL - selected_lora = next((ele for idx, ele in enumerate(file_links) if selected_lora_optn in ele), None) + selected_lora, lora_idx = next(((ele, idx) for idx, ele in enumerate(lora_file_links.keys()) if selected_lora_optn in ele), None) filename = selected_lora.split("/")[-1] save_path = os.path.join(save_directory, filename) @@ -413,7 +414,6 @@ def update_model(): st.error("Failed to download LoRA") elif where_to_download_from == "From a URL": - with text1: text_input = st.text_input("Enter the URL of the LoRA", key="text_input_lora") with text2: diff --git 
a/ui_components/widgets/display_element.py b/ui_components/widgets/display_element.py index 1e2bcac6..5fa7cb21 100644 --- a/ui_components/widgets/display_element.py +++ b/ui_components/widgets/display_element.py @@ -2,6 +2,7 @@ import streamlit as st from ui_components.methods.file_methods import get_file_size from ui_components.models import InternalFileObject +from utils.local_storage.local_storage import read_from_motion_lora_local_db def individual_video_display_element(file: Union[InternalFileObject, str]): @@ -9,4 +10,18 @@ def individual_video_display_element(file: Union[InternalFileObject, str]): if file_location: st.video(file_location, format='mp4', start_time=0) if get_file_size(file_location) < 5 else st.info("Video file too large to display") else: - st.error("No video present") \ No newline at end of file + st.error("No video present") + + +def display_motion_lora(motion_lora, lora_file_dict = {}): + filename_video_dict = read_from_motion_lora_local_db() + + if motion_lora and motion_lora in filename_video_dict: + st.image(filename_video_dict[motion_lora]) + else: + loras = [ele.split("/")[-1] for ele in lora_file_dict.keys()] + idx = loras.index(motion_lora) + if idx >= 0: + st.image(lora_file_dict[list(lora_file_dict.keys())[idx]]) + else: + st.warning("No preview video available") \ No newline at end of file diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py index abe316da..df4b530f 100644 --- a/ui_components/widgets/sidebar_logger.py +++ b/ui_components/widgets/sidebar_logger.py @@ -194,6 +194,10 @@ def sidebar_logger(shot_uuid): st.session_state['frame_styling_view_type'] = "Explorer" st.rerun() + + shot_data = origin_data.get("shot_data", None) + if shot_data: + pass # add jump_to_shot element # if it's not the last log #if _ != len(log_list) - 1: diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index 5676d7f7..0bb8c6e3 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -18,7 +18,7 @@ from ui_components.widgets.animation_style_element import update_interpolation_settings from utils import st_memory from utils.data_repo.data_repo import DataRepo -from utils.ml_processor.constants import ML_MODEL +from utils.ml_processor.constants import ML_MODEL, ComfyWorkflow @@ -78,18 +78,21 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): if stage == CreativeProcessType.MOTION.value: if current_variant != -1 and variants[current_variant]: individual_video_display_element(variants[current_variant]) - with st.expander("Upscale settings", expanded=False): - styling_model, upscaler_type, upscale_factor, upscale_strength, promote_to_main_variant = upscale_settings() - if st.button("Upscale Main Variant", key=f"upscale_main_variant_{shot_uuid}", help="Upscale the main variant with the selected settings", use_container_width=True): - upscale_video( - shot_uuid, - styling_model, - upscaler_type, - upscale_factor, - upscale_strength, - promote_to_main_variant - ) - + + if not is_upscaled_video(variants[current_variant]): + with st.expander("Upscale settings", expanded=False): + styling_model, upscaler_type, upscale_factor, upscale_strength, promote_to_main_variant = upscale_settings() + if st.button("Upscale Main Variant", key=f"upscale_main_variant_{shot_uuid}", help="Upscale the main variant with the selected settings", use_container_width=True): + upscale_video( + shot_uuid, + 
styling_model, + upscaler_type, + upscale_factor, + upscale_strength, + promote_to_main_variant + ) + else: + st.info("Upscaled video") create_video_download_button(variants[current_variant].location, tag="var_compare") variant_inference_detail_element(variants[current_variant], stage, shot_uuid, timing_list, tag="var_compare") @@ -123,6 +126,9 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): individual_video_display_element(variants[variant_index]) else: st.error("No video present") + + if is_upscaled_video(variants[variant_index]): + st.info("Upscaled video") create_video_download_button(variants[variant_index].location, tag="var_details") variant_inference_detail_element(variants[variant_index], stage, shot_uuid, timing_list, tag="var_details") @@ -143,7 +149,14 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): st.markdown("***") # Add markdown line cols = st.columns(num_columns) # Prepare for the next row # Add markdown line if this is not the last variant in page_indices - + +def is_upscaled_video(variant: InternalFileObject): + log = variant.inference_log + if log.output_details and json.loads(log.output_details).get("model_name", "") == ComfyWorkflow.UPSCALER.value: + return True + return False + + def image_variant_details(variant: InternalFileObject): with st.expander("Inference Details", expanded=False): if variant.inference_params and 'query_dict' in variant.inference_params: @@ -213,8 +226,6 @@ def variant_inference_detail_element(variant: InternalFileObject, stage, shot_uu st.markdown("##### Frame settings ---") st.write("To see the settings for each frame, click on the 'Boot up settings' button above and they'll load below.") st.button("Close settings", key=f"close_{tag}_{variant.name}", help="Close this section", use_container_width=True) - - if stage != CreativeProcessType.MOTION.value: h1, h2 = st.columns([1, 1]) From cc05eda62dab837c2580f279c58c05aedb3622a4 Mon Sep 17 00:00:00 2001 From: peteromallet Date: Wed, 20 Mar 2024 01:05:55 +0100 Subject: [PATCH 31/43] Structure image --- banodoco_settings.py | 86 +++++++++------- text.json | 1 - .../widgets/animation_style_element.py | 98 ++++++++++++++----- .../widgets/variant_comparison_grid.py | 1 + utils/media_processor/interpolator.py | 4 +- utils/ml_processor/comfy_data_transform.py | 60 +++++++++++- 6 files changed, 182 insertions(+), 68 deletions(-) delete mode 100644 text.json diff --git a/banodoco_settings.py b/banodoco_settings.py index d324a877..983903af 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -98,7 +98,18 @@ def create_new_user_data(user: InternalUserObject): def create_new_project(user: InternalUserObject, project_name: str, width=512, height=512): data_repo = DataRepo() - # creating a new project for this user + # Use get_all_project_list to check for existing projects for the user + existing_projects_list = data_repo.get_all_project_list(user.uuid) + + # If no projects are returned, set existing_projects_list to an empty list + existing_projects_list = existing_projects_list if existing_projects_list is not None else [] + + # Determine if initial frames should be added based on whether any projects are returned + add_initial_frames = len(existing_projects_list) == 0 + + # Determine if initial frames should be added based on whether any projects are returned + + # Proceed with creating a new project project_data = { "user_id": user.uuid, "name": project_name, @@ -115,43 +126,46 @@ def create_new_project(user: 
InternalUserObject, project_name: str, width=512, h } shot = data_repo.create_shot(**shot_data) - - # create timings for init_images - init_images_path = os.path.join("sample_assets", "sample_images", "init_frames") - init_image_list = list_files_in_folder(init_images_path) st.session_state["project_uuid"] = project.uuid - - for idx, img_path in enumerate(init_image_list): - img_path = os.path.join(init_images_path, img_path) - img = Image.open(img_path) - img = img.resize((width, height)) - - unique_file_name = f"{str(uuid.uuid4())}.png" - file_location = f"videos/{project.uuid}/resources/prompt_images/{unique_file_name}" - hosted_url = save_or_host_file(img, file_location, mime_type='image/png', dim=(width, height)) - file_data = { - "name": str(uuid.uuid4()), - "type": InternalFileType.IMAGE.value, - "project_id": project.uuid, - "dim": (width, height), - } - - if hosted_url: - file_data.update({'hosted_url': hosted_url}) - else: - file_data.update({'local_path': file_location}) - - source_image = data_repo.create_file(**file_data) - timing_data = { - "frame_time": 0.0, - "aux_frame_index": idx, - "source_image_id": source_image.uuid, - "shot_id": shot.uuid, - } - timing: InternalFrameTimingObject = data_repo.create_timing(**timing_data) - - add_image_variant(source_image.uuid, timing.uuid) + # Add initial frames only if there are no existing projects (i.e., it's the user's first project) + if add_initial_frames: + init_images_path = os.path.join("sample_assets", "sample_images", "init_frames") + init_image_list = list_files_in_folder(init_images_path) + image_extensions = {'.png', '.jpg', '.jpeg', '.gif'} + init_image_list = [img for img in init_image_list if os.path.splitext(img)[1].lower() in image_extensions] + + for idx, img_path in enumerate(init_image_list): + img_path = os.path.join(init_images_path, img_path) + img = Image.open(img_path) + img = img.resize((width, height)) + + unique_file_name = f"{str(uuid.uuid4())}.png" + file_location = f"videos/{project.uuid}/resources/prompt_images/{unique_file_name}" + hosted_url = save_or_host_file(img, file_location, mime_type='image/png', dim=(width, height)) + file_data = { + "name": str(uuid.uuid4()), + "type": InternalFileType.IMAGE.value, + "project_id": project.uuid, + "dim": (width, height), + } + + if hosted_url: + file_data.update({'hosted_url': hosted_url}) + else: + file_data.update({'local_path': file_location}) + + source_image = data_repo.create_file(**file_data) + + timing_data = { + "frame_time": 0.0, + "aux_frame_index": idx, + "source_image_id": source_image.uuid, + "shot_id": shot.uuid, + } + timing: InternalFrameTimingObject = data_repo.create_timing(**timing_data) + + add_image_variant(source_image.uuid, timing.uuid) # create default ai models model_list = create_predefined_models(user) diff --git a/text.json b/text.json deleted file mode 100644 index 03799414..00000000 --- a/text.json +++ /dev/null @@ -1 +0,0 @@ -{"2": {"inputs": {"frame_rate": 12, "loop_count": 0, "filename_prefix": "AnimateDiff", "format": "video/h264-mp4", "pix_fmt": "yuv420p", "crf": 19, "save_metadata": true, "pingpong": false, "save_output": true, "images": ["34", 0]}, "class_type": "VHS_VideoCombine", "_meta": {"title": "Video Combine \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62"}}, "11": {"inputs": {"ckpt_name": "dynamicrafter_512_interp_v1.ckpt", "dtype": "auto"}, "class_type": "DynamiCrafterModelLoader", "_meta": {"title": "DynamiCrafterModelLoader"}}, "12": {"inputs": {"steps": 50, "cfg": 4, "eta": 1, "frames": 16, "prompt": 
"tasdas", "seed": 848590531769060, "fs": 10, "keep_model_loaded": true, "vae_dtype": "auto", "model": ["11", 0], "images": ["15", 0]}, "class_type": "DynamiCrafterBatchInterpolation", "_meta": {"title": "DynamiCrafterBatchInterpolation"}}, "15": {"inputs": {"image1": ["16", 0], "image2": ["17", 0]}, "class_type": "ImageBatch", "_meta": {"title": "Batch Images"}}, "16": {"inputs": {"image": "fe43291a-ae2e-48a5-bc0d-a73ad96c9785.png", "upload": "image"}, "class_type": "LoadImage", "_meta": {"title": "Load Image"}}, "17": {"inputs": {"image": "707dd8bd-23cd-498f-ad5a-09da482a11de.png", "upload": "image"}, "class_type": "LoadImage", "_meta": {"title": "Load Image"}}, "34": {"inputs": {"ckpt_name": "film_net_fp32.pt", "clear_cache_after_n_frames": 10, "multiplier": 3, "frames": ["12", 0]}, "class_type": "FILM VFI", "_meta": {"title": "FILM VFI"}}} \ No newline at end of file diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 0e9dc491..8978689e 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -7,16 +7,20 @@ from shared.constants import AnimationStyleType, AnimationToolType, InferenceParamType from ui_components.constants import DEFAULT_SHOT_MOTION_VALUES, DefaultProjectSettingParams, ShotMetaData from ui_components.methods.animation_style_methods import load_shot_settings +from ui_components.methods.common_methods import save_new_image from ui_components.methods.video_methods import create_single_interpolated_clip from utils.data_repo.data_repo import DataRepo from utils.ml_processor.motion_module import AnimateDiffCheckpoint from ui_components.models import InternalFrameTimingObject, InternalShotObject +from ui_components.methods.file_methods import save_or_host_file from utils import st_memory import numpy as np import matplotlib.pyplot as plt import os import requests +from PIL import Image # import re +import uuid import re default_model = "Deliberate_v2.safetensors" @@ -41,7 +45,7 @@ def animation_style_element(shot_uuid): st.markdown("### 🎥 Generate animations") st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") - type_of_animation = st.radio("What type of animation would you like to generate?", options=["Batch Creative Interpolation", "2-Image Realistic Interpolation"], key="type_of_animation",horizontal=True, help="**Batch Creative Interpolaton** lets you input multple images and control the motion and style of each frame - resulting in a fluid, surreal and highly-controllable motion. \n\n **2-Image Realistic Interpolation** is a simpler way to generate animations - it generates a video by interpolating between two images, and is best for realistic motion.") + type_of_animation = st_memory.radio("What type of animation would you like to generate?", options=["Batch Creative Interpolation", "2-Image Realistic Interpolation"],horizontal=True, help="**Batch Creative Interpolaton** lets you input multple images and control the motion and style of each frame - resulting in a fluid, surreal and highly-controllable motion. 
\n\n **2-Image Realistic Interpolation** is a simpler way to generate animations - it generates a video by interpolating between two images, and is best for realistic motion.",key=f"type_of_animation_{shot.uuid}") if type_of_animation == "Batch Creative Interpolation": @@ -452,10 +456,8 @@ def update_model(): e1, e2, e3 = st.columns([1, 1,1]) with e1: - strength_of_adherence = st.slider("How much would you like to force adherence to the input images?", min_value=0.0, max_value=1.0, step=0.01, key="strength_of_adherence", value=st.session_state[f"strength_of_adherence_value_{shot.uuid}"]) - with e2: - st.info("Higher values may cause flickering and sudden changes in the video. Lower values may cause the video to be less influenced by the input images but can lead to smoother motion and better colours.") - + strength_of_adherence = st.slider("How much would you like to force adherence to the input images?", min_value=0.0, max_value=1.0, step=0.01, key="strength_of_adherence", value=st.session_state[f"strength_of_adherence_value_{shot.uuid}"], help="Higher values may cause flickering and sudden changes in the video. Lower values may cause the video to be less influenced by the input images but can lead to smoother motion and better colours.") + f1, f2, f3 = st.columns([1, 1, 1]) with f1: overall_positive_prompt = "" @@ -483,31 +485,63 @@ def update_prompt(): st.markdown("***") st.markdown("##### Overall motion settings") - h1, h2, h3 = st.columns([0.5, 1.5, 1]) + h1, h2, h3 = st.columns([1, 0.5, 2]) with h1: # will fix this later if f"type_of_motion_context_index_{shot.uuid}" in st.session_state and isinstance(st.session_state[f"type_of_motion_context_index_{shot.uuid}"], str): st.session_state[f"type_of_motion_context_index_{shot.uuid}"] = ["Low", "Standard", "High"].index(st.session_state[f"type_of_motion_context_index_{shot.uuid}"]) - type_of_motion_context = st.radio("Type of motion context:", options=["Low", "Standard", "High"], key="type_of_motion_context", horizontal=False, index=st.session_state[f"type_of_motion_context_index_{shot.uuid}"]) + type_of_motion_context = st.radio("Type of motion context:", options=["Low", "Standard", "High"], key="type_of_motion_context", horizontal=True, index=st.session_state[f"type_of_motion_context_index_{shot.uuid}"], help="This is how much the motion will be informed by the previous and next frames. 'High' can make it smoother but increase artifacts - while 'Low' make the motion less smooth but removes artifacts. Naturally, we recommend Standard.") + + st.session_state[f"amount_of_motion_{shot.uuid}"] = st.slider("Amount of motion:", min_value=0.5, max_value=1.5, step=0.01,value=1.3, key="amount_of_motion_overall", on_change=lambda: update_motion_for_all_frames(shot.uuid, timing_list), help="You can also tweak this on an individual frame level in the advanced settings above.") + + + i1, i2, i3 = st.columns([1, 0.5, 1.5]) - with h2: - st.info("This is how much the motion will be informed by the previous and next frames. 'High' can make it smoother but increase artifacts - while 'Low' make the motion less smooth but removes artifacts. 
Naturally, we recommend Standard.") - st.write("") - i1, i3,_ = st.columns([1,2,1]) with i1: - amount_of_motion = st.slider("Amount of motion:", min_value=0.5, max_value=1.5, step=0.01, key="amount_of_motion", value=st.session_state[f"amount_of_motion_{shot.uuid}"]) - st.write("") - if st.button("Bulk update amount of motion", key="update_motion", help="This will update this value in all the frames"): - for idx, timing in enumerate(timing_list): - st.session_state[f'motion_during_frame_{shot.uuid}_{idx}'] = amount_of_motion - st.success("Updated amount of motion") - time.sleep(0.3) - st.rerun() - with i3: - st.write("") - st.write("") - st.info("This actually updates the motion during frames in the advanced settings above - but we put it here because it has a big impact on the video. You can scroll up to see the changes and tweak for individual frames.") + if f'structure_control_image_{shot.uuid}' not in st.session_state: + st.session_state[f"structure_control_image_{shot.uuid}"] = None + + if f"strength_of_structure_control_image_{shot.uuid}" not in st.session_state: + st.session_state[f"strength_of_structure_control_image_{shot.uuid}"] = None + control_motion_with_image = st_memory.toggle("Control motion with an image", help="This will allow you to upload images to control the motion of the video.",key=f"control_motion_with_image_{shot.uuid}") + + if control_motion_with_image: + uploaded_image = st.file_uploader("Upload images to control motion", type=["png", "jpg", "jpeg"], accept_multiple_files=False) + if st.button("Add image", key="add_images"): + if uploaded_image: + + + project_settings = data_repo.get_project_setting(shot.project.uuid) + + width, height = project_settings.width, project_settings.height + # Convert the uploaded image file to PIL Image + uploaded_image_pil = Image.open(uploaded_image) + uploaded_image_pil = uploaded_image_pil.resize((width, height)) + image = save_new_image(uploaded_image_pil, shot.project.uuid) + image_location = image.local_path + + # Update session state with the URL of the uploaded image + st.success("Image uploaded") + st.session_state[f"structure_control_image_{shot.uuid}"] = image_location + + + else: + st.warning("No images uploaded") + else: + st.session_state[f"structure_control_image_{shot.uuid}"] = None + with i2: + if st.session_state[f"structure_control_image_{shot.uuid}"]: + st.info("Control image:") + st.image(st.session_state[f"structure_control_image_{shot.uuid}"], use_column_width=True) + st.session_state[f"strength_of_structure_control_image_{shot.uuid}"] = st.slider("Strength of control image:", min_value=0.0, max_value=1.0, step=0.01, key="strength_of_structure_control_image", value=0.5, help="This is how much the control image will influence the motion of the video.") + if st.button("Remove image", key="remove_images"): + st.session_state[f"structure_control_image_{shot.uuid}"] = None + st.success("Image removed") + st.rerun() + + + type_of_frame_distribution = "dynamic" type_of_key_frame_influence = "dynamic" type_of_strength_distribution = "dynamic" @@ -523,6 +557,7 @@ def update_prompt(): motion_scale = 1.3 interpolation_style = 'ease-in-out' buffer = 4 + amount_of_motion = 1.3 (dynamic_strength_values, dynamic_key_frame_influence_values, dynamic_frame_distribution_values, @@ -570,7 +605,11 @@ def update_prompt(): animation_stype=AnimationStyleType.CREATIVE_INTERPOLATION.value, max_frames=str(dynamic_frame_distribution_values[-1]), lora_data=lora_data, - shot_data=shot_meta_data + shot_data=shot_meta_data, + 
structure_control_image=st.session_state[f"structure_control_image_{shot.uuid}"], + strength_of_structure_control_image=st.session_state[f"strength_of_structure_control_image_{shot.uuid}"], + + ) position = "generate_vid" @@ -726,7 +765,7 @@ def update_prompt(): st.image(timing_second.primary_image.location, use_column_width=True) with col2: - description_of_motion = st.text_area("Describe the motion you want between the frames:", key="description_of_motion") + description_of_motion = st_memory.text_area("Describe the motion you want between the frames:", key=f"description_of_motion_{shot.uuid}", value=st.session_state[f"description_of_motion_{shot.uuid}"]) st.info("This is very important and will likely require some iteration.") variant_count = 1 # Assuming a default value for variant_count, adjust as necessary @@ -823,7 +862,7 @@ def update_session_state_with_animation_details(shot_uuid, timing_list, strength main_setting_data[f"type_of_motion_context_index_{shot.uuid}"] = st.session_state["type_of_motion_context"] main_setting_data[f"positive_prompt_video_{shot.uuid}"] = st.session_state["overall_positive_prompt"] main_setting_data[f"negative_prompt_video_{shot.uuid}"] = st.session_state["overall_negative_prompt"] - main_setting_data[f"amount_of_motion_{shot.uuid}"] = st.session_state["amount_of_motion"] + # main_setting_data[f"amount_of_motion_{shot.uuid}"] = st.session_state["amount_of_motion"] checkpoints_dir = "ComfyUI/models/checkpoints" all_files = os.listdir(checkpoints_dir) @@ -850,6 +889,13 @@ def update_session_state_with_animation_details(shot_uuid, timing_list, strength data_repo.update_shot(**{"uuid": shot_uuid, "meta_data": json.dumps(meta_data)}) return meta_data + +def update_motion_for_all_frames(shot_uuid, timing_list): + amount_of_motion = st.session_state.get("amount_of_motion_overall", 1.0) # Default to 1.0 if not set + for idx, _ in enumerate(timing_list): + st.session_state[f'motion_during_frame_{shot_uuid}_{idx}'] = amount_of_motion + + def format_frame_prompts_with_buffer(frame_numbers, individual_prompts, buffer): adjusted_frame_numbers = [frame + buffer for frame in frame_numbers] diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index 5676d7f7..3390c163 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -190,6 +190,7 @@ def variant_inference_detail_element(variant: InternalFileObject, stage, shot_uu shot_meta_data = get_generation_settings_from_log(variant.inference_log.uuid) if shot_meta_data and shot_meta_data.get("main_setting_data", None): st.markdown("##### Main settings ---") + st.write(shot_meta_data) for k, v in shot_meta_data.get("main_setting_data", {}).items(): # Bold the title title = f"**{k.split(str(shot.uuid))[0][:-1]}:**" diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index fb4c568d..644c53c0 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -107,7 +107,9 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count "individual_negative_prompts": settings["individual_negative_prompts"], "max_frames": settings["max_frames"], "lora_data": settings["lora_data"], - "shot_data": settings["shot_data"] + "shot_data": settings["shot_data"], + "structure_control_image": settings["structure_control_image"], + "strength_of_structure_control_image": settings["strength_of_structure_control_image"] } # adding the 
input images diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index 02e9578c..b49450df 100644 --- a/utils/ml_processor/comfy_data_transform.py +++ b/utils/ml_processor/comfy_data_transform.py @@ -311,6 +311,59 @@ def transform_ipadaptor_face_plus_workflow(query: MLQueryObject): @staticmethod def transform_steerable_motion_workflow(query: MLQueryObject): + + def update_structure_control_image(json, image, weight): + # Integrate all updates including new nodes and modifications in a single step + image = os.path.basename(image) + + json.update({ + "560": { + "inputs": { + "image": image, + "upload": "image" + }, + "class_type": "LoadImage", + "_meta": { + "title": "Load Image" + } + }, + "563": { + "inputs": { + "weight": weight, + "noise": 0.3, + "weight_type": "original", + "start_at": 0, + "end_at": 1, + "short_side_tiles": 2, + "tile_weight": 0.6, + "ipadapter": ["564", 0], + "clip_vision": ["370", 0], + "image": ["560", 0], + "model": ["558", 3] + }, + "class_type": "IPAdapterTilesMasked", + "_meta": { + "title": "IPAdapter Masked Tiles (experimental)" + } + }, + "564": { + "inputs": { + "ipadapter_file": "ip_plus_composition_sd15.safetensors" + }, + "class_type": "IPAdapterModelLoader", + "_meta": { + "title": "Load IPAdapter Model" + } + } + }) + + # Update the "207" node's model pair to point to "563" + if "207" in json: + json["207"]["inputs"]["model"] = ["563", 0] + + return json + + def update_json_with_loras(json_data, loras): start_id = 536 new_ids = [] @@ -392,9 +445,8 @@ def update_json_with_loras(json_data, loras): workflow["543"]["inputs"]["max_frames"] = int(float(sm_data.get('max_frames'))) workflow["543"]["inputs"]["text"] = sm_data.get('individual_negative_prompts') - # download the json file as text.json - # with open("text.json", "w") as f: - # f.write(json.dumps(workflow)) + if sm_data.get('structure_control_image'): + workflow = update_structure_control_image(workflow, sm_data.get('structure_control_image'), sm_data.get('strength_of_structure_control_image')) ignore_list = sm_data.get("lora_data", []) return json.dumps(workflow), output_node_ids, [], ignore_list @@ -420,7 +472,7 @@ def transform_dynamicrafter_workflow(query: MLQueryObject): extra_models_list = [ { "filename": "dynamicrafter_512_interp_v1.ckpt", - "url": "https://huggingface.co/Doubiiu/DynamiCrafter_512_Interp/resolve/main/model.ckpt?download=true", + "url": "https://huggingface.co/Kijai/DynamiCrafter_pruned/blob/resolve/dynamicrafter_512_interp_v1_bf16.safetensors?download=true", "dest": "./ComfyUI/models/checkpoints/" }] From 2e594002bd9608643863878567dc9060e39b9b2c Mon Sep 17 00:00:00 2001 From: peteromallet Date: Wed, 20 Mar 2024 18:04:23 +0100 Subject: [PATCH 32/43] dynamicrafter + image guidance --- resized_image.jpg | Bin 0 -> 26657 bytes text.json | 1 + .../widgets/animation_style_element.py | 17 +++-- utils/media_processor/interpolator.py | 6 ++ utils/ml_processor/comfy_data_transform.py | 5 +- .../comfy_workflows/dynamicrafter_api.json | 69 ++++++++++++++++-- 6 files changed, 83 insertions(+), 15 deletions(-) create mode 100644 resized_image.jpg create mode 100644 text.json diff --git a/resized_image.jpg b/resized_image.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d8752545fb411457138c50809a90987da0f4abf GIT binary patch literal 26657 zcmbT7Wl$W^*XD=d9w1n7w?MGLC4}I?eemE6KIjmfKp0?f2m}chbRal`CAb9$?htfv zcgymB->u!M{j|HctNTme>bk$`u5(X6=Q;mo|NR9#S5;I|1fZb-0BBDK;NJp39)N>| 
z{77yP$Xo^YH5K=ZmK5A^kT5XV$*u)Tr(?U?Q|@$n&ar2tY4ToO%e54Nv~pPb4|?%0 z75Ljz)invN;bevYKv5CN106H#^siBW55?k70p4ESW=rXzk)dWhL$^2!>C>;heEn|W zCPrbvAe!o<6unGpJF}#)wel^6#LKm@z%|-<55Us+ru{WdMmCQ~g}zWm4Z{P_f3xa; z3Wvbn620-I(p}u6L83(1XXs`l_*eR#)$eH>Hdd<|Dn$e-fCm9o5Ady}E{Mr4tmk}L zsjicxEv2^9mUS_pU*JyX`d64a0Z*lC>)NT+wOOvRe7x+49>_X=eSaFjyp$pno`SmR zLzX8cS@Sa^SiD&Frg$fw^@*q2HQu2TIu5ucM_ziGw|0KhBuvUeRLe>%~bHhMRQ>|(RCQi?J_86E3h;%_S5iT)Xp6Dr7W=DRZI+o0z8sD6e|Yx#3RAA4tcs^W$=#6Y zdRUDXNstVE*!@1WwVI4^P+r}ooure$L>v!O*jE#7OzlZU89MH)q0N+J@IQJ-{)7HC z%E&`Lp{V{y+JCG>CZo{ZQJ;p z!Fs$>KC7o(={H?>Se?k;z;^trsnvWnVXNBtQ$o2Pb<|;$`x^5-f5G>^MXgaPN?J0OqHO29X453P7TDn3ZQf7(2Z2yWZ!71Kf!?FGV6p&y>Fw!T5X6Oe zJXCYLmf)?iNLjitaOXa_t8vSIK2>yIL$1b!kU(!G~y4D zo;iv8p@+;qzMopg_;K*MUlEw~O<^R1Ne3~;epk8w00sB&+Pd!s{4ckN$8q8*;<wZLz)XGTPn?D_E_qqDWwgLL_J~ss~^zl=05C zE$u(EAaCDJGbta!o}Z8Mu9s20k51F)yqoSqg^15zLHO5<-f9zH-A#FN+ohGaHamI& z><1OK3VhD*p^a(ss}V^cRV4JRd#OaS`CYoV(x$w+3J!W!Y#XI<$1)tBVO{n(nVV7ivp zJhziEK*z2-X1!KPH^NGumB~*AH3{2j%%^es)-g1aH#{=?A&N3a;4xi^e~?!trbHu+ z9~dK^E2CE|fYwr4nnCDDUni|OPrXB)dQ^&WP|HPxHDwJ{Q&uB24AB->nzd`gx6-Pm zS-0W;01D1q8pm?zqLVaMMyC%fW}}NumhII_?Tqz3>!AA_8%Xko4NDYj8h|@i{?|<(ovQ{K7fs$*D@eZ-}nU>lk`=mKP?KNv_nvdD-+BnW# zhAW76CGy>CqLezbp0xTR!pMDT%_kK^$VVXYQOQ54t@%tjg6eIcoQlG^y21mLe>RSzb%ZtcNR zRa0$lQbLius`%WQhL& z-C%3m^oDsf%V#93JZi)@e2j`wihGqN*2hr{dNYtd-E$wap#FHNZZv4FVr93ME4Wx- zqY<2cBUurvl^MaQmN4B^RXBN(&E$EPjQjesPTXS^9RaR?_WL?>QCk zi5AS5*^rTrIv=R-T-L4N{TBJ!Nwmd{`k5@>$tUW(ujnf`P2WRBRa(sR$aM&me65r8 zsRhiJGNdsL?&K<}^(5DQru;qdeC_r*r@FVglz!`V;xvT&ApWAYbk7KB9vYr0Y$cZF z>}NL;y5wv+u1-M(TE`o|ql&~~mq zX^27xTvEuT1_x@bDQfiT!9iIZ6O)s>GUqKLg(UpPpJ7_MbRdH#ALq4Hj?QTr?sgv` z7)PJ>k2oKIt*tu3QdNrJIn7nfKl7NfD$>YVlm!_D}s%PcjBc-&w7+V2n3F_@op-4kzA}0 z=B~gw=AcV@w-Rj?n`Ol^U@F85^VBE{908M8BTb9%S;?D45>ZJ-W@u!CP6mo8jBw_g zGx<=wgm9pqD59DZ*yOGquH-X0<+=|+S=SbdbDU8{c1J^sMpfi~LT>HtRFTZBiYTpC z#xHWm_Cj2I%-w23pv0qw{V1ZPwpxl2^hjPZtj8y~u6I(<%qJ-@z#IZ7qJ+gHtaA4d zN{m(6PtvmH01`1p6r@!>$N(5M>>du(8&vRxtGeP_2|mcp{q)A={JK#^X17M~W^~bH z0h5}xp17il(U}dnoIw$C2?MaND)AqSueCX(wu#ycV8wQ+!EdReicD*BU*SHQllYQr z#>Qcf3Jk?MJ(iq5vily@%* zW2YSe7_A*2!?0gl8^)E8$Uap;_fN0A6jv0dB^#!7C`HcA8biX!7n|7$ z&tEK4zu_2e*SKEayXX1QMMt)ua@{C)2JpOW+}*z4_XcV!j}5~NTdn17>OO7fA-}?k zDJrvXaCD%S#S42jmTxSuTyjXQ5vK$4hd){ zJxMfCSvfOh6k2GJ#vK)hp&6+D#coOc%5&%_qNd}q7xr=k^2p$NaB5L)5Ci4TJ$unb YJF!_3Dk^ZQ2hx^`DHJUfQAiK}*=>JuV*mgE literal 0 HcmV?d00001 diff --git a/text.json b/text.json new file mode 100644 index 00000000..c65dab33 --- /dev/null +++ b/text.json @@ -0,0 +1 @@ +{"207": {"inputs": {"add_noise": "enable", "noise_seed": 940648779231253, "steps": 20, "cfg": 8, "sampler_name": "euler_ancestral", "scheduler": "normal", "start_at_step": 0, "end_at_step": 20, "return_with_leftover_noise": "disable", "preview_method": "auto", "vae_decode": "true", "model": ["563", 0], "positive": ["505", 0], "negative": ["505", 1], "latent_image": ["464", 0], "optional_vae": ["458", 0]}, "class_type": "KSampler Adv. (Efficient)", "_meta": {"title": "KSampler Adv. 
(Efficient), CN sampler"}}, "281": {"inputs": {"frame_rate": 15, "loop_count": 0, "filename_prefix": "steerable-motion/AD_", "format": "video/h264-mp4", "pix_fmt": "yuv420p", "crf": 20, "save_metadata": true, "pingpong": false, "save_output": true, "images": ["559", 0]}, "class_type": "VHS_VideoCombine", "_meta": {"title": "Video Combine \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62"}}, "342": {"inputs": {"context_length": 16, "context_stride": 2, "context_overlap": 4, "context_schedule": "uniform", "closed_loop": false, "fuse_method": "flat", "use_on_equal_length": false, "start_percent": 0, "guarantee_steps": 1}, "class_type": "ADE_AnimateDiffUniformContextOptions", "_meta": {"title": "Context Options\u25c6Looped Uniform \ud83c\udfad\ud83c\udd50\ud83c\udd53"}}, "354": {"inputs": {"split_index": 3, "images": ["207", 5]}, "class_type": "VHS_SplitImages", "_meta": {"title": "Split Image Batch \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62"}}, "369": {"inputs": {"ipadapter_file": "ip-adapter-plus_sd15.bin"}, "class_type": "IPAdapterModelLoader", "_meta": {"title": "Load IPAdapter Model"}}, "370": {"inputs": {"clip_name": "SD1.5/pytorch_model.bin"}, "class_type": "CLIPVisionLoader", "_meta": {"title": "Load CLIP Vision"}}, "389": {"inputs": {"images": ["401", 0]}, "class_type": "PreviewImage", "_meta": {"title": "Preview Image"}}, "401": {"inputs": {"directory": "./ComfyUI/input/", "image_load_cap": 0, "skip_first_images": 0, "select_every_nth": 1}, "class_type": "VHS_LoadImagesPath", "_meta": {"title": "Load Images (Path) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62"}}, "436": {"inputs": {"images": ["558", 0]}, "class_type": "PreviewImage", "_meta": {"title": "Preview Image"}}, "458": {"inputs": {"vae_name": "vae-ft-mse-840000-ema-pruned.safetensors"}, "class_type": "VAELoader", "_meta": {"title": "Load VAE"}}, "461": {"inputs": {"ckpt_name": "Deliberate_v2.safetensors"}, "class_type": "CheckpointLoaderSimple", "_meta": {"title": "Load Checkpoint"}}, "464": {"inputs": {"width": 512, "height": 512, "batch_size": ["558", 5]}, "class_type": "ADE_EmptyLatentImageLarge", "_meta": {"title": "Empty Latent Image (Big Batch) \ud83c\udfad\ud83c\udd50\ud83c\udd53"}}, "467": {"inputs": {"sparsectrl_name": "v3_sd15_sparsectrl_rgb.ckpt", "use_motion": true, "motion_strength": 1, "motion_scale": 1, "sparse_method": ["558", 4]}, "class_type": "ACN_SparseCtrlLoaderAdvanced", "_meta": {"title": "Load SparseCtrl Model \ud83d\udec2\ud83c\udd50\ud83c\udd52\ud83c\udd5d"}}, "468": {"inputs": {"strength": 0.6, "start_percent": 0, "end_percent": 0.05, "positive": ["558", 1], "negative": ["558", 2], "control_net": ["467", 0], "image": ["469", 0]}, "class_type": "ACN_AdvancedControlNetApply", "_meta": {"title": "Apply Advanced ControlNet \ud83d\udec2\ud83c\udd50\ud83c\udd52\ud83c\udd5d"}}, "469": {"inputs": {"image": ["401", 0], "vae": ["458", 0], "latent_size": ["464", 0]}, "class_type": "ACN_SparseCtrlRGBPreprocessor", "_meta": {"title": "RGB SparseCtrl \ud83d\udec2\ud83c\udd50\ud83c\udd52\ud83c\udd5d"}}, "470": {"inputs": {"lora_name": "v3_sd15_adapter.ckpt", "strength_model": 0.1, "strength_clip": 0.25, "model": ["461", 0], "clip": ["461", 1]}, "class_type": "LoraLoader", "_meta": {"title": "Load LoRA"}}, "505": {"inputs": {"strength": 0.5, "start_percent": 0.6, "end_percent": 0.675, "positive": ["468", 0], "negative": ["468", 1], "control_net": ["467", 0], "image": ["469", 0]}, "class_type": "ACN_AdvancedControlNetApply", "_meta": {"title": "Apply Advanced ControlNet 
\ud83d\udec2\ud83c\udd50\ud83c\udd52\ud83c\udd5d"}}, "508": {"inputs": {"image": ["401", 0]}, "class_type": "GetImageSize+", "_meta": {"title": "\ud83d\udd27 Get Image Size"}}, "541": {"inputs": {"text": "\"4\": \"\", \"12\": \"\"", "max_frames": 8, "current_frame": 0, "print_output": false, "pre_text": "", "app_text": "", "pw_a": 0, "pw_b": 0, "pw_c": 0, "pw_d": 0, "clip": ["470", 1]}, "class_type": "PromptSchedule", "_meta": {"title": "Positive Prompt"}}, "543": {"inputs": {"text": "\"4\": \"\", \"12\": \"\"", "max_frames": 8, "current_frame": 0, "print_output": false, "pre_text": "", "app_text": "", "pw_a": 0, "pw_b": 0, "pw_c": 0, "pw_d": 0, "clip": ["470", 1]}, "class_type": "PromptSchedule", "_meta": {"title": "Negative Prompt"}}, "544": {"inputs": {"float_val": ["548", 0]}, "class_type": "ADE_MultivalDynamic", "_meta": {"title": "Multival Dynamic \ud83c\udfad\ud83c\udd50\ud83c\udd53"}}, "545": {"inputs": {"start_percent": 0, "end_percent": 1, "motion_model": ["546", 0], "scale_multival": ["544", 0]}, "class_type": "ADE_ApplyAnimateDiffModel", "_meta": {"title": "Apply AnimateDiff Model (Adv.) \ud83c\udfad\ud83c\udd50\ud83c\udd53\u2461"}}, "546": {"inputs": {"model_name": "v3_sd15_mm.ckpt"}, "class_type": "ADE_LoadAnimateDiffModel", "_meta": {"title": "Load AnimateDiff Model \ud83c\udfad\ud83c\udd50\ud83c\udd53\u2461"}}, "547": {"inputs": {"beta_schedule": "sqrt_linear (AnimateDiff)", "model": ["558", 3], "m_models": ["545", 0], "context_options": ["342", 0]}, "class_type": "ADE_UseEvolvedSampling", "_meta": {"title": "Use Evolved Sampling \ud83c\udfad\ud83c\udd50\ud83c\udd53\u2461"}}, "548": {"inputs": {"text": "0:(1.3), 12:(1.3)", "print_output": true, "num_latents": ["464", 0]}, "class_type": "BatchValueScheduleLatentInput", "_meta": {"title": "Batch Value Schedule (Latent Input) \ud83d\udcc5\ud83c\udd55\ud83c\udd5d"}}, "558": {"inputs": {"control_net_name": "SD1.5/animatediff/v3_sd15_sparsectrl_rgb.ckpt", "type_of_frame_distribution": "dynamic", "linear_frame_distribution_value": 16, "dynamic_frame_distribution_values": "0, 8", "type_of_key_frame_influence": "dynamic", "linear_key_frame_influence_value": 1.0, "dynamic_key_frame_influence_values": "(None, 0.8), (0.8, None)", "type_of_strength_distribution": "dynamic", "linear_strength_value": "1.0", "dynamic_strength_values": "(None, 0.7, 0.42), (0.42, 0.7, None)", "soft_scaled_cn_weights_multiplier": 0.85, "buffer": 4, "relative_cn_strength": 0.0, "relative_ipadapter_strength": 1.0, "ipadapter_noise": 0.3, "ipadapter_start_at": 0, "ipadapter_end_at": 0.75, "cn_start_at": 0, "cn_end_at": 0.75, "positive": ["541", 0], "negative": ["543", 1], "images": ["401", 0], "model": ["470", 0], "ipadapter": ["369", 0], "clip_vision": ["370", 0]}, "class_type": "BatchCreativeInterpolation", "_meta": {"title": "Batch Creative Interpolation \ud83c\udf9e\ufe0f\ud83c\udd62\ud83c\udd5c"}}, "559": {"inputs": {"ckpt_name": "film_net_fp32.pt", "clear_cache_after_n_frames": 10, "multiplier": 2, "frames": ["354", 2]}, "class_type": "FILM VFI", "_meta": {"title": "FILM VFI"}}, "560": {"inputs": {"image": "videos/temp/a0d6e31f-c958-4d23-92f6-d9f36f85df2d.png", "upload": "image"}, "class_type": "LoadImage", "_meta": {"title": "Load Image"}}, "563": {"inputs": {"weight": 0.61, "noise": 0.3, "weight_type": "original", "start_at": 0, "end_at": 1, "short_side_tiles": 2, "tile_weight": 0.6, "ipadapter": ["564", 0], "clip_vision": ["370", 0], "image": ["560", 0], "model": ["558", 3]}, "class_type": "IPAdapterTilesMasked", "_meta": {"title": "IPAdapter Masked 
Tiles (experimental)"}}, "564": {"inputs": {"ipadapter_file": "ip_plus_composition_sd15.safetensors"}, "class_type": "IPAdapterModelLoader", "_meta": {"title": "Load IPAdapter Model"}}} \ No newline at end of file diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 8978689e..7ee9a359 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -518,14 +518,14 @@ def update_prompt(): uploaded_image_pil = Image.open(uploaded_image) uploaded_image_pil = uploaded_image_pil.resize((width, height)) image = save_new_image(uploaded_image_pil, shot.project.uuid) - image_location = image.local_path - - + # image_location = image.local_path + # Update session state with the URL of the uploaded image st.success("Image uploaded") - st.session_state[f"structure_control_image_{shot.uuid}"] = image_location + st.session_state[f"structure_control_image_{shot.uuid}"] = image.uuid + st.rerun() + - else: st.warning("No images uploaded") else: @@ -533,7 +533,10 @@ def update_prompt(): with i2: if st.session_state[f"structure_control_image_{shot.uuid}"]: st.info("Control image:") - st.image(st.session_state[f"structure_control_image_{shot.uuid}"], use_column_width=True) + file = data_repo.get_file_from_uuid(st.session_state[f"structure_control_image_{shot.uuid}"]) + image = file.local_path + st.image(image) + st.session_state[f"strength_of_structure_control_image_{shot.uuid}"] = st.slider("Strength of control image:", min_value=0.0, max_value=1.0, step=0.01, key="strength_of_structure_control_image", value=0.5, help="This is how much the control image will influence the motion of the video.") if st.button("Remove image", key="remove_images"): st.session_state[f"structure_control_image_{shot.uuid}"] = None @@ -765,7 +768,7 @@ def update_prompt(): st.image(timing_second.primary_image.location, use_column_width=True) with col2: - description_of_motion = st_memory.text_area("Describe the motion you want between the frames:", key=f"description_of_motion_{shot.uuid}", value=st.session_state[f"description_of_motion_{shot.uuid}"]) + description_of_motion = st_memory.text_area("Describe the motion you want between the frames:", key=f"description_of_motion_{shot.uuid}") st.info("This is very important and will likely require some iteration.") variant_count = 1 # Assuming a default value for variant_count, adjust as necessary diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index 644c53c0..357d9aa8 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -116,6 +116,12 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count for idx, img_uuid in enumerate(settings['file_uuid_list']): sm_data[f"file_image_{padded_integer(idx+1)}" + "_uuid"] = img_uuid + + if settings["structure_control_image"] is not None: + # add to file_image__{padded_integer(idx+1)}_uuid + sm_data[f"file_image_{padded_integer(len(settings['file_uuid_list'])+1)}" + "_uuid"] = settings["structure_control_image"] + + # NOTE: @Peter all the above settings are put in the 'data' parameter below ml_query_object = MLQueryObject( prompt="SM", # hackish fix diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index b49450df..41b4098b 100644 --- a/utils/ml_processor/comfy_data_transform.py +++ b/utils/ml_processor/comfy_data_transform.py @@ -314,7 +314,10 @@ def 
transform_steerable_motion_workflow(query: MLQueryObject): def update_structure_control_image(json, image, weight): # Integrate all updates including new nodes and modifications in a single step - image = os.path.basename(image) + data_repo = DataRepo() + image = data_repo.get_file_from_uuid(image) + image = image.filename + # image = os.path.basename(image) json.update({ "560": { diff --git a/utils/ml_processor/comfy_workflows/dynamicrafter_api.json b/utils/ml_processor/comfy_workflows/dynamicrafter_api.json index d35ca185..a656278e 100644 --- a/utils/ml_processor/comfy_workflows/dynamicrafter_api.json +++ b/utils/ml_processor/comfy_workflows/dynamicrafter_api.json @@ -33,14 +33,15 @@ "12": { "inputs": { "steps": 50, - "cfg": 4, + "cfg": 5, "eta": 1, "frames": 16, - "prompt": "dolly zoom", - "seed": 898857724114134, + "prompt": "dolly zoom out", + "seed": 262623773159722, "fs": 10, "keep_model_loaded": true, "vae_dtype": "auto", + "cut_near_keyframes": 0, "model": [ "11", 0 @@ -58,11 +59,11 @@ "15": { "inputs": { "image1": [ - "16", + "37", 0 ], "image2": [ - "17", + "38", 0 ] }, @@ -73,7 +74,7 @@ }, "16": { "inputs": { - "image": "ComfyUI_temp_fshjc_00001_.png", + "image": "ea47a572b4e5b52ea7da22384232381b3e62048fa715f042b38b4da9 (1) (2).jpg", "upload": "image" }, "class_type": "LoadImage", @@ -83,7 +84,7 @@ }, "17": { "inputs": { - "image": "ComfyUI_temp_fshjc_00002_ (1).png", + "image": "2193d9ded46130b41d09133b4b1d2502f0eaa19ea1762252c6581e86 (1) (1).jpg", "upload": "image" }, "class_type": "LoadImage", @@ -105,5 +106,59 @@ "_meta": { "title": "FILM VFI" } + }, + "35": { + "inputs": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "image/gif", + "pingpong": false, + "save_output": true, + "images": [ + "12", + 0 + ] + }, + "class_type": "VHS_VideoCombine", + "_meta": { + "title": "Video Combine 🎥🅥🅗🅢" + } + }, + "37": { + "inputs": { + "mode": "rescale", + "supersample": "true", + "resampling": "lanczos", + "rescale_factor": 0.7000000000000001, + "resize_width": 1024, + "resize_height": 1536, + "image": [ + "16", + 0 + ] + }, + "class_type": "Image Resize", + "_meta": { + "title": "Image Resize" + } + }, + "38": { + "inputs": { + "mode": "rescale", + "supersample": "true", + "resampling": "lanczos", + "rescale_factor": 0.7000000000000001, + "resize_width": 1024, + "resize_height": 1536, + "image": [ + "17", + 0 + ] + }, + "class_type": "Image Resize", + "_meta": { + "title": "Image Resize" + } } } \ No newline at end of file From 433f5d2c992db3fd8d4d12d6b804c4ec2b0bc579 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 20 Mar 2024 15:14:59 +0530 Subject: [PATCH 33/43] reloading img from settings added --- backend/db_repo.py | 1 + ui_components/components/animate_shot_page.py | 33 +- .../components/video_rendering_page.py | 249 +++- .../methods/animation_style_methods.py | 439 +++++-- ui_components/setup.py | 5 - .../widgets/animation_style_element.py | 1139 ----------------- .../widgets/sm_animation_style_element.py | 603 +++++++++ .../widgets/variant_comparison_grid.py | 7 +- 8 files changed, 1176 insertions(+), 1300 deletions(-) delete mode 100644 ui_components/widgets/animation_style_element.py create mode 100644 ui_components/widgets/sm_animation_style_element.py diff --git a/backend/db_repo.py b/backend/db_repo.py index b7e71d08..15969e16 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -194,6 +194,7 @@ def get_all_file_list(self, **kwargs): kwargs['project_id'] = project.id + # hackish sol: you can pass custom params as long as 
'page' is not in the kwargs if 'page' in kwargs and kwargs['page']: page = kwargs['page'] del kwargs['page'] diff --git a/ui_components/components/animate_shot_page.py b/ui_components/components/animate_shot_page.py index 4e56d10f..76956436 100644 --- a/ui_components/components/animate_shot_page.py +++ b/ui_components/components/animate_shot_page.py @@ -1,29 +1,44 @@ +import json import streamlit as st -from ui_components.widgets.frame_selector import frame_selector_widget, frame_view +from shared.constants import InternalFileType +from ui_components.components.video_rendering_page import sm_video_rendering_page +from ui_components.models import InternalShotObject +from ui_components.widgets.frame_selector import frame_selector_widget from ui_components.widgets.variant_comparison_grid import variant_comparison_grid -from ui_components.widgets.animation_style_element import animation_style_element from utils.data_repo.data_repo import DataRepo from ui_components.widgets.sidebar_logger import sidebar_logger def animate_shot_page(shot_uuid: str, h2): data_repo = DataRepo() shot = data_repo.get_shot_from_uuid(shot_uuid) + st.session_state['project_uuid'] = str(shot.project.uuid) + with st.sidebar: frame_selector_widget(show_frame_selector=False) st.write("") with st.expander("🔍 Generation log", expanded=True): - # if st_memory.toggle("Open", value=True, key="generaton_log_toggle"): - sidebar_logger(st.session_state["shot_uuid"]) + sidebar_logger(shot_uuid) st.write("") - # frame_view(view='Video',show_current_frames=False) st.markdown(f"#### :green[{st.session_state['main_view_type']}] > :red[{st.session_state['page']}] > :blue[{shot.name}]") st.markdown("***") - variant_comparison_grid(st.session_state['shot_uuid'], stage="Shots") - animation_style_element(st.session_state['shot_uuid']) + selected_variant = variant_comparison_grid(shot_uuid, stage="Shots") + file_uuid_list = [] + # loading images from a particular video variant + if selected_variant: + log = data_repo.get_inference_log_from_uuid(selected_variant) + shot_data = json.loads(log.input_params) + file_uuid_list = shot_data.get('origin_data', json.dumps({})).get('settings', {}).get('file_uuid_list', []) + # picking current images if no variant is selected + else: + for timing in shot.timing_list: + if timing.primary_image and timing.primary_image.location: + file_uuid_list.append(timing.primary_image.uuid) + + img_list = data_repo.get_all_file_list(uuid__in=file_uuid_list, file_type=InternalFileType.IMAGE.value)[0] + sm_video_rendering_page(shot_uuid, img_list) - st.markdown("***") - \ No newline at end of file + st.markdown("***") \ No newline at end of file diff --git a/ui_components/components/video_rendering_page.py b/ui_components/components/video_rendering_page.py index 0ae111a8..6ee55f14 100644 --- a/ui_components/components/video_rendering_page.py +++ b/ui_components/components/video_rendering_page.py @@ -1,72 +1,209 @@ from typing import List -import datetime +import time import streamlit as st -from shared.constants import InternalFileTag, InternalFileType -import random +from shared.constants import AnimationStyleType, AnimationToolType import time -import os -import re -from ui_components.methods.video_methods import render_video -from ui_components.models import InternalFileObject, InternalFrameTimingObject -from ui_components.widgets.attach_audio_element import attach_audio_element - -from ui_components.widgets.display_element import individual_video_display_element +from 
ui_components.widgets.sm_animation_style_element import animation_sidebar, individual_frame_settings_element, \ + select_motion_lora_element, select_sd_model_element, video_motion_settings +from ui_components.models import InternalFileObject, InternalShotObject +from ui_components.methods.animation_style_methods import toggle_generate_inference, transform_data, \ + update_session_state_with_animation_details +from ui_components.methods.video_methods import create_single_interpolated_clip from utils.data_repo.data_repo import DataRepo - -def video_rendering_page(project_uuid): +default_model = "Deliberate_v2.safetensors" +def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): data_repo = DataRepo() - parody_movie_names = ["The_Lord_of_the_Onion_Rings", "Jurassic_Pork", "Harry_Potter_and_the_Sorcerer_s_Kidney_Stone", "Star_Wars_The_Phantom_of_the_Oprah", "The_Silence_of_the_Yams", "The_Hunger_Pains", "Free_Willy_Wonka_and_the_Chocolate_Factory", "The_Da_Vinci_Chode", "Forrest_Dump", "The_Shawshank_Inebriation", "A_Clockwork_Orange_Juice", "The_Big_Lebowski_2_Dude_Where_s_My_Car", "The_Princess_Diaries_The_Dark_Knight_Rises", "Eternal_Sunshine_of_the_Spotless_Behind", "Rebel_Without_a_Clue", "The_Terminal_Dentist", "Dr_Strangelove_or_How_I_Learned_to_Stop_Worrying_and_Love_the_Bombastic", "The_Wolf_of_Sesame_Street", "The_Good_the_Bad_and_the_Fluffy", "The_Sound_of_Mucus", "Back_to_the_Fuchsia", "The_Curious_Case_of_Benjamin_s_Button", "The_Fellowship_of_the_Bing", "The_Texas_Chainsaw_Manicure", "The_Iron_Manatee", "Night_of_the_Living_Bread", "Indiana_Jones_and_the_Temple_of_Groom", "Kill_Billiards", "The_Bourne_Redundancy", "The_SpongeBob_SquarePants_Movie_Sponge_Out_of_Water_and_Ideas", - "Planet_of_the_Snapes", "No_Country_for_Old_Yentas", "The_Expendable_Accountant", "The_Terminal_Illness", "A_Streetcar_Named_Retire", "The_Secret_Life_of_Walter_s_Mitty", "The_Hunger_Games_Catching_Foam", "The_Godfather_Part_Time_Job", "How_To_Kill_a_Mockingbird", "Star_Trek_III_The_Search_for_Spock_s_Missing_Sock", "Gone_with_the_Wind_Chimes", "Dr_No_Clue", "Ferris_Bueller_s_Day_Off_Sick", "Monty_Python_and_the_Holy_Fail", "A_Fistful_of_Quarters", "Willy_Wonka_and_the_Chocolate_Heartburn", "The_Good_the_Bad_and_the_Dandruff", "The_Princess_Bride_of_Frankenstein", "The_Wizard_of_Bras", "Pulp_Friction", "Die_Hard_with_a_Clipboard", "Indiana_Jones_and_the_Last_Audit", "Finding_Nemoy", "The_Silence_of_the_Lambs_The_Musical", "Titanic_2_The_Iceberg_Strikes_Back", "Fast_Times_at_Ridgemont_Mortuary", "The_Graduate_But_Only_Because_He_Has_an_Advanced_Degree", "Beauty_and_the_Yeast", "The_Blair_Witch_Takes_Manhattan", "Reservoir_Bitches", "Die_Hard_with_a_Pension"] - random_name = random.choice(parody_movie_names) + shot: InternalShotObject = data_repo.get_shot_from_uuid(shot_uuid) - st.markdown("#### Video Rendering") - st.markdown("***") - final_video_name = st.text_input( - "What would you like to name this video?", value=random_name) - - attach_audio_element(project_uuid, True) + st.markdown("### 🎥 Generate animations") + st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") + + settings = { + 'animation_tool': AnimationToolType.ANIMATEDIFF.value, + } + shot_meta_data = {} - if st.button("Render New Video"): - status = render_video(final_video_name, project_uuid, InternalFileTag.COMPLETE_GENERATED_VIDEO.value) - if status: - st.success("Video rendered!") - time.sleep(0.5) - st.rerun() + with st.container(): + col1, _, _ = st.columns([1.0,1.5, 1.0]) - st.markdown("***") + # 
----------- INDIVIDUAL FRAME SETTINGS ----------- + strength_of_frames, distances_to_next_frames,\ + speeds_of_transitions, freedoms_between_frames,\ + individual_prompts, individual_negative_prompts,\ + motions_during_frames = individual_frame_settings_element(shot_uuid, img_list, col1) - # TODO: only show completed videos - video_list, _ = data_repo.get_all_file_list( - file_type=InternalFileType.VIDEO.value, - tag=InternalFileTag.COMPLETE_GENERATED_VIDEO.value, - project_id=project_uuid - ) - video_list = sorted(video_list, key=lambda x: x.created_on, reverse=True) - - for video in video_list: - st.subheader(video.name) + # ----------- SELECT SD MODEL ----------- + sd_model, model_files = select_sd_model_element(shot_uuid, default_model) + + # ----------- SELECT MOTION LORA ------------ + lora_data = select_motion_lora_element(shot_uuid, model_files) + + # ----------- OTHER SETTINGS ------------ + strength_of_adherence, overall_positive_prompt, \ + overall_negative_prompt, type_of_motion_context = video_motion_settings(shot_uuid, img_list) + + type_of_frame_distribution = "dynamic" + type_of_key_frame_influence = "dynamic" + type_of_strength_distribution = "dynamic" + linear_frame_distribution_value = 16 + linear_key_frame_influence_value = 1.0 + linear_cn_strength_value = 1.0 + relative_ipadapter_strength = 1.0 + relative_cn_strength = 0.0 + project_settings = data_repo.get_project_setting(shot.project.uuid) + width = project_settings.width + height = project_settings.height + img_dimension = f"{width}x{height}" + motion_scale = 1.3 + interpolation_style = 'ease-in-out' + buffer = 4 - try: - st.write(datetime.datetime.fromisoformat(video.created_on)) - except Exception as e: - st.write(datetime.datetime.strptime(video.created_on, '%Y-%m-%dT%H:%M:%S.%fZ')) + (dynamic_strength_values, dynamic_key_frame_influence_values, dynamic_frame_distribution_values, + context_length, context_stride, context_overlap, multipled_base_end_percent, multipled_base_adapter_strength, + prompt_travel, negative_prompt_travel, motion_scales) = transform_data( + strength_of_frames, + freedoms_between_frames, + speeds_of_transitions, + distances_to_next_frames, + type_of_motion_context, + strength_of_adherence, + individual_prompts, + individual_negative_prompts, + buffer, + motions_during_frames + ) - individual_video_display_element(video) + settings.update( + ckpt=sd_model, + width=width, + height=height, + buffer=4, + motion_scale=motion_scale, + motion_scales=motion_scales, + image_dimension=img_dimension, + output_format="video/h264-mp4", + prompt=overall_positive_prompt, + negative_prompt=overall_negative_prompt, + interpolation_type=interpolation_style, + stmfnet_multiplier=2, + relative_ipadapter_strength=relative_ipadapter_strength, + relative_cn_strength=relative_cn_strength, + type_of_strength_distribution=type_of_strength_distribution, + linear_strength_value=str(linear_cn_strength_value), + dynamic_strength_values=str(dynamic_strength_values), + linear_frame_distribution_value=linear_frame_distribution_value, + dynamic_frame_distribution_values=dynamic_frame_distribution_values, + type_of_frame_distribution=type_of_frame_distribution, + type_of_key_frame_influence=type_of_key_frame_influence, + linear_key_frame_influence_value=float(linear_key_frame_influence_value), + dynamic_key_frame_influence_values=dynamic_key_frame_influence_values, + normalise_speed=True, + ipadapter_noise=0.3, + animation_style=AnimationStyleType.CREATIVE_INTERPOLATION.value, + context_length=context_length, + 
context_stride=context_stride, + context_overlap=context_overlap, + multipled_base_end_percent=multipled_base_end_percent, + multipled_base_adapter_strength=multipled_base_adapter_strength, + individual_prompts=prompt_travel, + individual_negative_prompts=negative_prompt_travel, + animation_stype=AnimationStyleType.CREATIVE_INTERPOLATION.value, + max_frames=str(dynamic_frame_distribution_values[-1]), + lora_data=lora_data, + shot_data=shot_meta_data + ) + + position = "generate_vid" + st.markdown("***") + st.markdown("##### Generation Settings") - col1, col2 = st.columns(2) + animate_col_1, animate_col_2, _ = st.columns([3, 1, 1]) + with animate_col_1: + variant_count = st.number_input("How many variants?", min_value=1, max_value=5, value=1, step=1, key="variant_count") + + if "generate_vid_generate_inference" in st.session_state and st.session_state["generate_vid_generate_inference"]: + # last keyframe position * 16 + duration = float(dynamic_frame_distribution_values[-1] / 16) + data_repo.update_shot(uuid=shot_uuid, duration=duration) + shot_data = update_session_state_with_animation_details( + shot_uuid, + img_list, + strength_of_frames, + distances_to_next_frames, + speeds_of_transitions, + freedoms_between_frames, + motions_during_frames, + individual_prompts, + individual_negative_prompts, + lora_data, + default_model + ) + settings.update(shot_data=shot_data) + vid_quality = "full" + st.success("Generating clip - see status in the Generation Log in the sidebar. Press 'Refresh log' to update.") - with col1: - if st.checkbox(f"Confirm {video.name} Deletion"): - if st.button(f"Delete {video.name}"): - # removing locally - video_path = "videos/" + project_uuid + "/assets/videos/2_completed/" + video.name - if os.path.exists(video_path): - os.remove(video_path) + positive_prompt = "" + append_to_prompt = "" + for idx, img in enumerate(img_list): + if img.location: + b =img.inference_params + prompt = b.get("prompt", "") if b else "" + prompt += append_to_prompt + frame_prompt = f"{idx * linear_frame_distribution_value}_" + prompt + positive_prompt += ":" + frame_prompt if positive_prompt else frame_prompt + else: + st.error("Please generate primary images") + time.sleep(0.7) + st.rerun() + + if f'{shot_uuid}_backlog_enabled' not in st.session_state: + st.session_state[f'{shot_uuid}_backlog_enabled'] = False - # removing from database - data_repo.delete_file_from_uuid(video.uuid) + create_single_interpolated_clip( + shot_uuid, + vid_quality, + settings, + variant_count, + st.session_state[f'{shot_uuid}_backlog_enabled'] + ) + + backlog_update = {f'{shot_uuid}_backlog_enabled': False} + toggle_generate_inference(position, **backlog_update) + st.rerun() + + btn1, btn2, _ = st.columns([1, 1, 1]) + backlog_no_update = {f'{shot_uuid}_backlog_enabled': False} + with btn1: + help = "" + st.button("Add to queue", key="generate_animation_clip", disabled=False, help=help, \ + on_click=lambda: toggle_generate_inference(position, **backlog_no_update), type="primary", use_container_width=True) + + backlog_update = {f'{shot_uuid}_backlog_enabled': True} + with btn2: + backlog_help = "This will add the new video generation in the backlog" + st.button("Add to backlog", key="generate_animation_clip_backlog", disabled=False, help=backlog_help, \ + on_click=lambda: toggle_generate_inference(position, **backlog_update), type="secondary") - st.rerun() - else: - st.button(f"Delete {video.name}", disabled=True) + # --------------- SIDEBAR --------------------- + animation_sidebar( + shot_uuid, + img_list, + 
type_of_frame_distribution, + dynamic_frame_distribution_values, + linear_frame_distribution_value, + type_of_strength_distribution, + dynamic_strength_values, + linear_cn_strength_value, + type_of_key_frame_influence, + dynamic_key_frame_influence_values, + linear_key_frame_influence_value, + strength_of_frames, + distances_to_next_frames, + speeds_of_transitions, + freedoms_between_frames, + motions_during_frames, + individual_prompts, + individual_negative_prompts, + default_model + ) \ No newline at end of file diff --git a/ui_components/methods/animation_style_methods.py b/ui_components/methods/animation_style_methods.py index bfdfd376..72d050d2 100644 --- a/ui_components/methods/animation_style_methods.py +++ b/ui_components/methods/animation_style_methods.py @@ -1,9 +1,14 @@ import json +import os import time +from typing import List import streamlit as st +from backend.models import InternalFileObject from shared.constants import InferenceParamType from ui_components.constants import DEFAULT_SHOT_MOTION_VALUES, ShotMetaData from utils.data_repo.data_repo import DataRepo +import numpy as np +import matplotlib.pyplot as plt def get_generation_settings_from_log(log_uuid=None): @@ -26,15 +31,17 @@ def load_shot_settings(shot_uuid, log_uuid=None): if not log_uuid: shot_meta_data = shot.meta_data_dict.get(ShotMetaData.MOTION_DATA.value, json.dumps({})) shot_meta_data = json.loads(shot_meta_data) + st.session_state[f"{shot_uuid}_selected_variant_log_uuid"] = None # loading settings from that particular log else: shot_meta_data = get_generation_settings_from_log(log_uuid) + st.session_state[f"{shot_uuid}_selected_variant_log_uuid"] = log_uuid if shot_meta_data: # updating timing data timing_data = shot_meta_data.get("timing_data", []) - for idx, _ in enumerate(shot.timing_list): + for idx, _ in enumerate(shot.timing_list): # fix: check how the image list is being stored here and use that instead # setting default parameters (fetching data from the shot if it's present) if timing_data and len(timing_data) >= idx + 1: motion_data = timing_data[idx] @@ -48,120 +55,372 @@ def load_shot_settings(shot_uuid, log_uuid=None): st.session_state[key] = main_setting_data[key] st.rerun() else: - for idx, _ in enumerate(shot.timing_list): + for idx, _ in enumerate(shot.timing_list): # fix: check how the image list is being stored here for k, v in DEFAULT_SHOT_MOTION_VALUES.items(): st.session_state[f"{k}_{shot_uuid}_{idx}"] = v - -def reverse_data_transformation(dynamic_strength_values, dynamic_key_frame_influence_values, dynamic_frame_distribution_values, context_length, context_stride, context_overlap, multipled_base_end_percent, formatted_individual_prompts, formatted_individual_negative_prompts, formatted_motions, buffer): - def reverse_transform(dynamic_strength_values, dynamic_key_frame_influence_values, dynamic_frame_distribution_values): +def format_frame_prompts_with_buffer(frame_numbers, individual_prompts, buffer): + adjusted_frame_numbers = [frame + buffer for frame in frame_numbers] + + # Preprocess prompts to remove any '/' or '"' from the values + processed_prompts = [prompt.replace("/", "").replace('"', '') for prompt in individual_prompts] + + # Format the adjusted frame numbers and processed prompts + formatted = ', '.join(f'"{int(frame)}": "{prompt}"' for frame, prompt in zip(adjusted_frame_numbers, processed_prompts)) + return formatted - # Reconstructing strength_of_frames - strength_of_frames = [strength for _, strength, _ in dynamic_strength_values] - - # Reconstructing 
freedoms_between_frames (correctly as movements_between_frames) - freedoms_between_frames = [] - for i in range(1, len(dynamic_strength_values)): - if dynamic_strength_values[i][0] is not None: - middle_value = dynamic_strength_values[i][1] - adjusted_value = dynamic_strength_values[i][0] - relative_value = (middle_value - adjusted_value) / middle_value - freedoms_between_frames.append(round(relative_value, 2)) # Ensure proper rounding - - # Reconstructing speeds_of_transitions with correct rounding - speeds_of_transitions = [] - for current, next_ in dynamic_key_frame_influence_values[:-1]: - if next_ is not None: - inverted_speed = next_ / 2 - original_speed = 1.0 - inverted_speed - speeds_of_transitions.append(round(original_speed, 2)) # Ensure proper rounding - - # Reconstructing distances_to_next_frames with exact values - distances_to_next_frames = [] - for i in range(1, len(dynamic_frame_distribution_values)): - distances_to_next_frames.append(dynamic_frame_distribution_values[i] - dynamic_frame_distribution_values[i-1]) +def plot_weights(weights_list, frame_numbers_list): + plt.figure(figsize=(12, 6)) + for i, weights in enumerate(weights_list): + frame_numbers = frame_numbers_list[i] + plt.plot(frame_numbers, weights, label=f'Frame {i + 1}') + + # Plot settings + plt.xlabel('Frame Number') + plt.ylabel('Weight') + plt.legend() + plt.ylim(0, 1.0) + plt.show() + st.set_option('deprecation.showPyplotGlobalUse', False) + st.pyplot() + +def calculate_weights(keyframe_positions, strength_values, buffer, key_frame_influence_values,last_key_frame_position): + def calculate_influence_frame_number(key_frame_position, next_key_frame_position, distance): + # Calculate the absolute distance between key frames + key_frame_distance = abs(next_key_frame_position - key_frame_position) - return strength_of_frames,freedoms_between_frames, speeds_of_transitions - - def identify_type_of_motion_context(context_length, context_stride, context_overlap): - # Given the context settings, identify the type of motion context - if context_stride == 1 and context_overlap == 2: - return "Low" - elif context_stride == 2 and context_overlap == 4: - return "Standard" - elif context_stride == 4 and context_overlap == 4: - return "High" + # Apply the distance multiplier + extended_distance = key_frame_distance * distance + + # Determine the direction of influence based on the positions of the key frames + if key_frame_position < next_key_frame_position: + # Normal case: influence extends forward + influence_frame_number = key_frame_position + extended_distance else: - return "Unknown" # Fallback case if the inputs do not match expected values + # Reverse case: influence extends backward + influence_frame_number = key_frame_position - extended_distance - def calculate_strength_of_adherence(multipled_base_end_percent): - return multipled_base_end_percent / (0.05 * 10) + # Return the result rounded to the nearest integer + return round(influence_frame_number) - def reverse_frame_prompts_formatting(formatted_prompts): - # Extract frame number and prompt pairs using a regular expression - prompt_pairs = re.findall(r'\"(\d+\.\d+)\":\s*\"(.*?)\"', formatted_prompts) - - # Initialize an empty list to collect prompts - original_prompts = [prompt for frame, prompt in prompt_pairs] + def find_curve(batch_index_from, batch_index_to, strength_from, strength_to, interpolation,revert_direction_at_midpoint, last_key_frame_position,i, number_of_items,buffer): + # Initialize variables based on the position of the keyframe + 
range_start = batch_index_from + range_end = batch_index_to + # if it's the first value, set influence range from 1.0 to 0.0 + if i == number_of_items - 1: + range_end = last_key_frame_position + + steps = range_end - range_start + diff = strength_to - strength_from + + # Calculate index for interpolation + index = np.linspace(0, 1, steps // 2 + 1) if revert_direction_at_midpoint else np.linspace(0, 1, steps) + + # Calculate weights based on interpolation type + if interpolation == "linear": + weights = np.linspace(strength_from, strength_to, len(index)) + elif interpolation == "ease-in": + weights = diff * np.power(index, 2) + strength_from + elif interpolation == "ease-out": + weights = diff * (1 - np.power(1 - index, 2)) + strength_from + elif interpolation == "ease-in-out": + weights = diff * ((1 - np.cos(index * np.pi)) / 2) + strength_from - return original_prompts + if revert_direction_at_midpoint: + weights = np.concatenate([weights, weights[::-1]]) + + # Generate frame numbers + frame_numbers = np.arange(range_start, range_start + len(weights)) + + # "Dropper" component: For keyframes with negative start, drop the weights + if range_start < 0 and i > 0: + drop_count = abs(range_start) + weights = weights[drop_count:] + frame_numbers = frame_numbers[drop_count:] + + # Dropper component: for keyframes a range_End is greater than last_key_frame_position, drop the weights + if range_end > last_key_frame_position and i < number_of_items - 1: + drop_count = range_end - last_key_frame_position + weights = weights[:-drop_count] + frame_numbers = frame_numbers[:-drop_count] + return weights, frame_numbers + + weights_list = [] + frame_numbers_list = [] + + for i in range(len(keyframe_positions)): + keyframe_position = keyframe_positions[i] + interpolation = "ease-in-out" + # strength_from = strength_to = 1.0 - def reverse_motion_strengths_formatting(formatted_motions, buffer): - # Extract frame number and motion strength pairs using a regular expression - motion_pairs = re.findall(r'(\d+):\((.*?)\)', formatted_motions) + if i == 0: # first image + # GET IMAGE AND KEYFRAME INFLUENCE VALUES + key_frame_influence_from, key_frame_influence_to = key_frame_influence_values[i] + start_strength, mid_strength, end_strength = strength_values[i] + keyframe_position = keyframe_positions[i] + next_key_frame_position = keyframe_positions[i+1] + batch_index_from = keyframe_position + batch_index_to_excl = calculate_influence_frame_number(keyframe_position, next_key_frame_position, key_frame_influence_to) + weights, frame_numbers = find_curve(batch_index_from, batch_index_to_excl, mid_strength, end_strength, interpolation, False, last_key_frame_position, i, len(keyframe_positions), buffer) + # interpolation = "ease-in" - # Convert extracted pairs back to the original format, adjusting frame numbers - original_motions = [] - for frame, strength in motion_pairs: - original_frame = int(frame) - buffer # Subtract buffer to get original frame number - original_strength = float(strength) # Convert strength back to float - # Ensure the motion is appended in the correct order based on original frame numbers - original_motions.append(original_strength) + elif i == len(keyframe_positions) - 1: # last image + # GET IMAGE AND KEYFRAME INFLUENCE VALUES + key_frame_influence_from,key_frame_influence_to = key_frame_influence_values[i] + start_strength, mid_strength, end_strength = strength_values[i] + # strength_from, strength_to = cn_strength_values[i-1] + keyframe_position = keyframe_positions[i] + 
previous_key_frame_position = keyframe_positions[i-1] + batch_index_from = calculate_influence_frame_number(keyframe_position, previous_key_frame_position, key_frame_influence_from) + batch_index_to_excl = keyframe_position + weights, frame_numbers = find_curve(batch_index_from, batch_index_to_excl, start_strength, mid_strength, interpolation, False, last_key_frame_position, i, len(keyframe_positions), buffer) + # interpolation = "ease-out" - return original_motions - + else: # middle images + # GET IMAGE AND KEYFRAME INFLUENCE VALUES + key_frame_influence_from,key_frame_influence_to = key_frame_influence_values[i] + start_strength, mid_strength, end_strength = strength_values[i] + keyframe_position = keyframe_positions[i] + + # CALCULATE WEIGHTS FOR FIRST HALF + previous_key_frame_position = keyframe_positions[i-1] + batch_index_from = calculate_influence_frame_number(keyframe_position, previous_key_frame_position, key_frame_influence_from) + batch_index_to_excl = keyframe_position + first_half_weights, first_half_frame_numbers = find_curve(batch_index_from, batch_index_to_excl, start_strength, mid_strength, interpolation, False, last_key_frame_position, i, len(keyframe_positions), buffer) + + # CALCULATE WEIGHTS FOR SECOND HALF + next_key_frame_position = keyframe_positions[i+1] + batch_index_from = keyframe_position + batch_index_to_excl = calculate_influence_frame_number(keyframe_position, next_key_frame_position, key_frame_influence_to) + second_half_weights, second_half_frame_numbers = find_curve(batch_index_from, batch_index_to_excl, mid_strength, end_strength, interpolation, False, last_key_frame_position, i, len(keyframe_positions), buffer) + + # COMBINE FIRST AND SECOND HALF + weights = np.concatenate([first_half_weights, second_half_weights]) + frame_numbers = np.concatenate([first_half_frame_numbers, second_half_frame_numbers]) + + weights_list.append(weights) + frame_numbers_list.append(frame_numbers) + + return weights_list, frame_numbers_list + - def safe_eval(input_data): - if isinstance(input_data, str): +def extract_influence_values(type_of_key_frame_influence, dynamic_key_frame_influence_values, keyframe_positions, linear_key_frame_influence_value): + # Check and convert linear_key_frame_influence_value if it's a float or string float + # if it's a string that starts with a parenthesis, convert it to a tuple + if isinstance(linear_key_frame_influence_value, str) and linear_key_frame_influence_value[0] == "(": + linear_key_frame_influence_value = eval(linear_key_frame_influence_value) + + if not isinstance(linear_key_frame_influence_value, tuple): + if isinstance(linear_key_frame_influence_value, (float, str)): try: - return ast.literal_eval(input_data) + value = float(linear_key_frame_influence_value) + linear_key_frame_influence_value = (value, value) except ValueError: - # Handle the case where the string cannot be parsed - return input_data + raise ValueError("linear_key_frame_influence_value must be a float or a string representing a float") + + number_of_outputs = len(keyframe_positions) + if type_of_key_frame_influence == "dynamic": + # Convert list of individual float values into tuples + if all(isinstance(x, float) for x in dynamic_key_frame_influence_values): + dynamic_values = [(value, value) for value in dynamic_key_frame_influence_values] + elif isinstance(dynamic_key_frame_influence_values[0], str) and dynamic_key_frame_influence_values[0] == "(": + string_representation = ''.join(dynamic_key_frame_influence_values) + dynamic_values = 
eval(f'[{string_representation}]') else: - return input_data + dynamic_values = dynamic_key_frame_influence_values if isinstance(dynamic_key_frame_influence_values, list) else [dynamic_key_frame_influence_values] + return dynamic_values[:number_of_outputs] + else: + return [linear_key_frame_influence_value for _ in range(number_of_outputs)] - dynamic_strength_values = safe_eval(dynamic_strength_values) - dynamic_key_frame_influence_values = safe_eval(dynamic_key_frame_influence_values) - dynamic_frame_distribution_values = safe_eval(dynamic_frame_distribution_values) - context_length = int(context_length) - context_stride = int(context_stride) - context_overlap = int(context_overlap) - multipled_base_end_percent = float(multipled_base_end_percent) +def extract_strength_values(type_of_key_frame_influence, dynamic_key_frame_influence_values, keyframe_positions, linear_key_frame_influence_value): + if type_of_key_frame_influence == "dynamic": + # Process the dynamic_key_frame_influence_values depending on its format + if isinstance(dynamic_key_frame_influence_values, str): + dynamic_values = eval(dynamic_key_frame_influence_values) + else: + dynamic_values = dynamic_key_frame_influence_values - # Step 1: Reverse dynamic_strength_values and dynamic_key_frame_influence_values + # Iterate through the dynamic values and convert tuples with two values to three values + dynamic_values_corrected = [] + for value in dynamic_values: + if len(value) == 2: + value = (value[0], value[1], value[0]) + dynamic_values_corrected.append(value) - strength_of_frames, freedoms_between_frames, speeds_of_transitions = reverse_transform(dynamic_strength_values, dynamic_key_frame_influence_values, dynamic_frame_distribution_values) - - # Step 2: Reverse dynamic_frame_distribution_values to distances_to_next_frames - distances_to_next_frames = [round((dynamic_frame_distribution_values[i] - dynamic_frame_distribution_values[i-1]) / 16, 2) for i in range(1, len(dynamic_frame_distribution_values))] - - # Step 3: Identify type_of_motion_context - type_of_motion_context = identify_type_of_motion_context(context_length, context_stride, context_overlap) + return dynamic_values_corrected + else: + # Process for linear or other types + if len(linear_key_frame_influence_value) == 2: + linear_key_frame_influence_value = (linear_key_frame_influence_value[0], linear_key_frame_influence_value[1], linear_key_frame_influence_value[0]) + return [linear_key_frame_influence_value for _ in range(len(keyframe_positions) - 1)] + + +def get_keyframe_positions(type_of_frame_distribution, dynamic_frame_distribution_values, images, linear_frame_distribution_value): + if type_of_frame_distribution == "dynamic": + if isinstance(dynamic_frame_distribution_values, str): + # Sort the keyframe positions in numerical order + return sorted([int(kf.strip()) for kf in dynamic_frame_distribution_values.split(',')]) + elif isinstance(dynamic_frame_distribution_values, list): + return sorted(dynamic_frame_distribution_values) + else: + # Calculate the number of keyframes based on the total duration and linear_frames_per_keyframe + return [i * linear_frame_distribution_value for i in range(len(images))] + +def toggle_generate_inference(position, **kwargs): + for k,v in kwargs.items(): + st.session_state[k] = v + if position + '_generate_inference' not in st.session_state: + st.session_state[position + '_generate_inference'] = True + else: + st.session_state[position + '_generate_inference'] = not st.session_state[position + '_generate_inference'] + +def 
transform_data(strength_of_frames, movements_between_frames, speeds_of_transitions, distances_to_next_frames, type_of_motion_context, strength_of_adherence, individual_prompts, individual_negative_prompts, buffer, motions_during_frames): + # FRAME SETTINGS + def adjust_and_invert_relative_value(middle_value, relative_value): + if relative_value is not None: + adjusted_value = middle_value * relative_value + return round(middle_value - adjusted_value, 2) + return None + + def invert_value(value): + return round(1.0 - value, 2) if value is not None else None + + # Creating output_strength with relative and inverted start and end values + output_strength = [] + for i, strength in enumerate(strength_of_frames): + start_value = None if i == 0 else movements_between_frames[i - 1] + end_value = None if i == len(strength_of_frames) - 1 else movements_between_frames[i] + + # Adjusting and inverting start and end values relative to the middle value + adjusted_start = adjust_and_invert_relative_value(strength, start_value) + adjusted_end = adjust_and_invert_relative_value(strength, end_value) + + output_strength.append((adjusted_start, strength, adjusted_end)) + + # Creating output_speeds with inverted values + output_speeds = [(None, None) for _ in range(len(speeds_of_transitions) + 1)] + for i in range(len(speeds_of_transitions)): + current_tuple = list(output_speeds[i]) + next_tuple = list(output_speeds[i + 1]) + + inverted_speed = invert_value(speeds_of_transitions[i]) + current_tuple[1] = inverted_speed * 2 + next_tuple[0] = inverted_speed * 2 + + output_speeds[i] = tuple(current_tuple) + output_speeds[i + 1] = tuple(next_tuple) + + # Creating cumulative_distances + cumulative_distances = [0] + for distance in distances_to_next_frames: + cumulative_distances.append(cumulative_distances[-1] + distance) + + cumulative_distances = [int(float(value) * 16) for value in cumulative_distances] + + # MOTION CONTEXT SETTINGS + if type_of_motion_context == "Low": + context_length = 16 + context_stride = 1 + context_overlap = 2 + + elif type_of_motion_context == "Standard": + context_length = 16 + context_stride = 2 + context_overlap = 4 - # Step 4: Calculate strength_of_adherence from multipled_base_end_percent - strength_of_adherence = calculate_strength_of_adherence(multipled_base_end_percent) + elif type_of_motion_context == "High": + context_length = 16 + context_stride = 4 + context_overlap = 4 + + # SPARSE CTRL SETTINGS + multipled_base_end_percent = 0.05 * (strength_of_adherence * 10) + multipled_base_adapter_strength = 0.05 * (strength_of_adherence * 20) + + # FRAME PROMPTS FORMATTING + def format_frame_prompts_with_buffer(frame_numbers, individual_prompts, buffer): + adjusted_frame_numbers = [frame + buffer for frame in frame_numbers] + + # Preprocess prompts to remove any '/' or '"' from the values + processed_prompts = [prompt.replace("/", "").replace('"', '') for prompt in individual_prompts] + + # Format the adjusted frame numbers and processed prompts + formatted = ', '.join(f'"{int(frame)}": "{prompt}"' for frame, prompt in zip(adjusted_frame_numbers, processed_prompts)) + return formatted + + # Applying format_frame_prompts_with_buffer + formatted_individual_prompts = format_frame_prompts_with_buffer(cumulative_distances, individual_prompts, buffer) + formatted_individual_negative_prompts = format_frame_prompts_with_buffer(cumulative_distances, individual_negative_prompts, buffer) + + # MOTION STRENGTHS FORMATTING + adjusted_frame_numbers = [0] + [frame + buffer for frame in 
cumulative_distances[1:]] - # Step 5: Reverse frame prompts formatting + # Format the adjusted frame numbers and strengths + motions_during_frames = ', '.join(f'{int(frame)}:({strength})' for frame, strength in zip(adjusted_frame_numbers, motions_during_frames)) + + return output_strength, output_speeds, cumulative_distances, context_length, context_stride, context_overlap, multipled_base_end_percent, multipled_base_adapter_strength, formatted_individual_prompts, formatted_individual_negative_prompts,motions_during_frames - individual_prompts = reverse_frame_prompts_formatting(formatted_individual_prompts) +def update_session_state_with_animation_details(shot_uuid, img_list: List[InternalFileObject], strength_of_frames, distances_to_next_frames, speeds_of_transitions, freedoms_between_frames, motions_during_frames, individual_prompts, individual_negative_prompts, lora_data, default_model): + data_repo = DataRepo() + shot = data_repo.get_shot_from_uuid(shot_uuid) + meta_data = shot.meta_data_dict + timing_data = [] + for idx, img in enumerate(img_list): + if idx < len(img_list): + st.session_state[f'strength_of_frame_{shot_uuid}_{idx}'] = strength_of_frames[idx] + st.session_state[f'individual_prompt_{shot_uuid}_{idx}'] = individual_prompts[idx] + st.session_state[f'individual_negative_prompt_{shot_uuid}_{idx}'] = individual_negative_prompts[idx] + st.session_state[f'motion_during_frame_{shot_uuid}_{idx}'] = motions_during_frames[idx] + if idx < len(img_list) - 1: + st.session_state[f'distance_to_next_frame_{shot_uuid}_{idx}'] = distances_to_next_frames[idx] * 2 + st.session_state[f'speed_of_transition_{shot_uuid}_{idx}'] = speeds_of_transitions[idx] + st.session_state[f'freedom_between_frames_{shot_uuid}_{idx}'] = freedoms_between_frames[idx] - individual_negative_prompts = reverse_frame_prompts_formatting(formatted_individual_negative_prompts) + # adding into the meta-data + state_data = { + "strength_of_frame" : strength_of_frames[idx], + "individual_prompt" : individual_prompts[idx], + "individual_negative_prompt" : individual_negative_prompts[idx], + "motion_during_frame" : motions_during_frames[idx], + "distance_to_next_frame" : distances_to_next_frames[idx] * 2 if idx < len(img_list) - 1 else DEFAULT_SHOT_MOTION_VALUES["distance_to_next_frame"], + "speed_of_transition" : speeds_of_transitions[idx] if idx < len(img_list) - 1 else DEFAULT_SHOT_MOTION_VALUES["speed_of_transition"], + "freedom_between_frames" : freedoms_between_frames[idx] if idx < len(img_list) - 1 else DEFAULT_SHOT_MOTION_VALUES["freedom_between_frames"], + } - # Step 6: Reverse motion strengths formatting - motions_during_frames = reverse_motion_strengths_formatting(formatted_motions, buffer) + timing_data.append(state_data) - return strength_of_frames, freedoms_between_frames, speeds_of_transitions, distances_to_next_frames, type_of_motion_context, strength_of_adherence, individual_prompts, individual_negative_prompts, motions_during_frames + main_setting_data = {} + main_setting_data[f'lora_data_{shot.uuid}'] = lora_data + main_setting_data[f"strength_of_adherence_value_{shot.uuid}"] = st.session_state["strength_of_adherence"] + main_setting_data[f"type_of_motion_context_index_{shot.uuid}"] = st.session_state["type_of_motion_context"] + main_setting_data[f"positive_prompt_video_{shot.uuid}"] = st.session_state["overall_positive_prompt"] + main_setting_data[f"negative_prompt_video_{shot.uuid}"] = st.session_state["overall_negative_prompt"] + main_setting_data[f"amount_of_motion_{shot.uuid}"] = 
st.session_state["amount_of_motion"] + + checkpoints_dir = "ComfyUI/models/checkpoints" + all_files = os.listdir(checkpoints_dir) + model_files = [file for file in all_files if file.endswith('.safetensors') or file.endswith('.ckpt')] + model_files = [file for file in model_files if "xl" not in file] + if 'sd_model_video' in st.session_state and len(model_files): + idx = model_files.index(st.session_state["sd_model_video"]) if st.session_state["sd_model_video"] in model_files else 0 + main_setting_data[f'ckpt_{shot.uuid}'] = model_files[idx] + else: + main_setting_data[f'ckpt_{shot.uuid}'] = default_model + + meta_data.update( + { + ShotMetaData.MOTION_DATA.value : json.dumps( + { + "timing_data": timing_data, + "main_setting_data": main_setting_data + } + ) + } + ) + + data_repo.update_shot(**{"uuid": shot_uuid, "meta_data": json.dumps(meta_data)}) + return meta_data diff --git a/ui_components/setup.py b/ui_components/setup.py index 34106f26..0dda0376 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -12,7 +12,6 @@ from ui_components.components.new_project_page import new_project_page from ui_components.components.project_settings_page import project_settings_page -from ui_components.components.video_rendering_page import video_rendering_page from streamlit_option_menu import option_menu from utils.common_utils import set_default_values @@ -220,12 +219,8 @@ def change_page(key): animate_shot_page(st.session_state["shot_uuid"], h2) elif st.session_state["main_view_type"] == "Project Settings": - project_settings_page(st.session_state["project_uuid"]) - elif st.session_state["main_view_type"] == "Video Rendering": - video_rendering_page(st.session_state["project_uuid"]) - elif st.session_state["section"] == "App Settings": app_settings_page() diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py deleted file mode 100644 index 448c87d9..00000000 --- a/ui_components/widgets/animation_style_element.py +++ /dev/null @@ -1,1139 +0,0 @@ -import json -import random -import string -import tarfile -import time -import uuid -import zipfile -from ui_components.methods.ml_methods import train_motion_lora -import streamlit as st -from typing import List -from shared.constants import AnimationStyleType, AnimationToolType, InferenceParamType, InternalFileType -from ui_components.constants import DEFAULT_SHOT_MOTION_VALUES, DefaultProjectSettingParams, ShotMetaData -from ui_components.methods.animation_style_methods import load_shot_settings -from ui_components.methods.file_methods import get_files_in_a_directory, get_media_dimensions, save_or_host_file -from ui_components.methods.video_methods import create_single_interpolated_clip -from ui_components.widgets.display_element import display_motion_lora -from utils.data_repo.data_repo import DataRepo -from utils.local_storage.local_storage import read_from_motion_lora_local_db -from utils.ml_processor.motion_module import AnimateDiffCheckpoint -from ui_components.models import InternalFrameTimingObject, InternalShotObject -from utils import st_memory -import numpy as np -import matplotlib.pyplot as plt -import os -import requests -# import re -import re - -default_model = "Deliberate_v2.safetensors" - -def animation_style_element(shot_uuid): - disable_generate = False - help = "" - backlog_help = "This will add the new video generation in the backlog" - motion_modules = AnimateDiffCheckpoint.get_name_list() - variant_count = 1 - current_animation_style = 
AnimationStyleType.CREATIVE_INTERPOLATION.value # setting a default value - data_repo = DataRepo() - - shot: InternalShotObject = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) - st.session_state['project_uuid'] = str(shot.project.uuid) - timing_list: List[InternalFrameTimingObject] = shot.timing_list - - settings = { - 'animation_tool': AnimationToolType.ANIMATEDIFF.value, - } - - st.markdown("### 🎥 Generate animations") - st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") - - with st.container(): - advanced1, advanced2, advanced3 = st.columns([1.0,1.5, 1.0]) - - with advanced1: - st.markdown("##### Individual frame settings") - - items_per_row = 3 - strength_of_frames = [] - distances_to_next_frames = [] - speeds_of_transitions = [] - freedoms_between_frames = [] - individual_prompts = [] - individual_negative_prompts = [] - motions_during_frames = [] - shot_meta_data = {} - - if len(timing_list) <= 1: - st.warning("You need at least two frames to generate a video.") - st.stop() - - open_advanced_settings = st_memory.toggle("Open all advanced settings", key="advanced_settings", value=False) - - # setting default values to main shot settings - if f'lora_data_{shot.uuid}' not in st.session_state: - st.session_state[f'lora_data_{shot.uuid}'] = [] - - if f'strength_of_adherence_value_{shot.uuid}' not in st.session_state: - st.session_state[f'strength_of_adherence_value_{shot.uuid}'] = 0.10 - - if f'type_of_motion_context_index_{shot.uuid}' not in st.session_state: - st.session_state[f'type_of_motion_context_index_{shot.uuid}'] = 1 - - if f'positive_prompt_video_{shot.uuid}' not in st.session_state: - st.session_state[f"positive_prompt_video_{shot.uuid}"] = "" - - if f'negative_prompt_video_{shot.uuid}' not in st.session_state: - st.session_state[f"negative_prompt_video_{shot.uuid}"] = "" - - if f'ckpt_{shot.uuid}' not in st.session_state: - st.session_state[f'ckpt_{shot.uuid}'] = "" - - if f"amount_of_motion_{shot.uuid}" not in st.session_state: - st.session_state[f"amount_of_motion_{shot.uuid}"] = 1.3 - - # loading settings of the last shot (if this shot is being loaded for the first time) - if f'strength_of_frame_{shot_uuid}_0' not in st.session_state: - load_shot_settings(shot.uuid) - - # ------------- Timing Frame and their settings ------------------- - for i in range(0, len(timing_list) , items_per_row): - with st.container(): - grid = st.columns([2 if j%2==0 else 1 for j in range(2*items_per_row)]) # Adjust the column widths - for j in range(items_per_row): - - idx = i + j - if idx < len(timing_list): - with grid[2*j]: # Adjust the index for image column - timing = timing_list[idx] - if timing.primary_image and timing.primary_image.location: - - st.info(f"**Frame {idx + 1}**") - - st.image(timing.primary_image.location, use_column_width=True) - - # settings control - with st.expander("Advanced settings:", expanded=open_advanced_settings): - # checking for newly added frames - if f'individual_prompt_{shot.uuid}_{idx}' not in st.session_state: - for k, v in DEFAULT_SHOT_MOTION_VALUES.items(): - st.session_state[f"{k}_{shot_uuid}_{idx}"] = v - - individual_prompt = st.text_input("What to include:", key=f"individual_prompt_widget_{idx}_{timing.uuid}", value=st.session_state[f'individual_prompt_{shot.uuid}_{idx}'], help="Use this sparingly, as it can have a large impact on the video and cause weird distortions.") - individual_prompts.append(individual_prompt) - individual_negative_prompt = st.text_input("What to avoid:", 
key=f"negative_prompt_widget_{idx}_{timing.uuid}", value=st.session_state[f'individual_negative_prompt_{shot.uuid}_{idx}'],help="Use this sparingly, as it can have a large impact on the video and cause weird distortions.") - individual_negative_prompts.append(individual_negative_prompt) - strength1, strength2 = st.columns([1, 1]) - with strength1: - strength_of_frame = st.slider("Strength of current frame:", min_value=0.25, max_value=1.0, step=0.01, key=f"strength_of_frame_widget_{shot.uuid}_{idx}", value=st.session_state[f'strength_of_frame_{shot.uuid}_{idx}']) - strength_of_frames.append(strength_of_frame) - with strength2: - motion_during_frame = st.slider("Motion during frame:", min_value=0.5, max_value=1.5, step=0.01, key=f"motion_during_frame_widget_{idx}_{timing.uuid}", value=st.session_state[f'motion_during_frame_{shot.uuid}_{idx}']) - motions_during_frames.append(motion_during_frame) - else: - st.warning("No primary image present.") - - # distance, speed and freedom settings (also aggregates them into arrays) - with grid[2*j+1]: # Add the new column after the image column - if idx < len(timing_list) - 1: - - # if st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] is a int, make it a float - if isinstance(st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'], int): - st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] = float(st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}']) - distance_to_next_frame = st.slider("Seconds to next frame:", min_value=0.25, max_value=6.00, step=0.25, key=f"distance_to_next_frame_widget_{idx}_{timing.uuid}", value=st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}']) - distances_to_next_frames.append(distance_to_next_frame/2) - speed_of_transition = st.slider("Speed of transition:", min_value=0.45, max_value=0.7, step=0.01, key=f"speed_of_transition_widget_{idx}_{timing.uuid}", value=st.session_state[f'speed_of_transition_{shot.uuid}_{idx}']) - speeds_of_transitions.append(speed_of_transition) - freedom_between_frames = st.slider("Freedom between frames:", min_value=0.15, max_value=0.85, step=0.01, key=f"freedom_between_frames_widget_{idx}_{timing.uuid}", value=st.session_state[f'freedom_between_frames_{shot.uuid}_{idx}']) - freedoms_between_frames.append(freedom_between_frames) - - if (i < len(timing_list) - 1) or (len(timing_list) % items_per_row != 0): - st.markdown("***") - - - st.markdown("##### Style model") - tab1, tab2 = st.tabs(["Choose Model","Download Models"]) - - checkpoints_dir = "ComfyUI/models/checkpoints" - all_files = os.listdir(checkpoints_dir) - if len(all_files) == 0: - model_files = [default_model] - - else: - # Filter files to only include those with .safetensors and .ckpt extensions - model_files = [file for file in all_files if file.endswith('.safetensors') or file.endswith('.ckpt')] - # drop all files that contain xl - model_files = [file for file in model_files if "xl" not in file] - - # Mapping of model names to their download URLs - sd_model_dict = { - "Anything V3 FP16 Pruned": { - "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/anything-v3-fp16-pruned.safetensors.tar", - "filename": "anything-v3-fp16-pruned.safetensors.tar" - }, - "Deliberate V2": { - "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/Deliberate_v2.safetensors.tar", - "filename": "Deliberate_v2.safetensors.tar" - }, - "Dreamshaper 8": { - "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/dreamshaper_8.safetensors.tar", - "filename": 
"dreamshaper_8.safetensors.tar" - }, - "epicrealism_pureEvolutionV5": { - "url": "https://civitai.com/api/download/models/134065", - "filename": "epicrealism_pureEvolutionv5.safetensors" - }, - "majicmixRealistic_v6": { - "url": "https://civitai.com/api/download/models/94640", - "filename": "majicmixRealistic_v6.safetensors" - }, - } - - cur_model = st.session_state[f'ckpt_{shot.uuid}'] - current_model_index = model_files.index(cur_model) if (cur_model and cur_model in model_files) else 0 - # st.session_state['sd_model_video'] = current_model_index - # ---------------- SELECT CKPT -------------- - with tab1: - model1, model2 = st.columns([1, 1]) - with model1: - sd_model = "" - def update_model(): - global sd_model - sd_model = checkpoints_dir + "/" + st.session_state['sd_model_video'] - - if model_files and len(model_files): - sd_model = st.selectbox( - label="Which model would you like to use?", - options=model_files, - key="sd_model_video", - index=current_model_index, - on_change=update_model - ) - else: - st.write("") - st.info("Default model Deliberate V2 would be selected") - with model2: - if len(all_files) == 0: - st.write("") - st.info("This is the default model - to download more, go to the Download Models tab.") - else: - st.write("") - st.info("To download more models, go to the Download Models tab.") - - # if it's in sd_model-list, just pass the name. If not, stick checkpoints_dir in front of it - # sd_model = checkpoints_dir + "/" + sd_model - - # ---------------- ADD CKPT --------------- - with tab2: - where_to_get_model = st.radio("Where would you like to get the model from?", options=["Our list", "Upload a model", "From a URL"], key="where_to_get_model") - - if where_to_get_model == "Our list": - # Use the keys (model names) for the selection box - model_name_selected = st.selectbox("Which model would you like to download?", options=list(sd_model_dict.keys()), key="model_to_download") - - if st.button("Download Model", key="download_model"): - with st.spinner("Downloading model..."): - download_bar = st.progress(0, text="") - save_directory = "ComfyUI/models/checkpoints" - os.makedirs(save_directory, exist_ok=True) # Create the directory if it doesn't exist - - # Retrieve the URL using the selected model name - model_url = sd_model_dict[model_name_selected]["url"] - - # Download the model and save it to the directory - response = requests.get(model_url, stream=True) - zip_filename = sd_model_dict[model_name_selected]["filename"] - filepath = os.path.join(save_directory, zip_filename) - print("filepath: ", filepath) - if response.status_code == 200: - total_size = int(response.headers.get('content-length', 0)) - - with open(filepath, 'wb') as f: - received_bytes = 0 - - for data in response.iter_content(chunk_size=8192): - f.write(data) - received_bytes += len(data) - progress = received_bytes / total_size - download_bar.progress(progress) - - st.success(f"Downloaded {model_name_selected} to {save_directory}") - download_bar.empty() - - if model_url.endswith(".zip") or model_url.endswith(".tar"): - st.success("Extracting the zip file. 
Please wait...") - new_filepath = filepath.replace(zip_filename, "") - if model_url.endswith(".zip"): - with zipfile.ZipFile(f"{filepath}", "r") as zip_ref: - zip_ref.extractall(new_filepath) - else: - with tarfile.open(f"{filepath}", "r") as tar_ref: - tar_ref.extractall(new_filepath) - - os.remove(filepath) - st.rerun() - - elif where_to_get_model == "Upload a model": - st.info("It's simpler to just drop this into the ComfyUI/models/checkpoints directory.") - - elif where_to_get_model == "From a URL": - text1, text2 = st.columns([1, 1]) - with text1: - - text_input = st.text_input("Enter the URL of the model", key="text_input") - with text2: - st.info("Make sure to get the download url of the model. \n\n For example, from Civit, this should look like this: https://civitai.com/api/download/models/179446. \n\n While from Hugging Face, it should look like this: https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors") - if st.button("Download Model", key="download_model"): - with st.spinner("Downloading model..."): - save_directory = "ComfyUI/models/checkpoints" - os.makedirs(save_directory, exist_ok=True) - response = requests.get(text_input) - if response.status_code == 200: - with open(os.path.join(save_directory, text_input.split("/")[-1]), 'wb') as f: - f.write(response.content) - st.success(f"Downloaded model to {save_directory}") - else: - st.error("Failed to download model") - - # if it's in local DEVELOPMENT ENVIRONMENT - st.markdown("***") - st.markdown("##### Motion guidance") - tab1, tab2, tab3 = st.tabs(["Apply LoRAs","Download LoRAs","Train LoRAs"]) - - lora_data = [] - lora_file_dest = "ComfyUI/models/animatediff_motion_lora" - lora_file_links = { - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_tony_stark_r64_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_train_r128_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/300_car_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_desert_48_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_jeep_driving_r32_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_man_running_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_rotation_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - 
"https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/750_jeep_driving_r32_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/300_zooming_in_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_cat_walking_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_playing_banjo_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_woman_dancing_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_zooming_out_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif" - } - - # ---------------- ADD LORA ----------------- - with tab1: - files = get_files_in_a_directory(lora_file_dest, ['safetensors', 'ckpt']) - - # Iterate through each current LoRA in session state - if len(files) == 0: - st.error("No LoRAs found in the directory - go to Explore to download some, or drop them into ComfyUI/models/animatediff_motion_lora") - if st.button("Check again", key="check_again"): - st.rerun() - else: - # cleaning empty lora vals - for idx, lora in enumerate(st.session_state[f"lora_data_{shot.uuid}"]): - if not lora: - st.session_state[f"lora_data_{shot.uuid}"].pop(idx) - - for idx, lora in enumerate(st.session_state[f"lora_data_{shot.uuid}"]): - if not lora: - continue - h1, h2, h3, h4 = st.columns([1, 1, 1, 0.5]) - with h1: - file_idx = files.index(lora["filename"]) - motion_lora = st.selectbox("Which LoRA would you like to use?", options=files, key=f"motion_lora_{idx}", index=file_idx) - - with h2: - strength_of_lora = st.slider("How strong would you like the LoRA to be?", min_value=0.0, max_value=1.0, value=lora["lora_strength"], step=0.01, key=f"strength_of_lora_{idx}") - lora_data.append({"filename": motion_lora, "lora_strength": strength_of_lora, "filepath": lora_file_dest + "/" + motion_lora}) - - with h3: - when_to_apply_lora = st.slider("When to apply the LoRA?", min_value=0, max_value=100, value=(0,100), step=1, key=f"when_to_apply_lora_{idx}",disabled=True,help="This feature is not yet available.") - - with h4: - st.write("") - if st.button("Remove", key=f"remove_lora_{idx}"): - st.session_state[f"lora_data_{shot.uuid}"].pop(idx) - st.rerun() - - # displaying preview - display_motion_lora(motion_lora, lora_file_links) - - if len(st.session_state[f"lora_data_{shot.uuid}"]) == 0: - text = "Add a LoRA" - else: - text = "Add another LoRA" - if st.button(text, key="add_motion_guidance"): - if files and len(files): - st.session_state[f"lora_data_{shot.uuid}"].append({ - "filename": files[0], - "lora_strength": 0.5, - "filepath": lora_file_dest + "/" + files[0] - }) - st.rerun() - # ---------------- DOWNLOAD LORA --------------- - with tab2: - text1, text2 = st.columns([1, 1]) - with text1: - where_to_download_from = st.radio("Where would you like to get the LoRA from?", options=["Our list", "From a URL","Upload a LoRA"], key="where_to_download_from", horizontal=True) - - if where_to_download_from == "Our list": - with text1: - selected_lora_optn = 
st.selectbox("Which LoRA would you like to download?", options=[a.split("/")[-1] for a in lora_file_links], key="selected_lora") - # Display selected Lora - display_motion_lora(selected_lora_optn, lora_file_links) - - if st.button("Download LoRA", key="download_lora"): - with st.spinner("Downloading LoRA..."): - save_directory = "ComfyUI/models/animatediff_motion_lora" - os.makedirs(save_directory, exist_ok=True) # Create the directory if it doesn't exist - - # Extract the filename from the URL - selected_lora, lora_idx = next(((ele, idx) for idx, ele in enumerate(lora_file_links.keys()) if selected_lora_optn in ele), None) - filename = selected_lora.split("/")[-1] - save_path = os.path.join(save_directory, filename) - - # Download the file - download_lora_bar = st.progress(0, text="") - response = requests.get(selected_lora, stream=True) - if response.status_code == 200: - total_size = int(response.headers.get('content-length', 0)) - with open(save_path, 'wb') as f: - received_bytes = 0 - - for data in response.iter_content(chunk_size=8192): - f.write(data) - received_bytes += len(data) - progress = received_bytes / total_size - download_lora_bar.progress(progress) - - st.success(f"Downloaded LoRA to {save_path}") - download_lora_bar.empty() - st.rerun() - else: - st.error("Failed to download LoRA") - - elif where_to_download_from == "From a URL": - with text1: - text_input = st.text_input("Enter the URL of the LoRA", key="text_input_lora") - with text2: - st.write("") - st.write("") - st.write("") - st.info("Make sure to get the download url of the LoRA. \n\n For example, from Hugging Face, it should look like this: https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors") - with text1: - if st.button("Download LoRA", key="download_lora"): - with st.spinner("Downloading LoRA..."): - save_directory = "ComfyUI/models/animatediff_motion_lora" - os.makedirs(save_directory, exist_ok=True) - response = requests.get(text_input) - if response.status_code == 200: - with open(os.path.join(save_directory, text_input.split("/")[-1]), 'wb') as f: - f.write(response.content) - st.success(f"Downloaded LoRA to {save_directory}") - else: - st.error("Failed to download LoRA") - elif where_to_download_from == "Upload a LoRA": - st.info("It's simpler to just drop this into the ComfyUI/models/animatediff_motion_lora directory.") - # ---------------- TRAIN LORA -------------- - with tab3: - b1, b2 = st.columns([1, 1]) - with b1: - lora_name = st.text_input("Name this LoRA", key="lora_name") - if model_files and len(model_files): - base_sd_model = st.selectbox( - label="Select base sd model for training", - options=model_files, - key="base_sd_model_video", - index=current_model_index, - on_change=update_model - ) - else: - base_sd_model = "" - st.info("Default model Deliberate V2 would be selected") - - lora_prompt = st.text_area("Describe the motion", key="lora_prompt") - training_video = st.file_uploader("Upload a video to train a new LoRA", type=["mp4"]) - - if st.button("Train LoRA", key="train_lora", use_container_width=True): - filename = str(uuid.uuid4()) + ".mp4" - hosted_url = save_or_host_file(training_video, "videos/temp/" + filename, "video/mp4") - - file_data = { - "name": filename, - "type": InternalFileType.VIDEO.value, - "project_id": shot.project.uuid, - } - - if hosted_url: - file_data.update({'hosted_url': hosted_url}) - else: - file_data.update({'local_path': "videos/temp/" + filename}) - - video_file = 
data_repo.create_file(**file_data) - video_width, video_height = get_media_dimensions(video_file.location) - unique_file_tag = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6)) - train_motion_lora( - video_file, - lora_prompt, - lora_name + "_" + unique_file_tag, - video_width, - video_height, - base_sd_model - ) - - st.markdown("***") - st.markdown("##### Overall style settings") - - e1, e2, e3 = st.columns([1, 1,1]) - with e1: - strength_of_adherence = st.slider("How much would you like to force adherence to the input images?", min_value=0.0, max_value=1.0, step=0.01, key="strength_of_adherence", value=st.session_state[f"strength_of_adherence_value_{shot.uuid}"]) - with e2: - st.info("Higher values may cause flickering and sudden changes in the video. Lower values may cause the video to be less influenced by the input images but can lead to smoother motion and better colours.") - - f1, f2, f3 = st.columns([1, 1, 1]) - with f1: - overall_positive_prompt = "" - def update_prompt(): - global overall_positive_prompt - overall_positive_prompt = st.session_state[f"positive_prompt_video_{shot.uuid}"] - - overall_positive_prompt = st.text_area( - "What would you like to see in the videos?", - key="overall_positive_prompt", - value=st.session_state[f"positive_prompt_video_{shot.uuid}"], - on_change=update_prompt - ) - with f2: - overall_negative_prompt = st.text_area( - "What would you like to avoid in the videos?", - key="overall_negative_prompt", - value=st.session_state[f"negative_prompt_video_{shot.uuid}"] - ) - - with f3: - st.write("") - st.write("") - st.info("Use these sparingly, as they can have a large impact on the video. You can also edit them for individual frames in the advanced settings above.") - - st.markdown("***") - st.markdown("##### Overall motion settings") - h1, h2, h3 = st.columns([0.5, 1.5, 1]) - with h1: - # will fix this later - if f"type_of_motion_context_index_{shot.uuid}" in st.session_state and isinstance(st.session_state[f"type_of_motion_context_index_{shot.uuid}"], str): - st.session_state[f"type_of_motion_context_index_{shot.uuid}"] = ["Low", "Standard", "High"].index(st.session_state[f"type_of_motion_context_index_{shot.uuid}"]) - type_of_motion_context = st.radio("Type of motion context:", options=["Low", "Standard", "High"], key="type_of_motion_context", horizontal=False, index=st.session_state[f"type_of_motion_context_index_{shot.uuid}"]) - - with h2: - st.info("This is how much the motion will be informed by the previous and next frames. 'High' can make it smoother but increase artifacts - while 'Low' make the motion less smooth but removes artifacts. Naturally, we recommend Standard.") - st.write("") - i1, i3,_ = st.columns([1,2,1]) - with i1: - amount_of_motion = st.slider("Amount of motion:", min_value=0.5, max_value=1.5, step=0.01, key="amount_of_motion", value=st.session_state[f"amount_of_motion_{shot.uuid}"]) - st.write("") - if st.button("Bulk update amount of motion", key="update_motion", help="This will update this value in all the frames"): - for idx, timing in enumerate(timing_list): - st.session_state[f'motion_during_frame_{shot.uuid}_{idx}'] = amount_of_motion - st.success("Updated amount of motion") - time.sleep(0.3) - st.rerun() - with i3: - st.write("") - st.write("") - st.info("This actually updates the motion during frames in the advanced settings above - but we put it here because it has a big impact on the video. 
You can scroll up to see the changes and tweak for individual frames.") - - type_of_frame_distribution = "dynamic" - type_of_key_frame_influence = "dynamic" - type_of_strength_distribution = "dynamic" - linear_frame_distribution_value = 16 - linear_key_frame_influence_value = 1.0 - linear_cn_strength_value = 1.0 - relative_ipadapter_strength = 1.0 - relative_cn_strength = 0.0 - project_settings = data_repo.get_project_setting(shot.project.uuid) - width = project_settings.width - height = project_settings.height - img_dimension = f"{width}x{height}" - motion_scale = 1.3 - interpolation_style = 'ease-in-out' - buffer = 4 - - - (dynamic_strength_values, dynamic_key_frame_influence_values, dynamic_frame_distribution_values, - context_length, context_stride, context_overlap, multipled_base_end_percent, multipled_base_adapter_strength, - prompt_travel, negative_prompt_travel, motion_scales) = transform_data(strength_of_frames, - freedoms_between_frames, speeds_of_transitions, distances_to_next_frames, type_of_motion_context, - strength_of_adherence,individual_prompts, individual_negative_prompts, buffer, motions_during_frames) - - - settings.update( - ckpt=sd_model, - width=width, - height=height, - buffer=4, - motion_scale=motion_scale, - motion_scales=motion_scales, - image_dimension=img_dimension, - output_format="video/h264-mp4", - prompt=overall_positive_prompt, - negative_prompt=overall_negative_prompt, - interpolation_type=interpolation_style, - stmfnet_multiplier=2, - relative_ipadapter_strength=relative_ipadapter_strength, - relative_cn_strength=relative_cn_strength, - type_of_strength_distribution=type_of_strength_distribution, - linear_strength_value=str(linear_cn_strength_value), - dynamic_strength_values=str(dynamic_strength_values), - linear_frame_distribution_value=linear_frame_distribution_value, - dynamic_frame_distribution_values=dynamic_frame_distribution_values, - type_of_frame_distribution=type_of_frame_distribution, - type_of_key_frame_influence=type_of_key_frame_influence, - linear_key_frame_influence_value=float(linear_key_frame_influence_value), - dynamic_key_frame_influence_values=dynamic_key_frame_influence_values, - normalise_speed=True, - ipadapter_noise=0.3, - animation_style=AnimationStyleType.CREATIVE_INTERPOLATION.value, - context_length=context_length, - context_stride=context_stride, - context_overlap=context_overlap, - multipled_base_end_percent=multipled_base_end_percent, - multipled_base_adapter_strength=multipled_base_adapter_strength, - individual_prompts=prompt_travel, - individual_negative_prompts=negative_prompt_travel, - animation_stype=AnimationStyleType.CREATIVE_INTERPOLATION.value, - max_frames=str(dynamic_frame_distribution_values[-1]), - lora_data=lora_data, - shot_data=shot_meta_data - ) - - position = "generate_vid" - st.markdown("***") - st.markdown("##### Generation Settings") - - animate_col_1, animate_col_2, _ = st.columns([3, 1, 1]) - with animate_col_1: - variant_count = st.number_input("How many variants?", min_value=1, max_value=5, value=1, step=1, key="variant_count") - - if "generate_vid_generate_inference" in st.session_state and st.session_state["generate_vid_generate_inference"]: - # last keyframe position * 16 - duration = float(dynamic_frame_distribution_values[-1] / 16) - data_repo.update_shot(uuid=shot.uuid, duration=duration) - shot_data = update_session_state_with_animation_details( - shot.uuid, - timing_list, - strength_of_frames, - distances_to_next_frames, - speeds_of_transitions, - freedoms_between_frames, - 
motions_during_frames, - individual_prompts, - individual_negative_prompts, - lora_data - ) - settings.update(shot_data=shot_data) - vid_quality = "full" # TODO: add this if video_resolution == "Full Resolution" else "preview" - st.success("Generating clip - see status in the Generation Log in the sidebar. Press 'Refresh log' to update.") - - positive_prompt = "" - append_to_prompt = "" # TODO: add this - for idx, timing in enumerate(timing_list): - if timing.primary_image and timing.primary_image.location: - b = timing.primary_image.inference_params - prompt = b.get("prompt", "") if b else "" - prompt += append_to_prompt - frame_prompt = f"{idx * linear_frame_distribution_value}_" + prompt - positive_prompt += ":" + frame_prompt if positive_prompt else frame_prompt - else: - st.error("Please generate primary images") - time.sleep(0.7) - st.rerun() - - if f'{shot_uuid}_backlog_enabled' not in st.session_state: - st.session_state[f'{shot_uuid}_backlog_enabled'] = False - - create_single_interpolated_clip( - shot_uuid, - vid_quality, - settings, - variant_count, - st.session_state[f'{shot_uuid}_backlog_enabled'] - ) - - backlog_update = {f'{shot_uuid}_backlog_enabled': False} - toggle_generate_inference(position, **backlog_update) - st.rerun() - - btn1, btn2, btn3 = st.columns([1, 1, 1]) - backlog_no_update = {f'{shot_uuid}_backlog_enabled': False} - with btn1: - st.button("Add to queue", key="generate_animation_clip", disabled=disable_generate, help=help, on_click=lambda: toggle_generate_inference(position, **backlog_no_update),type="primary",use_container_width=True) - - backlog_update = {f'{shot_uuid}_backlog_enabled': True} - with btn2: - st.button("Add to backlog", key="generate_animation_clip_backlog", disabled=disable_generate, help=backlog_help, on_click=lambda: toggle_generate_inference(position, **backlog_update),type="secondary") - - - with st.sidebar: - with st.expander("⚙️ Animation settings", expanded=True): - if st_memory.toggle("Open", key="open_motion_data"): - - st.markdown("### Visualisation of current motion") - keyframe_positions = get_keyframe_positions(type_of_frame_distribution, dynamic_frame_distribution_values, timing_list, linear_frame_distribution_value) - keyframe_positions = [int(kf * 16) for kf in keyframe_positions] - last_key_frame_position = (keyframe_positions[-1]) - strength_values = extract_strength_values(type_of_strength_distribution, dynamic_strength_values, keyframe_positions, linear_cn_strength_value) - key_frame_influence_values = extract_influence_values(type_of_key_frame_influence, dynamic_key_frame_influence_values, keyframe_positions, linear_key_frame_influence_value) - weights_list, frame_numbers_list = calculate_weights(keyframe_positions, strength_values, 4, key_frame_influence_values,last_key_frame_position) - plot_weights(weights_list, frame_numbers_list) - - st.markdown("***") - - bulk1, bulk2 = st.columns([1, 1]) - with bulk1: - st.markdown("### Bulk edit frame settings") - with bulk2: - if st.button("Reset to Default", use_container_width=True, key="reset_to_default"): - for idx, timing in enumerate(timing_list): - for k, v in DEFAULT_SHOT_MOTION_VALUES.items(): - st.session_state[f'{k}_{shot.uuid}_{idx}'] = v - - st.success("All frames have been reset to default values.") - st.rerun() - - editable_entity = st.selectbox("What would you like to edit?", options=["Seconds to next frames", "Speed of transitions", "Freedom between frames","Strength of frames","Motion during frames"], key="editable_entity") - if editable_entity == "Seconds to 
next frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=6.00, step=0.25, value=1.0, key="entity_new_val") - if editable_entity == "Strength of frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=1.0, step=0.01, value=0.5, key="entity_new_val") - elif editable_entity == "Speed of transitions": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.45, max_value=0.7, step=0.01, value=0.6, key="entity_new_val") - elif editable_entity == "Freedom between frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.15, max_value=0.85, step=0.01, value=0.5, key="entity_new_val") - elif editable_entity == "Motion during frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.5, max_value=1.5, step=0.01, value=1.3, key="entity_new_val") - - bulk1, bulk2 = st.columns([1, 1]) - with bulk1: - if st.button("Bulk edit", key="bulk_edit", use_container_width=True): - if editable_entity == "Strength of frames": - for idx, timing in enumerate(timing_list): - st.session_state[f'strength_of_frame_{shot.uuid}_{idx}'] = entity_new_val - elif editable_entity == "Seconds to next frames": - for idx, timing in enumerate(timing_list): - st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] = entity_new_val - elif editable_entity == "Speed of transitions": - for idx, timing in enumerate(timing_list): - st.session_state[f'speed_of_transition_{shot.uuid}_{idx}'] = entity_new_val - elif editable_entity == "Freedom between frames": - for idx, timing in enumerate(timing_list): - st.session_state[f'freedom_between_frames_{shot.uuid}_{idx}'] = entity_new_val - elif editable_entity == "Motion during frames": - for idx, timing in enumerate(timing_list): - st.session_state[f'motion_during_frame_{shot.uuid}_{idx}'] = entity_new_val - st.rerun() - - st.markdown("***") - st.markdown("### Save current settings") - if st.button("Save current settings", key="save_current_settings",use_container_width=True,help="Settings will also be saved when you generate the animation."): - update_session_state_with_animation_details(shot.uuid, timing_list, strength_of_frames, distances_to_next_frames, speeds_of_transitions, freedoms_between_frames, motions_during_frames, individual_prompts, individual_negative_prompts) - st.success("Settings saved successfully.") - time.sleep(0.7) - st.rerun() - -# --------------------- METHODS ----------------------- -def toggle_generate_inference(position, **kwargs): - for k,v in kwargs.items(): - st.session_state[k] = v - if position + '_generate_inference' not in st.session_state: - st.session_state[position + '_generate_inference'] = True - else: - st.session_state[position + '_generate_inference'] = not st.session_state[position + '_generate_inference'] - -def update_session_state_with_animation_details(shot_uuid, timing_list, strength_of_frames, distances_to_next_frames, speeds_of_transitions, freedoms_between_frames, motions_during_frames, individual_prompts, individual_negative_prompts, lora_data): - data_repo = DataRepo() - shot = data_repo.get_shot_from_uuid(shot_uuid) - meta_data = shot.meta_data_dict - timing_data = [] - for idx, timing in enumerate(timing_list): - if idx < len(timing_list): - st.session_state[f'strength_of_frame_{shot_uuid}_{idx}'] = strength_of_frames[idx] - st.session_state[f'individual_prompt_{shot_uuid}_{idx}'] = individual_prompts[idx] - 
st.session_state[f'individual_negative_prompt_{shot_uuid}_{idx}'] = individual_negative_prompts[idx] - st.session_state[f'motion_during_frame_{shot_uuid}_{idx}'] = motions_during_frames[idx] - if idx < len(timing_list) - 1: - st.session_state[f'distance_to_next_frame_{shot_uuid}_{idx}'] = distances_to_next_frames[idx] * 2 - st.session_state[f'speed_of_transition_{shot_uuid}_{idx}'] = speeds_of_transitions[idx] - st.session_state[f'freedom_between_frames_{shot_uuid}_{idx}'] = freedoms_between_frames[idx] - - # adding into the meta-data - state_data = { - "strength_of_frame" : strength_of_frames[idx], - "individual_prompt" : individual_prompts[idx], - "individual_negative_prompt" : individual_negative_prompts[idx], - "motion_during_frame" : motions_during_frames[idx], - "distance_to_next_frame" : distances_to_next_frames[idx] * 2 if idx < len(timing_list) - 1 else DEFAULT_SHOT_MOTION_VALUES["distance_to_next_frame"], - "speed_of_transition" : speeds_of_transitions[idx] if idx < len(timing_list) - 1 else DEFAULT_SHOT_MOTION_VALUES["speed_of_transition"], - "freedom_between_frames" : freedoms_between_frames[idx] if idx < len(timing_list) - 1 else DEFAULT_SHOT_MOTION_VALUES["freedom_between_frames"], - } - - timing_data.append(state_data) - - main_setting_data = {} - main_setting_data[f'lora_data_{shot.uuid}'] = lora_data - main_setting_data[f"strength_of_adherence_value_{shot.uuid}"] = st.session_state["strength_of_adherence"] - main_setting_data[f"type_of_motion_context_index_{shot.uuid}"] = st.session_state["type_of_motion_context"] - main_setting_data[f"positive_prompt_video_{shot.uuid}"] = st.session_state["overall_positive_prompt"] - main_setting_data[f"negative_prompt_video_{shot.uuid}"] = st.session_state["overall_negative_prompt"] - main_setting_data[f"amount_of_motion_{shot.uuid}"] = st.session_state["amount_of_motion"] - - checkpoints_dir = "ComfyUI/models/checkpoints" - all_files = os.listdir(checkpoints_dir) - model_files = [file for file in all_files if file.endswith('.safetensors') or file.endswith('.ckpt')] - model_files = [file for file in model_files if "xl" not in file] - - if 'sd_model_video' in st.session_state and len(model_files): - idx = model_files.index(st.session_state["sd_model_video"]) if st.session_state["sd_model_video"] in model_files else 0 - main_setting_data[f'ckpt_{shot.uuid}'] = model_files[idx] - else: - main_setting_data[f'ckpt_{shot.uuid}'] = default_model - - meta_data.update( - { - ShotMetaData.MOTION_DATA.value : json.dumps( - { - "timing_data": timing_data, - "main_setting_data": main_setting_data - } - ) - } - ) - - data_repo.update_shot(**{"uuid": shot_uuid, "meta_data": json.dumps(meta_data)}) - return meta_data - -def format_frame_prompts_with_buffer(frame_numbers, individual_prompts, buffer): - adjusted_frame_numbers = [frame + buffer for frame in frame_numbers] - - # Preprocess prompts to remove any '/' or '"' from the values - processed_prompts = [prompt.replace("/", "").replace('"', '') for prompt in individual_prompts] - - # Format the adjusted frame numbers and processed prompts - formatted = ', '.join(f'"{int(frame)}": "{prompt}"' for frame, prompt in zip(adjusted_frame_numbers, processed_prompts)) - return formatted - -def extract_strength_values(type_of_key_frame_influence, dynamic_key_frame_influence_values, keyframe_positions, linear_key_frame_influence_value): - - if type_of_key_frame_influence == "dynamic": - # Process the dynamic_key_frame_influence_values depending on its format - if isinstance(dynamic_key_frame_influence_values, 
str): - dynamic_values = eval(dynamic_key_frame_influence_values) - else: - dynamic_values = dynamic_key_frame_influence_values - - # Iterate through the dynamic values and convert tuples with two values to three values - dynamic_values_corrected = [] - for value in dynamic_values: - if len(value) == 2: - value = (value[0], value[1], value[0]) - dynamic_values_corrected.append(value) - - return dynamic_values_corrected - else: - # Process for linear or other types - if len(linear_key_frame_influence_value) == 2: - linear_key_frame_influence_value = (linear_key_frame_influence_value[0], linear_key_frame_influence_value[1], linear_key_frame_influence_value[0]) - return [linear_key_frame_influence_value for _ in range(len(keyframe_positions) - 1)] - -def update_interpolation_settings(values=None, timing_list=None): - default_values = { - 'type_of_frame_distribution': 0, - 'frames_per_keyframe': 16, - 'type_of_key_frame_influence': 0, - 'length_of_key_frame_influence': 1.0, - 'type_of_cn_strength_distribution': 0, - 'linear_cn_strength_value': (0.0,0.7), - 'linear_frame_distribution_value': 16, - 'linear_key_frame_influence_value': 1.0, - 'interpolation_style': 0, - 'motion_scale': 1.0, - 'negative_prompt_video': 'bad image, worst quality', - 'ip_adapter_strength': 1.0, - 'ip_adapter_influence': 1.0, - 'soft_scaled_cn_weights_multiple_video': 0.85 - } - - for idx in range(0, len(timing_list)): - default_values[f'dynamic_frame_distribution_values_{idx}'] = (idx) * 16 - default_values[f'dynamic_key_frame_influence_values_{idx}'] = 1.0 - default_values[f'dynamic_strength_values_{idx}'] = (0.0,0.7) - - for key, default_value in default_values.items(): - st.session_state[key] = values.get(key, default_value) if values and values.get(key) is not None else default_value - # print(f"{key}: {st.session_state[key]}") - -def extract_influence_values(type_of_key_frame_influence, dynamic_key_frame_influence_values, keyframe_positions, linear_key_frame_influence_value): - # Check and convert linear_key_frame_influence_value if it's a float or string float - # if it's a string that starts with a parenthesis, convert it to a tuple - if isinstance(linear_key_frame_influence_value, str) and linear_key_frame_influence_value[0] == "(": - linear_key_frame_influence_value = eval(linear_key_frame_influence_value) - - - if not isinstance(linear_key_frame_influence_value, tuple): - if isinstance(linear_key_frame_influence_value, (float, str)): - try: - value = float(linear_key_frame_influence_value) - linear_key_frame_influence_value = (value, value) - except ValueError: - raise ValueError("linear_key_frame_influence_value must be a float or a string representing a float") - - number_of_outputs = len(keyframe_positions) - - if type_of_key_frame_influence == "dynamic": - # Convert list of individual float values into tuples - if all(isinstance(x, float) for x in dynamic_key_frame_influence_values): - dynamic_values = [(value, value) for value in dynamic_key_frame_influence_values] - elif isinstance(dynamic_key_frame_influence_values[0], str) and dynamic_key_frame_influence_values[0] == "(": - string_representation = ''.join(dynamic_key_frame_influence_values) - dynamic_values = eval(f'[{string_representation}]') - else: - dynamic_values = dynamic_key_frame_influence_values if isinstance(dynamic_key_frame_influence_values, list) else [dynamic_key_frame_influence_values] - return dynamic_values[:number_of_outputs] - else: - return [linear_key_frame_influence_value for _ in range(number_of_outputs)] - -def 
get_keyframe_positions(type_of_frame_distribution, dynamic_frame_distribution_values, images, linear_frame_distribution_value): - if type_of_frame_distribution == "dynamic": - # Check if the input is a string or a list - if isinstance(dynamic_frame_distribution_values, str): - # Sort the keyframe positions in numerical order - return sorted([int(kf.strip()) for kf in dynamic_frame_distribution_values.split(',')]) - elif isinstance(dynamic_frame_distribution_values, list): - return sorted(dynamic_frame_distribution_values) - else: - # Calculate the number of keyframes based on the total duration and linear_frames_per_keyframe - return [i * linear_frame_distribution_value for i in range(len(images))] - -def calculate_weights(keyframe_positions, strength_values, buffer, key_frame_influence_values,last_key_frame_position): - - def calculate_influence_frame_number(key_frame_position, next_key_frame_position, distance): - # Calculate the absolute distance between key frames - key_frame_distance = abs(next_key_frame_position - key_frame_position) - - # Apply the distance multiplier - extended_distance = key_frame_distance * distance - - # Determine the direction of influence based on the positions of the key frames - if key_frame_position < next_key_frame_position: - # Normal case: influence extends forward - influence_frame_number = key_frame_position + extended_distance - else: - # Reverse case: influence extends backward - influence_frame_number = key_frame_position - extended_distance - - # Return the result rounded to the nearest integer - return round(influence_frame_number) - - def find_curve(batch_index_from, batch_index_to, strength_from, strength_to, interpolation,revert_direction_at_midpoint, last_key_frame_position,i, number_of_items,buffer): - # Initialize variables based on the position of the keyframe - range_start = batch_index_from - range_end = batch_index_to - # if it's the first value, set influence range from 1.0 to 0.0 - if i == number_of_items - 1: - range_end = last_key_frame_position - - steps = range_end - range_start - diff = strength_to - strength_from - - # Calculate index for interpolation - index = np.linspace(0, 1, steps // 2 + 1) if revert_direction_at_midpoint else np.linspace(0, 1, steps) - - # Calculate weights based on interpolation type - if interpolation == "linear": - weights = np.linspace(strength_from, strength_to, len(index)) - elif interpolation == "ease-in": - weights = diff * np.power(index, 2) + strength_from - elif interpolation == "ease-out": - weights = diff * (1 - np.power(1 - index, 2)) + strength_from - elif interpolation == "ease-in-out": - weights = diff * ((1 - np.cos(index * np.pi)) / 2) + strength_from - - if revert_direction_at_midpoint: - weights = np.concatenate([weights, weights[::-1]]) - - # Generate frame numbers - frame_numbers = np.arange(range_start, range_start + len(weights)) - - # "Dropper" component: For keyframes with negative start, drop the weights - if range_start < 0 and i > 0: - drop_count = abs(range_start) - weights = weights[drop_count:] - frame_numbers = frame_numbers[drop_count:] - - # Dropper component: for keyframes a range_End is greater than last_key_frame_position, drop the weights - if range_end > last_key_frame_position and i < number_of_items - 1: - drop_count = range_end - last_key_frame_position - weights = weights[:-drop_count] - frame_numbers = frame_numbers[:-drop_count] - - return weights, frame_numbers - - weights_list = [] - frame_numbers_list = [] - - for i in range(len(keyframe_positions)): - 
keyframe_position = keyframe_positions[i] - interpolation = "ease-in-out" - # strength_from = strength_to = 1.0 - - if i == 0: # first image - # GET IMAGE AND KEYFRAME INFLUENCE VALUES - key_frame_influence_from, key_frame_influence_to = key_frame_influence_values[i] - start_strength, mid_strength, end_strength = strength_values[i] - keyframe_position = keyframe_positions[i] - next_key_frame_position = keyframe_positions[i+1] - batch_index_from = keyframe_position - batch_index_to_excl = calculate_influence_frame_number(keyframe_position, next_key_frame_position, key_frame_influence_to) - weights, frame_numbers = find_curve(batch_index_from, batch_index_to_excl, mid_strength, end_strength, interpolation, False, last_key_frame_position, i, len(keyframe_positions), buffer) - # interpolation = "ease-in" - - elif i == len(keyframe_positions) - 1: # last image - # GET IMAGE AND KEYFRAME INFLUENCE VALUES - key_frame_influence_from,key_frame_influence_to = key_frame_influence_values[i] - start_strength, mid_strength, end_strength = strength_values[i] - # strength_from, strength_to = cn_strength_values[i-1] - keyframe_position = keyframe_positions[i] - previous_key_frame_position = keyframe_positions[i-1] - batch_index_from = calculate_influence_frame_number(keyframe_position, previous_key_frame_position, key_frame_influence_from) - batch_index_to_excl = keyframe_position - weights, frame_numbers = find_curve(batch_index_from, batch_index_to_excl, start_strength, mid_strength, interpolation, False, last_key_frame_position, i, len(keyframe_positions), buffer) - # interpolation = "ease-out" - - else: # middle images - # GET IMAGE AND KEYFRAME INFLUENCE VALUES - key_frame_influence_from,key_frame_influence_to = key_frame_influence_values[i] - start_strength, mid_strength, end_strength = strength_values[i] - keyframe_position = keyframe_positions[i] - - # CALCULATE WEIGHTS FOR FIRST HALF - previous_key_frame_position = keyframe_positions[i-1] - batch_index_from = calculate_influence_frame_number(keyframe_position, previous_key_frame_position, key_frame_influence_from) - batch_index_to_excl = keyframe_position - first_half_weights, first_half_frame_numbers = find_curve(batch_index_from, batch_index_to_excl, start_strength, mid_strength, interpolation, False, last_key_frame_position, i, len(keyframe_positions), buffer) - - # CALCULATE WEIGHTS FOR SECOND HALF - next_key_frame_position = keyframe_positions[i+1] - batch_index_from = keyframe_position - batch_index_to_excl = calculate_influence_frame_number(keyframe_position, next_key_frame_position, key_frame_influence_to) - second_half_weights, second_half_frame_numbers = find_curve(batch_index_from, batch_index_to_excl, mid_strength, end_strength, interpolation, False, last_key_frame_position, i, len(keyframe_positions), buffer) - - # COMBINE FIRST AND SECOND HALF - weights = np.concatenate([first_half_weights, second_half_weights]) - frame_numbers = np.concatenate([first_half_frame_numbers, second_half_frame_numbers]) - - weights_list.append(weights) - frame_numbers_list.append(frame_numbers) - - return weights_list, frame_numbers_list - -def plot_weights(weights_list, frame_numbers_list): - plt.figure(figsize=(12, 6)) - for i, weights in enumerate(weights_list): - frame_numbers = frame_numbers_list[i] - plt.plot(frame_numbers, weights, label=f'Frame {i + 1}') - - # Plot settings - plt.xlabel('Frame Number') - plt.ylabel('Weight') - plt.legend() - plt.ylim(0, 1.0) - plt.show() - st.set_option('deprecation.showPyplotGlobalUse', False) - st.pyplot() - 
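For reference, a minimal usage sketch of how the weight curves computed above are typically driven — assuming the helper module this patch series moves these functions into (ui_components.methods.animatio n_style_methods is shown in the new file's imports) and purely illustrative keyframe values, not data from any real shot:

    # Illustrative sketch: four keyframes spaced 16 frames apart, each with
    # (start, mid, end) strengths and symmetric influence, mirroring the call site
    # in the animation sidebar. Values here are assumptions for demonstration only.
    from ui_components.methods.animation_style_methods import calculate_weights, plot_weights

    keyframe_positions = [0, 16, 32, 48]            # frame indices, one keyframe per 16 frames
    strength_values = [(0.0, 1.0, 0.7)] * 4         # (start, mid, end) strength per keyframe
    key_frame_influence_values = [(1.0, 1.0)] * 4   # influence before/after each keyframe
    last_key_frame_position = keyframe_positions[-1]

    weights_list, frame_numbers_list = calculate_weights(
        keyframe_positions, strength_values, 4,     # buffer of 4, as used in the sidebar
        key_frame_influence_values, last_key_frame_position,
    )
    plot_weights(weights_list, frame_numbers_list)  # renders the per-keyframe curves via st.pyplot()
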
-def transform_data(strength_of_frames, movements_between_frames, speeds_of_transitions, distances_to_next_frames, type_of_motion_context, strength_of_adherence, individual_prompts, individual_negative_prompts, buffer, motions_during_frames): - # FRAME SETTINGS - def adjust_and_invert_relative_value(middle_value, relative_value): - if relative_value is not None: - adjusted_value = middle_value * relative_value - return round(middle_value - adjusted_value, 2) - return None - - def invert_value(value): - return round(1.0 - value, 2) if value is not None else None - - # Creating output_strength with relative and inverted start and end values - output_strength = [] - for i, strength in enumerate(strength_of_frames): - start_value = None if i == 0 else movements_between_frames[i - 1] - end_value = None if i == len(strength_of_frames) - 1 else movements_between_frames[i] - - # Adjusting and inverting start and end values relative to the middle value - adjusted_start = adjust_and_invert_relative_value(strength, start_value) - adjusted_end = adjust_and_invert_relative_value(strength, end_value) - - output_strength.append((adjusted_start, strength, adjusted_end)) - - # Creating output_speeds with inverted values - output_speeds = [(None, None) for _ in range(len(speeds_of_transitions) + 1)] - for i in range(len(speeds_of_transitions)): - current_tuple = list(output_speeds[i]) - next_tuple = list(output_speeds[i + 1]) - - inverted_speed = invert_value(speeds_of_transitions[i]) - current_tuple[1] = inverted_speed * 2 - next_tuple[0] = inverted_speed * 2 - - output_speeds[i] = tuple(current_tuple) - output_speeds[i + 1] = tuple(next_tuple) - - # Creating cumulative_distances - cumulative_distances = [0] - for distance in distances_to_next_frames: - cumulative_distances.append(cumulative_distances[-1] + distance) - - cumulative_distances = [int(float(value) * 16) for value in cumulative_distances] - - # MOTION CONTEXT SETTINGS - if type_of_motion_context == "Low": - context_length = 16 - context_stride = 1 - context_overlap = 2 - - elif type_of_motion_context == "Standard": - context_length = 16 - context_stride = 2 - context_overlap = 4 - - elif type_of_motion_context == "High": - context_length = 16 - context_stride = 4 - context_overlap = 4 - - # SPARSE CTRL SETTINGS - multipled_base_end_percent = 0.05 * (strength_of_adherence * 10) - multipled_base_adapter_strength = 0.05 * (strength_of_adherence * 20) - - # FRAME PROMPTS FORMATTING - def format_frame_prompts_with_buffer(frame_numbers, individual_prompts, buffer): - adjusted_frame_numbers = [frame + buffer for frame in frame_numbers] - - # Preprocess prompts to remove any '/' or '"' from the values - processed_prompts = [prompt.replace("/", "").replace('"', '') for prompt in individual_prompts] - - # Format the adjusted frame numbers and processed prompts - formatted = ', '.join(f'"{int(frame)}": "{prompt}"' for frame, prompt in zip(adjusted_frame_numbers, processed_prompts)) - return formatted - - # Applying format_frame_prompts_with_buffer - formatted_individual_prompts = format_frame_prompts_with_buffer(cumulative_distances, individual_prompts, buffer) - formatted_individual_negative_prompts = format_frame_prompts_with_buffer(cumulative_distances, individual_negative_prompts, buffer) - - # MOTION STRENGTHS FORMATTING - adjusted_frame_numbers = [0] + [frame + buffer for frame in cumulative_distances[1:]] - - # Format the adjusted frame numbers and strengths - motions_during_frames = ', '.join(f'{int(frame)}:({strength})' for frame, strength in 
zip(adjusted_frame_numbers, motions_during_frames)) - - return output_strength, output_speeds, cumulative_distances, context_length, context_stride, context_overlap, multipled_base_end_percent, multipled_base_adapter_strength, formatted_individual_prompts, formatted_individual_negative_prompts,motions_during_frames - diff --git a/ui_components/widgets/sm_animation_style_element.py b/ui_components/widgets/sm_animation_style_element.py new file mode 100644 index 00000000..332f82f3 --- /dev/null +++ b/ui_components/widgets/sm_animation_style_element.py @@ -0,0 +1,603 @@ +import time +import uuid +import os +import zipfile +import requests +import random +import string +import tarfile +import streamlit as st +from shared.constants import InternalFileType +from utils import st_memory +from ui_components.constants import DEFAULT_SHOT_MOTION_VALUES +from ui_components.methods.animation_style_methods import calculate_weights, extract_influence_values, \ + extract_strength_values, get_keyframe_positions, load_shot_settings, plot_weights, update_session_state_with_animation_details +from ui_components.methods.file_methods import get_files_in_a_directory, get_media_dimensions, save_or_host_file +from ui_components.widgets.display_element import display_motion_lora +from ui_components.methods.ml_methods import train_motion_lora +from utils.data_repo.data_repo import DataRepo + +def animation_sidebar(shot_uuid, img_list, type_of_frame_distribution, dynamic_frame_distribution_values, linear_frame_distribution_value,\ + type_of_strength_distribution, dynamic_strength_values, linear_cn_strength_value, type_of_key_frame_influence, dynamic_key_frame_influence_values, \ + linear_key_frame_influence_value, strength_of_frames, distances_to_next_frames, speeds_of_transitions, freedoms_between_frames, \ + motions_during_frames, individual_prompts, individual_negative_prompts, default_model): + with st.sidebar: + with st.expander("⚙️ Animation settings", expanded=True): + if st_memory.toggle("Open", key="open_motion_data"): + + st.markdown("### Visualisation of current motion") + keyframe_positions = get_keyframe_positions(type_of_frame_distribution, dynamic_frame_distribution_values, img_list, linear_frame_distribution_value) + keyframe_positions = [int(kf * 16) for kf in keyframe_positions] + last_key_frame_position = (keyframe_positions[-1]) + strength_values = extract_strength_values(type_of_strength_distribution, dynamic_strength_values, keyframe_positions, linear_cn_strength_value) + key_frame_influence_values = extract_influence_values(type_of_key_frame_influence, dynamic_key_frame_influence_values, keyframe_positions, linear_key_frame_influence_value) + weights_list, frame_numbers_list = calculate_weights(keyframe_positions, strength_values, 4, key_frame_influence_values,last_key_frame_position) + plot_weights(weights_list, frame_numbers_list) + + st.markdown("***") + + bulk1, bulk2 = st.columns([1, 1]) + with bulk1: + st.markdown("### Bulk edit frame settings") + with bulk2: + if st.button("Reset to Default", use_container_width=True, key="reset_to_default"): + for idx, _ in enumerate(img_list): + for k, v in DEFAULT_SHOT_MOTION_VALUES.items(): + st.session_state[f'{k}_{shot_uuid}_{idx}'] = v + + st.success("All frames have been reset to default values.") + st.rerun() + + editable_entity = st.selectbox("What would you like to edit?", options=["Seconds to next frames", "Speed of transitions", "Freedom between frames","Strength of frames","Motion during frames"], key="editable_entity") + if editable_entity 
== "Seconds to next frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=6.00, step=0.25, value=1.0, key="entity_new_val") + if editable_entity == "Strength of frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=1.0, step=0.01, value=0.5, key="entity_new_val") + elif editable_entity == "Speed of transitions": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.45, max_value=0.7, step=0.01, value=0.6, key="entity_new_val") + elif editable_entity == "Freedom between frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.15, max_value=0.85, step=0.01, value=0.5, key="entity_new_val") + elif editable_entity == "Motion during frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.5, max_value=1.5, step=0.01, value=1.3, key="entity_new_val") + + bulk1, bulk2 = st.columns([1, 1]) + with bulk1: + if st.button("Bulk edit", key="bulk_edit", use_container_width=True): + if editable_entity == "Strength of frames": + for idx, _ in enumerate(img_list): + st.session_state[f'strength_of_frame_{shot_uuid}_{idx}'] = entity_new_val + elif editable_entity == "Seconds to next frames": + for idx, _ in enumerate(img_list): + st.session_state[f'distance_to_next_frame_{shot_uuid}_{idx}'] = entity_new_val + elif editable_entity == "Speed of transitions": + for idx, _ in enumerate(img_list): + st.session_state[f'speed_of_transition_{shot_uuid}_{idx}'] = entity_new_val + elif editable_entity == "Freedom between frames": + for idx, _ in enumerate(img_list): + st.session_state[f'freedom_between_frames_{shot_uuid}_{idx}'] = entity_new_val + elif editable_entity == "Motion during frames": + for idx, _ in enumerate(img_list): + st.session_state[f'motion_during_frame_{shot_uuid}_{idx}'] = entity_new_val + st.rerun() + + st.markdown("***") + st.markdown("### Save current settings") + if st.button("Save current settings", key="save_current_settings",use_container_width=True,help="Settings will also be saved when you generate the animation."): + update_session_state_with_animation_details( + shot_uuid, + img_list, + strength_of_frames, + distances_to_next_frames, + speeds_of_transitions, + freedoms_between_frames, + motions_during_frames, + individual_prompts, + individual_negative_prompts, + [], + default_model + ) + st.success("Settings saved successfully.") + time.sleep(0.7) + st.rerun() + + +def video_motion_settings(shot_uuid, img_list): + st.markdown("***") + st.markdown("##### Overall style settings") + + e1, e2, e3 = st.columns([1, 1,1]) + with e1: + strength_of_adherence = st.slider("How much would you like to force adherence to the input images?", min_value=0.0, max_value=1.0, step=0.01, key="strength_of_adherence", value=st.session_state[f"strength_of_adherence_value_{shot_uuid}"]) + with e2: + st.info("Higher values may cause flickering and sudden changes in the video. 
Lower values may cause the video to be less influenced by the input images but can lead to smoother motion and better colours.") + + f1, f2, f3 = st.columns([1, 1, 1]) + with f1: + overall_positive_prompt = "" + def update_prompt(): + global overall_positive_prompt + overall_positive_prompt = st.session_state[f"positive_prompt_video_{shot_uuid}"] + + overall_positive_prompt = st.text_area( + "What would you like to see in the videos?", + key="overall_positive_prompt", + value=st.session_state[f"positive_prompt_video_{shot_uuid}"], + on_change=update_prompt + ) + with f2: + overall_negative_prompt = st.text_area( + "What would you like to avoid in the videos?", + key="overall_negative_prompt", + value=st.session_state[f"negative_prompt_video_{shot_uuid}"] + ) + + with f3: + st.write("") + st.write("") + st.info("Use these sparingly, as they can have a large impact on the video. You can also edit them for individual frames in the advanced settings above.") + + st.markdown("***") + st.markdown("##### Overall motion settings") + h1, h2, h3 = st.columns([0.5, 1.5, 1]) + with h1: + # will fix this later + if f"type_of_motion_context_index_{shot_uuid}" in st.session_state and isinstance(st.session_state[f"type_of_motion_context_index_{shot_uuid}"], str): + st.session_state[f"type_of_motion_context_index_{shot_uuid}"] = ["Low", "Standard", "High"].index(st.session_state[f"type_of_motion_context_index_{shot_uuid}"]) + type_of_motion_context = st.radio("Type of motion context:", options=["Low", "Standard", "High"], key="type_of_motion_context", horizontal=False, index=st.session_state[f"type_of_motion_context_index_{shot_uuid}"]) + + with h2: + st.info("This is how much the motion will be informed by the previous and next frames. 'High' can make it smoother but increase artifacts - while 'Low' make the motion less smooth but removes artifacts. Naturally, we recommend Standard.") + st.write("") + i1, i3,_ = st.columns([1,2,1]) + with i1: + amount_of_motion = st.slider("Amount of motion:", min_value=0.5, max_value=1.5, step=0.01, key="amount_of_motion", value=st.session_state[f"amount_of_motion_{shot_uuid}"]) + st.write("") + if st.button("Bulk update amount of motion", key="update_motion", help="This will update this value in all the frames"): + for idx, _ in enumerate(img_list): + st.session_state[f'motion_during_frame_{shot_uuid}_{idx}'] = amount_of_motion + st.success("Updated amount of motion") + time.sleep(0.3) + st.rerun() + with i3: + st.write("") + st.write("") + st.info("This actually updates the motion during frames in the advanced settings above - but we put it here because it has a big impact on the video. 
You can scroll up to see the changes and tweak for individual frames.") + + return strength_of_adherence, overall_positive_prompt, overall_negative_prompt, type_of_motion_context + +def select_motion_lora_element(shot_uuid, model_files): + data_repo = DataRepo() + shot = data_repo.get_shot_from_uuid(shot_uuid) + + # if it's in local DEVELOPMENT ENVIRONMENT + st.markdown("***") + st.markdown("##### Motion guidance") + tab1, tab2, tab3 = st.tabs(["Apply LoRAs","Download LoRAs","Train LoRAs"]) + + lora_data = [] + lora_file_dest = "ComfyUI/models/animatediff_motion_lora" + lora_file_links = { + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_tony_stark_r64_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_train_r128_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/300_car_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_desert_48_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_jeep_driving_r32_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_man_running_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_rotation_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/750_jeep_driving_r32_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/300_zooming_in_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_cat_walking_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_playing_banjo_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_woman_dancing_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_zooming_out_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif" + } + + # ---------------- ADD LORA ----------------- 
+ with tab1: + files = get_files_in_a_directory(lora_file_dest, ['safetensors', 'ckpt']) + + # Iterate through each current LoRA in session state + if len(files) == 0: + st.error("No LoRAs found in the directory - go to Explore to download some, or drop them into ComfyUI/models/animatediff_motion_lora") + if st.button("Check again", key="check_again"): + st.rerun() + else: + # cleaning empty lora vals + for idx, lora in enumerate(st.session_state[f"lora_data_{shot_uuid}"]): + if not lora: + st.session_state[f"lora_data_{shot_uuid}"].pop(idx) + + for idx, lora in enumerate(st.session_state[f"lora_data_{shot_uuid}"]): + if not lora: + continue + h1, h2, h3, h4 = st.columns([1, 1, 1, 0.5]) + with h1: + file_idx = files.index(lora["filename"]) + motion_lora = st.selectbox("Which LoRA would you like to use?", options=files, key=f"motion_lora_{idx}", index=file_idx) + + with h2: + strength_of_lora = st.slider("How strong would you like the LoRA to be?", min_value=0.0, max_value=1.0, value=lora["lora_strength"], step=0.01, key=f"strength_of_lora_{idx}") + lora_data.append({"filename": motion_lora, "lora_strength": strength_of_lora, "filepath": lora_file_dest + "/" + motion_lora}) + + with h3: + when_to_apply_lora = st.slider("When to apply the LoRA?", min_value=0, max_value=100, value=(0,100), step=1, key=f"when_to_apply_lora_{idx}",disabled=True,help="This feature is not yet available.") + + with h4: + st.write("") + if st.button("Remove", key=f"remove_lora_{idx}"): + st.session_state[f"lora_data_{shot_uuid}"].pop(idx) + st.rerun() + + # displaying preview + display_motion_lora(motion_lora, lora_file_links) + + if len(st.session_state[f"lora_data_{shot_uuid}"]) == 0: + text = "Add a LoRA" + else: + text = "Add another LoRA" + if st.button(text, key="add_motion_guidance"): + if files and len(files): + st.session_state[f"lora_data_{shot_uuid}"].append({ + "filename": files[0], + "lora_strength": 0.5, + "filepath": lora_file_dest + "/" + files[0] + }) + st.rerun() + + # ---------------- DOWNLOAD LORA --------------- + with tab2: + text1, text2 = st.columns([1, 1]) + with text1: + where_to_download_from = st.radio("Where would you like to get the LoRA from?", options=["Our list", "From a URL","Upload a LoRA"], key="where_to_download_from", horizontal=True) + + if where_to_download_from == "Our list": + with text1: + selected_lora_optn = st.selectbox("Which LoRA would you like to download?", options=[a.split("/")[-1] for a in lora_file_links], key="selected_lora") + # Display selected Lora + display_motion_lora(selected_lora_optn, lora_file_links) + + if st.button("Download LoRA", key="download_lora"): + with st.spinner("Downloading LoRA..."): + save_directory = "ComfyUI/models/animatediff_motion_lora" + os.makedirs(save_directory, exist_ok=True) # Create the directory if it doesn't exist + + # Extract the filename from the URL + selected_lora, lora_idx = next(((ele, idx) for idx, ele in enumerate(lora_file_links.keys()) if selected_lora_optn in ele), None) + filename = selected_lora.split("/")[-1] + save_path = os.path.join(save_directory, filename) + + # Download the file + download_lora_bar = st.progress(0, text="") + response = requests.get(selected_lora, stream=True) + if response.status_code == 200: + total_size = int(response.headers.get('content-length', 0)) + with open(save_path, 'wb') as f: + received_bytes = 0 + + for data in response.iter_content(chunk_size=8192): + f.write(data) + received_bytes += len(data) + progress = received_bytes / total_size + download_lora_bar.progress(progress) 
+ + st.success(f"Downloaded LoRA to {save_path}") + download_lora_bar.empty() + st.rerun() + else: + st.error("Failed to download LoRA") + + elif where_to_download_from == "From a URL": + with text1: + text_input = st.text_input("Enter the URL of the LoRA", key="text_input_lora") + with text2: + st.write("") + st.write("") + st.write("") + st.info("Make sure to get the download url of the LoRA. \n\n For example, from Hugging Face, it should look like this: https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors") + with text1: + if st.button("Download LoRA", key="download_lora"): + with st.spinner("Downloading LoRA..."): + save_directory = "ComfyUI/models/animatediff_motion_lora" + os.makedirs(save_directory, exist_ok=True) + response = requests.get(text_input) + if response.status_code == 200: + with open(os.path.join(save_directory, text_input.split("/")[-1]), 'wb') as f: + f.write(response.content) + st.success(f"Downloaded LoRA to {save_directory}") + else: + st.error("Failed to download LoRA") + elif where_to_download_from == "Upload a LoRA": + st.info("It's simpler to just drop this into the ComfyUI/models/animatediff_motion_lora directory.") + + # ---------------- TRAIN LORA -------------- + with tab3: + b1, b2 = st.columns([1, 1]) + with b1: + lora_name = st.text_input("Name this LoRA", key="lora_name") + if model_files and len(model_files): + base_sd_model = st.selectbox( + label="Select base sd model for training", + options=model_files, + key="base_sd_model_video", + index=0 + ) + else: + base_sd_model = "" + st.info("Default model Deliberate V2 would be selected") + + lora_prompt = st.text_area("Describe the motion", key="lora_prompt") + training_video = st.file_uploader("Upload a video to train a new LoRA", type=["mp4"]) + + if st.button("Train LoRA", key="train_lora", use_container_width=True): + filename = str(uuid.uuid4()) + ".mp4" + hosted_url = save_or_host_file(training_video, "videos/temp/" + filename, "video/mp4") + + file_data = { + "name": filename, + "type": InternalFileType.VIDEO.value, + "project_id": shot.project.uuid, + } + + if hosted_url: + file_data.update({'hosted_url': hosted_url}) + else: + file_data.update({'local_path': "videos/temp/" + filename}) + + video_file = data_repo.create_file(**file_data) + video_width, video_height = get_media_dimensions(video_file.location) + unique_file_tag = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6)) + train_motion_lora( + video_file, + lora_prompt, + lora_name + "_" + unique_file_tag, + video_width, + video_height, + base_sd_model + ) + + return lora_data + + +def select_sd_model_element(shot_uuid, default_model): + st.markdown("##### Style model") + tab1, tab2 = st.tabs(["Choose Model","Download Models"]) + + checkpoints_dir = "ComfyUI/models/checkpoints" + all_files = os.listdir(checkpoints_dir) + if len(all_files) == 0: + model_files = [default_model] + + else: + model_files = [file for file in all_files if file.endswith('.safetensors') or file.endswith('.ckpt')] + model_files = [file for file in model_files if "xl" not in file] + + sd_model_dict = { + "Anything V3 FP16 Pruned": { + "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/anything-v3-fp16-pruned.safetensors.tar", + "filename": "anything-v3-fp16-pruned.safetensors.tar" + }, + "Deliberate V2": { + "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/Deliberate_v2.safetensors.tar", + "filename": "Deliberate_v2.safetensors.tar" + 
}, + "Dreamshaper 8": { + "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/dreamshaper_8.safetensors.tar", + "filename": "dreamshaper_8.safetensors.tar" + }, + "epicrealism_pureEvolutionV5": { + "url": "https://civitai.com/api/download/models/134065", + "filename": "epicrealism_pureEvolutionv5.safetensors" + }, + "majicmixRealistic_v6": { + "url": "https://civitai.com/api/download/models/94640", + "filename": "majicmixRealistic_v6.safetensors" + }, + } + + cur_model = st.session_state[f'ckpt_{shot_uuid}'] + current_model_index = model_files.index(cur_model) if (cur_model and cur_model in model_files) else 0 + + # ---------------- SELECT CKPT -------------- + with tab1: + col1, col2 = st.columns([1, 1]) + with col1: + sd_model = "" + def update_model(): + global sd_model + sd_model = checkpoints_dir + "/" + st.session_state['sd_model_video'] + + if model_files and len(model_files): + sd_model = st.selectbox( + label="Which model would you like to use?", + options=model_files, + key="sd_model_video", + index=current_model_index, + on_change=update_model + ) + else: + st.write("") + st.info("Default model Deliberate V2 would be selected") + + with col2: + if len(all_files) == 0: + st.write("") + st.info("This is the default model - to download more, go to the Download Models tab.") + else: + st.write("") + st.info("To download more models, go to the Download Models tab.") + + + # ---------------- ADD CKPT --------------- + with tab2: + where_to_get_model = st.radio("Where would you like to get the model from?", options=["Our list", "Upload a model", "From a URL"], key="where_to_get_model") + + if where_to_get_model == "Our list": + model_name_selected = st.selectbox("Which model would you like to download?", options=list(sd_model_dict.keys()), key="model_to_download") + + if st.button("Download Model", key="download_model"): + with st.spinner("Downloading model..."): + download_bar = st.progress(0, text="") + save_directory = "ComfyUI/models/checkpoints" + os.makedirs(save_directory, exist_ok=True) # Create the directory if it doesn't exist + + # Retrieve the URL using the selected model name + model_url = sd_model_dict[model_name_selected]["url"] + + # Download the model and save it to the directory + response = requests.get(model_url, stream=True) + zip_filename = sd_model_dict[model_name_selected]["filename"] + filepath = os.path.join(save_directory, zip_filename) + print("filepath: ", filepath) + if response.status_code == 200: + total_size = int(response.headers.get('content-length', 0)) + + with open(filepath, 'wb') as f: + received_bytes = 0 + + for data in response.iter_content(chunk_size=8192): + f.write(data) + received_bytes += len(data) + progress = received_bytes / total_size + download_bar.progress(progress) + + st.success(f"Downloaded {model_name_selected} to {save_directory}") + download_bar.empty() + + if model_url.endswith(".zip") or model_url.endswith(".tar"): + st.success("Extracting the zip file. 
Please wait...") + new_filepath = filepath.replace(zip_filename, "") + if model_url.endswith(".zip"): + with zipfile.ZipFile(f"{filepath}", "r") as zip_ref: + zip_ref.extractall(new_filepath) + else: + with tarfile.open(f"{filepath}", "r") as tar_ref: + tar_ref.extractall(new_filepath) + + os.remove(filepath) + st.rerun() + + elif where_to_get_model == "Upload a model": + st.info("It's simpler to just drop this into the ComfyUI/models/checkpoints directory.") + + elif where_to_get_model == "From a URL": + text1, text2 = st.columns([1, 1]) + with text1: + text_input = st.text_input("Enter the URL of the model", key="text_input") + with text2: + st.info("Make sure to get the download url of the model. \n\n For example, from Civit, this should look like this: https://civitai.com/api/download/models/179446. \n\n While from Hugging Face, it should look like this: https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors") + + if st.button("Download Model", key="download_model"): + with st.spinner("Downloading model..."): + save_directory = "ComfyUI/models/checkpoints" + os.makedirs(save_directory, exist_ok=True) + response = requests.get(text_input) + if response.status_code == 200: + with open(os.path.join(save_directory, text_input.split("/")[-1]), 'wb') as f: + f.write(response.content) + st.success(f"Downloaded model to {save_directory}") + else: + st.error("Failed to download model") + + return sd_model, model_files, + + +def individual_frame_settings_element(shot_uuid, img_list, display_indent): + with display_indent: + st.markdown("##### Individual frame settings") + + items_per_row = 3 + strength_of_frames = [] + distances_to_next_frames = [] + speeds_of_transitions = [] + freedoms_between_frames = [] + individual_prompts = [] + individual_negative_prompts = [] + motions_during_frames = [] + + if len(img_list) <= 1: + st.warning("You need at least two frames to generate a video.") + st.stop() + + open_advanced_settings = st_memory.toggle("Open all advanced settings", key="advanced_settings", value=False) + + # setting default values to main shot settings + if f'lora_data_{shot_uuid}' not in st.session_state: + st.session_state[f'lora_data_{shot_uuid}'] = [] + + if f'strength_of_adherence_value_{shot_uuid}' not in st.session_state: + st.session_state[f'strength_of_adherence_value_{shot_uuid}'] = 0.10 + + if f'type_of_motion_context_index_{shot_uuid}' not in st.session_state: + st.session_state[f'type_of_motion_context_index_{shot_uuid}'] = 1 + + if f'positive_prompt_video_{shot_uuid}' not in st.session_state: + st.session_state[f"positive_prompt_video_{shot_uuid}"] = "" + + if f'negative_prompt_video_{shot_uuid}' not in st.session_state: + st.session_state[f"negative_prompt_video_{shot_uuid}"] = "" + + if f'ckpt_{shot_uuid}' not in st.session_state: + st.session_state[f'ckpt_{shot_uuid}'] = "" + + if f"amount_of_motion_{shot_uuid}" not in st.session_state: + st.session_state[f"amount_of_motion_{shot_uuid}"] = 1.3 + + # loading settings of the last shot (if this shot is being loaded for the first time) + if f'strength_of_frame_{shot_uuid}_0' not in st.session_state: + load_shot_settings(shot_uuid) + + # ------------- Timing Frame and their settings ------------------- + for i in range(0, len(img_list) , items_per_row): + with st.container(): + grid = st.columns([2 if j%2==0 else 1 for j in range(2*items_per_row)]) # Adjust the column widths + for j in range(items_per_row): + idx = i + j + if idx < len(img_list): + with 
grid[2*j]: # Adjust the index for image column + img = img_list[idx] + if img.location: + st.info(f"**Frame {idx + 1}**") + st.image(img.location, use_column_width=True) + + # settings control + with st.expander("Advanced settings:", expanded=open_advanced_settings): + # checking for newly added frames + if f'individual_prompt_{shot_uuid}_{idx}' not in st.session_state: + for k, v in DEFAULT_SHOT_MOTION_VALUES.items(): + st.session_state[f"{k}_{shot_uuid}_{idx}"] = v + + individual_prompt = st.text_input("What to include:", key=f"individual_prompt_widget_{idx}_{img.uuid}", value=st.session_state[f'individual_prompt_{shot_uuid}_{idx}'], help="Use this sparingly, as it can have a large impact on the video and cause weird distortions.") + individual_prompts.append(individual_prompt) + individual_negative_prompt = st.text_input("What to avoid:", key=f"negative_prompt_widget_{idx}_{img.uuid}", value=st.session_state[f'individual_negative_prompt_{shot_uuid}_{idx}'],help="Use this sparingly, as it can have a large impact on the video and cause weird distortions.") + individual_negative_prompts.append(individual_negative_prompt) + strength1, strength2 = st.columns([1, 1]) + with strength1: + strength_of_frame = st.slider("Strength of current frame:", min_value=0.25, max_value=1.0, step=0.01, key=f"strength_of_frame_widget_{shot_uuid}_{idx}", value=st.session_state[f'strength_of_frame_{shot_uuid}_{idx}']) + strength_of_frames.append(strength_of_frame) + with strength2: + motion_during_frame = st.slider("Motion during frame:", min_value=0.5, max_value=1.5, step=0.01, key=f"motion_during_frame_widget_{idx}_{img.uuid}", value=st.session_state[f'motion_during_frame_{shot_uuid}_{idx}']) + motions_during_frames.append(motion_during_frame) + else: + st.warning("No primary image present.") + + # distance, speed and freedom settings (also aggregates them into arrays) + with grid[2*j+1]: # Add the new column after the image column + if idx < len(img_list) - 1: + + # if st.session_state[f'distance_to_next_frame_{shot_uuid}_{idx}'] is a int, make it a float + if isinstance(st.session_state[f'distance_to_next_frame_{shot_uuid}_{idx}'], int): + st.session_state[f'distance_to_next_frame_{shot_uuid}_{idx}'] = float(st.session_state[f'distance_to_next_frame_{shot_uuid}_{idx}']) + distance_to_next_frame = st.slider("Seconds to next frame:", min_value=0.25, max_value=6.00, step=0.25, key=f"distance_to_next_frame_widget_{idx}_{img.uuid}", value=st.session_state[f'distance_to_next_frame_{shot_uuid}_{idx}']) + distances_to_next_frames.append(distance_to_next_frame/2) + speed_of_transition = st.slider("Speed of transition:", min_value=0.45, max_value=0.7, step=0.01, key=f"speed_of_transition_widget_{idx}_{img.uuid}", value=st.session_state[f'speed_of_transition_{shot_uuid}_{idx}']) + speeds_of_transitions.append(speed_of_transition) + freedom_between_frames = st.slider("Freedom between frames:", min_value=0.15, max_value=0.85, step=0.01, key=f"freedom_between_frames_widget_{idx}_{img.uuid}", value=st.session_state[f'freedom_between_frames_{shot_uuid}_{idx}']) + freedoms_between_frames.append(freedom_between_frames) + + if (i < len(img_list) - 1) or (len(img_list) % items_per_row != 0): + st.markdown("***") + + return strength_of_frames, distances_to_next_frames, speeds_of_transitions, freedoms_between_frames, individual_prompts, individual_negative_prompts, motions_during_frames + \ No newline at end of file diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py 
index 0bb8c6e3..3ffc9f70 100644
--- a/ui_components/widgets/variant_comparison_grid.py
+++ b/ui_components/widgets/variant_comparison_grid.py
@@ -15,7 +15,6 @@ from ui_components.widgets.shot_view import create_video_download_button
 from ui_components.models import InternalAIModelObject, InternalFileObject
 from ui_components.widgets.add_key_frame_element import add_key_frame
-from ui_components.widgets.animation_style_element import update_interpolation_settings
 from utils import st_memory
 from utils.data_repo.data_repo import DataRepo
 from utils.ml_processor.constants import ML_MODEL, ComfyWorkflow
@@ -35,6 +34,9 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value):
         shot = data_repo.get_shot_from_uuid(shot_uuid)
         variants = shot.interpolated_clip_list
         timing_list = data_repo.get_timing_list_from_shot(shot.uuid)
+
+        if not (f"{shot_uuid}_selected_variant_log_uuid" in st.session_state and st.session_state[f"{shot_uuid}_selected_variant_log_uuid"]):
+            st.session_state[f"{shot_uuid}_selected_variant_log_uuid"] = None
     else:
         timing_uuid = ele_uuid
         timing = data_repo.get_timing_from_uuid(timing_uuid)
@@ -149,6 +151,9 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value):
             st.markdown("***") # Add markdown line
             cols = st.columns(num_columns) # Prepare for the next row
     # Add markdown line if this is not the last variant in page_indices
+
+    return st.session_state[f"{shot_uuid}_selected_variant_log_uuid"] if \
+        f"{shot_uuid}_selected_variant_log_uuid" in st.session_state else None

 def is_upscaled_video(variant: InternalFileObject):
     log = variant.inference_log

From e56aaf55d9b92abb71841778d8d4ae2ed031c044 Mon Sep 17 00:00:00 2001
From: peteromallet
Date: Thu, 21 Mar 2024 02:00:44 +0100
Subject: [PATCH 34/43] Tiny improvements

---
 .../widgets/animation_style_element.py     | 79 +++++++++++--------
 utils/media_processor/interpolator.py      |  4 +-
 utils/ml_processor/comfy_data_transform.py |  1 +
 3 files changed, 48 insertions(+), 36 deletions(-)

diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py
index 7ee9a359..4f3fbcf4 100644
--- a/ui_components/widgets/animation_style_element.py
+++ b/ui_components/widgets/animation_style_element.py
@@ -42,10 +42,14 @@ def animation_style_element(shot_uuid):
         'animation_tool': AnimationToolType.ANIMATEDIFF.value,
     }
-    st.markdown("### 🎥 Generate animations")
-    st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_")
+    headline1, headline2, headline3 = st.columns([1, 1, 1])
+    with headline1:

-    type_of_animation = st_memory.radio("What type of animation would you like to generate?", options=["Batch Creative Interpolation", "2-Image Realistic Interpolation"],horizontal=True, help="**Batch Creative Interpolaton** lets you input multple images and control the motion and style of each frame - resulting in a fluid, surreal and highly-controllable motion. 
\n\n **2-Image Realistic Interpolation** is a simpler way to generate animations - it generates a video by interpolating between two images, and is best for realistic motion.",key=f"type_of_animation_{shot.uuid}") + st.markdown("### 🎥 Generate animations") + st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") + with headline3: + with st.expander("", expanded=False): + type_of_animation = st_memory.radio("What type of animation would you like to generate?", options=["Batch Creative Interpolation", "2-Image Realistic Interpolation (beta)"],horizontal=True, help="**Batch Creative Interpolaton** lets you input multple images and control the motion and style of each frame - resulting in a fluid, surreal and highly-controllable motion. \n\n **2-Image Realistic Interpolation** is a simpler way to generate animations - it generates a video by interpolating between two images, and is best for realistic motion.",key=f"type_of_animation_{shot.uuid}") if type_of_animation == "Batch Creative Interpolation": @@ -485,7 +489,7 @@ def update_prompt(): st.markdown("***") st.markdown("##### Overall motion settings") - h1, h2, h3 = st.columns([1, 0.5, 2]) + h1, h2, h3 = st.columns([1, 0.5, 1.0]) with h1: # will fix this later if f"type_of_motion_context_index_{shot.uuid}" in st.session_state and isinstance(st.session_state[f"type_of_motion_context_index_{shot.uuid}"], str): @@ -695,6 +699,8 @@ def update_prompt(): strength_values = extract_strength_values(type_of_strength_distribution, dynamic_strength_values, keyframe_positions, linear_cn_strength_value) key_frame_influence_values = extract_influence_values(type_of_key_frame_influence, dynamic_key_frame_influence_values, keyframe_positions, linear_key_frame_influence_value) weights_list, frame_numbers_list = calculate_weights(keyframe_positions, strength_values, 4, key_frame_influence_values,last_key_frame_position) + # s + plot_weights(weights_list, frame_numbers_list) st.markdown("***") @@ -707,41 +713,44 @@ def update_prompt(): for idx, timing in enumerate(timing_list): for k, v in DEFAULT_SHOT_MOTION_VALUES.items(): st.session_state[f'{k}_{shot.uuid}_{idx}'] = v - + st.success("All frames have been reset to default values.") st.rerun() - - editable_entity = st.selectbox("What would you like to edit?", options=["Seconds to next frames", "Speed of transitions", "Freedom between frames","Strength of frames","Motion during frames"], key="editable_entity") - if editable_entity == "Seconds to next frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=6.00, step=0.25, value=1.0, key="entity_new_val") - if editable_entity == "Strength of frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=1.0, step=0.01, value=0.5, key="entity_new_val") - elif editable_entity == "Speed of transitions": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.45, max_value=0.7, step=0.01, value=0.6, key="entity_new_val") - elif editable_entity == "Freedom between frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.15, max_value=0.85, step=0.01, value=0.5, key="entity_new_val") - elif editable_entity == "Motion during frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.5, max_value=1.5, step=0.01, value=1.3, key="entity_new_val") + + # New feature: Selecting a range to edit + range_to_edit = st.slider("Select the range of frames you would like to edit:", + min_value=1, 
max_value=len(timing_list), + value=(1, len(timing_list)), step=1, key="range_to_edit") + edit1, edit2 = st.columns([1, 1]) + with edit1: + editable_entity = st.selectbox("What would you like to edit?", options=["Seconds to next frames", "Speed of transitions", "Freedom between frames","Strength of frames","Motion during frames"], key="editable_entity") + with edit2: + if editable_entity == "Seconds to next frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=6.00, step=0.25, value=1.0, key="entity_new_val_seconds") + elif editable_entity == "Strength of frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=1.0, step=0.01, value=0.5, key="entity_new_val_strength") + elif editable_entity == "Speed of transitions": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.45, max_value=0.7, step=0.01, value=0.6, key="entity_new_val_speed") + elif editable_entity == "Freedom between frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.15, max_value=0.85, step=0.01, value=0.5, key="entity_new_val_freedom") + elif editable_entity == "Motion during frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.5, max_value=1.5, step=0.01, value=1.3, key="entity_new_val_motion") - bulk1, bulk2 = st.columns([1, 1]) - with bulk1: - if st.button("Bulk edit", key="bulk_edit", use_container_width=True): + if st.button("Bulk edit", key="bulk_edit", use_container_width=True): + start_idx, end_idx = range_to_edit + for idx in range(start_idx - 1, end_idx): # Adjusting index to be 0-based if editable_entity == "Strength of frames": - for idx, timing in enumerate(timing_list): - st.session_state[f'strength_of_frame_{shot.uuid}_{idx}'] = entity_new_val + st.session_state[f'strength_of_frame_{shot.uuid}_{idx}'] = entity_new_val elif editable_entity == "Seconds to next frames": - for idx, timing in enumerate(timing_list): - st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] = entity_new_val + st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] = entity_new_val elif editable_entity == "Speed of transitions": - for idx, timing in enumerate(timing_list): - st.session_state[f'speed_of_transition_{shot.uuid}_{idx}'] = entity_new_val + st.session_state[f'speed_of_transition_{shot.uuid}_{idx}'] = entity_new_val elif editable_entity == "Freedom between frames": - for idx, timing in enumerate(timing_list): - st.session_state[f'freedom_between_frames_{shot.uuid}_{idx}'] = entity_new_val + st.session_state[f'freedom_between_frames_{shot.uuid}_{idx}'] = entity_new_val elif editable_entity == "Motion during frames": - for idx, timing in enumerate(timing_list): - st.session_state[f'motion_during_frame_{shot.uuid}_{idx}'] = entity_new_val - st.rerun() + st.session_state[f'motion_during_frame_{shot.uuid}_{idx}'] = entity_new_val + st.rerun() + st.markdown("***") st.markdown("### Save current settings") @@ -751,7 +760,7 @@ def update_prompt(): time.sleep(0.7) st.rerun() - elif type_of_animation == "2-Image Realistic Interpolation": + elif type_of_animation == "2-Image Realistic Interpolation (beta)": col1, col2, col3 = st.columns([1, 1, 1]) for i in range(0, 2, 2): # Iterate two items at a time @@ -1126,11 +1135,13 @@ def find_curve(batch_index_from, batch_index_to, strength_from, strength_to, int def plot_weights(weights_list, frame_numbers_list): plt.figure(figsize=(12, 6)) for i, weights in enumerate(weights_list): - 
frame_numbers = frame_numbers_list[i] + # Divide each frame number by 100 + frame_numbers = [frame_number / 100 for frame_number in frame_numbers_list[i]] + plt.plot(frame_numbers, weights, label=f'Frame {i + 1}') # Plot settings - plt.xlabel('Frame Number') + plt.xlabel('Seconds') # Updated to represent seconds plt.ylabel('Weight') plt.legend() plt.ylim(0, 1.0) diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index 357d9aa8..b38c558c 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -135,9 +135,9 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count negative_prompt="", height=512, width=512, + image_uuid=settings["structure_control_image"], low_threshold=100, - high_threshold=200, - image_uuid=None, + high_threshold=200, mask_uuid=None, data=sm_data ) diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index 41b4098b..b4c7341c 100644 --- a/utils/ml_processor/comfy_data_transform.py +++ b/utils/ml_processor/comfy_data_transform.py @@ -449,6 +449,7 @@ def update_json_with_loras(json_data, loras): workflow["543"]["inputs"]["text"] = sm_data.get('individual_negative_prompts') if sm_data.get('structure_control_image'): + workflow = update_structure_control_image(workflow, sm_data.get('structure_control_image'), sm_data.get('strength_of_structure_control_image')) ignore_list = sm_data.get("lora_data", []) From 919d0e7389eab8e7a96219a2b50e9e0c9b312a51 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Thu, 21 Mar 2024 11:10:45 +0000 Subject: [PATCH 35/43] green-head merge changes --- banodoco_settings.py | 77 ++++---- ui_components/components/explorer_page.py | 26 ++- utils/media_processor/interpolator.py | 105 +++++------ utils/ml_processor/comfy_data_transform.py | 125 ++++++++++++- .../comfy_workflows/dynamicrafter_api.json | 164 +++++++++++++++++ .../ipadapter_composition_workflow_api.json | 174 ++++++++++++++++++ utils/ml_processor/constants.py | 10 +- 7 files changed, 583 insertions(+), 98 deletions(-) create mode 100644 utils/ml_processor/comfy_workflows/dynamicrafter_api.json create mode 100644 utils/ml_processor/comfy_workflows/ipadapter_composition_workflow_api.json diff --git a/banodoco_settings.py b/banodoco_settings.py index d324a877..d49c6d69 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -98,7 +98,9 @@ def create_new_user_data(user: InternalUserObject): def create_new_project(user: InternalUserObject, project_name: str, width=512, height=512): data_repo = DataRepo() - # creating a new project for this user + existing_projects_list = data_repo.get_all_project_list(user.uuid) + add_initial_frames = False if (existing_projects_list and len(existing_projects_list)) else True + project_data = { "user_id": user.uuid, "name": project_name, @@ -115,43 +117,46 @@ def create_new_project(user: InternalUserObject, project_name: str, width=512, h } shot = data_repo.create_shot(**shot_data) - - # create timings for init_images - init_images_path = os.path.join("sample_assets", "sample_images", "init_frames") - init_image_list = list_files_in_folder(init_images_path) st.session_state["project_uuid"] = project.uuid - - for idx, img_path in enumerate(init_image_list): - img_path = os.path.join(init_images_path, img_path) - img = Image.open(img_path) - img = img.resize((width, height)) - - unique_file_name = f"{str(uuid.uuid4())}.png" - file_location = f"videos/{project.uuid}/resources/prompt_images/{unique_file_name}" 
- hosted_url = save_or_host_file(img, file_location, mime_type='image/png', dim=(width, height)) - file_data = { - "name": str(uuid.uuid4()), - "type": InternalFileType.IMAGE.value, - "project_id": project.uuid, - "dim": (width, height), - } - - if hosted_url: - file_data.update({'hosted_url': hosted_url}) - else: - file_data.update({'local_path': file_location}) - source_image = data_repo.create_file(**file_data) - - timing_data = { - "frame_time": 0.0, - "aux_frame_index": idx, - "source_image_id": source_image.uuid, - "shot_id": shot.uuid, - } - timing: InternalFrameTimingObject = data_repo.create_timing(**timing_data) - - add_image_variant(source_image.uuid, timing.uuid) + # Add initial frames only if there are no existing projects (i.e., it's the user's first project) + if add_initial_frames: + init_images_path = os.path.join("sample_assets", "sample_images", "init_frames") + init_image_list = list_files_in_folder(init_images_path) + image_extensions = {'.png', '.jpg', '.jpeg', '.gif'} + init_image_list = [img for img in init_image_list if os.path.splitext(img)[1].lower() in image_extensions] + + for idx, img_path in enumerate(init_image_list): + img_path = os.path.join(init_images_path, img_path) + img = Image.open(img_path) + img = img.resize((width, height)) + + unique_file_name = f"{str(uuid.uuid4())}.png" + file_location = f"videos/{project.uuid}/resources/prompt_images/{unique_file_name}" + hosted_url = save_or_host_file(img, file_location, mime_type='image/png', dim=(width, height)) + file_data = { + "name": str(uuid.uuid4()), + "type": InternalFileType.IMAGE.value, + "project_id": project.uuid, + "dim": (width, height), + } + + if hosted_url: + file_data.update({'hosted_url': hosted_url}) + else: + file_data.update({'local_path': file_location}) + + source_image = data_repo.create_file(**file_data) + + timing_data = { + "frame_time": 0.0, + "aux_frame_index": idx, + "source_image_id": source_image.uuid, + "shot_id": shot.uuid, + } + timing: InternalFrameTimingObject = data_repo.create_timing(**timing_data) + + add_image_variant(source_image.uuid, timing.uuid) # create default ai models model_list = create_predefined_models(user) diff --git a/ui_components/components/explorer_page.py b/ui_components/components/explorer_page.py index a5b526fc..e509850e 100644 --- a/ui_components/components/explorer_page.py +++ b/ui_components/components/explorer_page.py @@ -22,7 +22,8 @@ class InputImageStyling(ExtendedEnum): TEXT2IMAGE = "Text to Image" IMAGE2IMAGE = "Image to Image" - CONTROLNET_CANNY = "ControlNet Canny" + # CONTROLNET_CANNY = "ControlNet Canny" + IPADAPTER_COMPOSITION = "IP-Adapter Composition" IPADAPTER_FACE = "IP-Adapter Face" IPADAPTER_PLUS = "IP-Adapter Plus" IPADPTER_FACE_AND_PLUS = "IP-Adapter Face & Plus" @@ -207,6 +208,27 @@ def handle_image_input(column, type_of_generation, output_value_name, data_repo= output, log = ml_client.predict_model_output_standardized(ML_MODEL.sdxl_img2img, query_obj, queue_inference=QUEUE_INFERENCE_QUERIES) + elif generation_method == InputImageStyling.IPADAPTER_COMPOSITION.value: + input_img = st.session_state["input_image_1"] + input_image_file = save_new_image(input_img, project_uuid) + query_obj = MLQueryObject( + timing_uuid=None, + model_uuid=None, + image_uuid=input_image_file.uuid, + guidance_scale=5, + seed=-1, + num_inference_steps=30, + strength=strength_of_image/100, + adapter_type=None, + prompt=prompt, + negative_prompt=negative_prompt, + height=project_settings.height, + width=project_settings.width, + 
data={'condition_scale': 1, "shot_uuid": shot_uuid} + ) + + output, log = ml_client.predict_model_output_standardized(ML_MODEL.ipadapter_composition, query_obj, queue_inference=QUEUE_INFERENCE_QUERIES) + elif generation_method == InputImageStyling.CONTROLNET_CANNY.value: edge_pil_img = get_canny_img(st.session_state["input_image_1"], low_threshold=50, high_threshold=150) # redundant incase of local inference input_img = edge_pil_img if not GPU_INFERENCE_ENABLED else st.session_state["input_image_1"] @@ -349,7 +371,7 @@ def handle_image_input(column, type_of_generation, output_value_name, data_repo= st.button("Generate images", key="generate_images", use_container_width=True, type="primary", disabled=True, help="Please enter a prompt to generate images") elif type_of_generation == InputImageStyling.IMAGE2IMAGE.value and st.session_state["input_image_1"] is None: st.button("Generate images", key="generate_images", use_container_width=True, type="primary", disabled=True, help="Please upload an image") - elif type_of_generation == InputImageStyling.CONTROLNET_CANNY.value and st.session_state["input_image_1"] is None: + elif type_of_generation == InputImageStyling.IPADAPTER_COMPOSITION.value and st.session_state["input_image_1"] is None: st.button("Generate images", key="generate_images", use_container_width=True, type="primary", disabled=True, help="Please upload an image") elif type_of_generation == InputImageStyling.IPADAPTER_FACE.value and st.session_state["input_image_1"] is None: st.button("Generate images", key="generate_images", use_container_width=True, type="primary", disabled=True, help="Please upload an image") diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index 57198fdd..b38c558c 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -54,6 +54,8 @@ def create_interpolated_clip(img_location_list, animation_style, settings, varia return VideoInterpolator.video_through_direct_morphing( img_location_list, settings, + variant_count, + queue_inference, backlog ) @@ -62,26 +64,10 @@ def create_interpolated_clip(img_location_list, animation_style, settings, varia @staticmethod def video_through_frame_interpolation(img_location_list, settings, variant_count, queue_inference=False, backlog=False): ml_client = get_ml_client() - # zip_filename = zip_images(img_location_list) - # zip_url = ml_client.upload_training_data(zip_filename, delete_after_upload=True) - # print("zipped file url: ", zip_url) - # animation_tool = settings['animation_tool'] if 'animation_tool' in settings else AnimationToolType.G_FILM.value final_res = [] for _ in range(variant_count): - # if animation_tool == AnimationToolType.G_FILM.value: - # res = ml_client.predict_model_output( - # ML_MODEL.google_frame_interpolation, - # frame1=img1, - # frame2=img2, - # times_to_interpolate=settings['interpolation_steps'], - # queue_inference=queue_inference - # ) - - # since workflows can have multiple input params it's not standardized yet - # elif animation_tool == AnimationToolType.ANIMATEDIFF.value: - # defaulting to animatediff interpolation if True: # NOTE: @Peter these are all the settings you passed in from the UI sm_data = { @@ -121,13 +107,21 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count "individual_negative_prompts": settings["individual_negative_prompts"], "max_frames": settings["max_frames"], "lora_data": settings["lora_data"], - "shot_data": settings["shot_data"] + "shot_data": 
settings["shot_data"], + "structure_control_image": settings["structure_control_image"], + "strength_of_structure_control_image": settings["strength_of_structure_control_image"] } # adding the input images for idx, img_uuid in enumerate(settings['file_uuid_list']): sm_data[f"file_image_{padded_integer(idx+1)}" + "_uuid"] = img_uuid + + if settings["structure_control_image"] is not None: + # add to file_image__{padded_integer(idx+1)}_uuid + sm_data[f"file_image_{padded_integer(len(settings['file_uuid_list'])+1)}" + "_uuid"] = settings["structure_control_image"] + + # NOTE: @Peter all the above settings are put in the 'data' parameter below ml_query_object = MLQueryObject( prompt="SM", # hackish fix @@ -141,9 +135,9 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count negative_prompt="", height=512, width=512, + image_uuid=settings["structure_control_image"], low_threshold=100, - high_threshold=200, - image_uuid=None, + high_threshold=200, mask_uuid=None, data=sm_data ) @@ -155,37 +149,44 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count @staticmethod - def video_through_direct_morphing(img_location_list, settings, backlog=False): - def load_image(image_path_or_url): - if image_path_or_url.startswith("http"): - response = r.get(image_path_or_url) - image = np.asarray(bytearray(response.content), dtype="uint8") - image = cv2.imdecode(image, cv2.IMREAD_COLOR) - else: - image = cv2.imread(image_path_or_url) - - return image - - img1 = load_image(img_location_list[0]) - img2 = load_image(img_location_list[1]) + def video_through_direct_morphing(img_location_list, settings, variant_count, queue_inference=False, backlog=False): + ml_client = get_ml_client() + + final_res = [] + for _ in range(variant_count): + + if True: + # NOTE: @Peter these are all the settings you passed in from the UI + sm_data = { + "width": settings['width'], + "height": settings['height'], + "prompt": settings["prompt"] + } + + for idx, img_uuid in enumerate(settings['file_uuid_list']): + sm_data[f"file_image_{padded_integer(idx+1)}" + "_uuid"] = img_uuid + + ml_query_object = MLQueryObject( + prompt="SM", # hackish fix + timing_uuid=None, + model_uuid=None, + guidance_scale=None, + seed=None, + num_inference_steps=None, + strength=None, + adapter_type=None, + negative_prompt="", + height=settings['height'], + width=settings['width'], + image_uuid=None, + mask_uuid=None, + data=sm_data + ) + res = ml_client.predict_model_output_standardized(ML_MODEL.dynamicrafter, ml_query_object, QUEUE_INFERENCE_QUERIES, backlog) + + final_res.append(res) + + return final_res + + - if img1 is None or img2 is None: - raise ValueError("Could not read one or both of the images.") - - num_frames = settings['interpolation_steps'] # Number of frames in the video - video_frames = [] - - for alpha in np.linspace(0, 1, num_frames): - morphed_image = cv2.addWeighted(img1, alpha, img2, 1 - alpha, 0) - video_frames.append(morphed_image) - - fourcc = cv2.VideoWriter_fourcc(*"avc1") - video_bytes = [] - for frame in video_frames: - ret, frame_bytes = cv2.imencode('.mp4', frame, fourcc) - if not ret: - raise ValueError("Failed to encode video frame") - video_bytes.append(frame_bytes.tobytes()) - - video_data = b''.join(video_bytes) - return [(video_data, InferenceLogObject({}))] # returning None for inference log \ No newline at end of file diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index 1a2107ee..54293311 100644 --- 
a/utils/ml_processor/comfy_data_transform.py +++ b/utils/ml_processor/comfy_data_transform.py @@ -27,6 +27,7 @@ ComfyWorkflow.UPSCALER: {"workflow_path": 'comfy_workflows/video_upscaler_api.json', "output_node_id": [243]}, ComfyWorkflow.MOTION_LORA: {"workflow_path": 'comfy_workflows/motion_lora_api.json', "output_node_id": [11, 14, 26, 30, 34]}, # ComfyWorkflow.MOTION_LORA: {"workflow_path": 'comfy_workflows/motion_lora_test_api.json', "output_node_id": [11, 14]}, + ComfyWorkflow.DYNAMICRAFTER: {"workflow_path": 'comfy_workflows/dynamicrafter_api.json', "output_node_id": [2]} } @@ -84,7 +85,8 @@ def transform_sdxl_img2img_workflow(query: MLQueryObject): workflow["42:2"]["inputs"]["seed"] = random_seed() return json.dumps(workflow), output_node_ids, [], [] - + + @staticmethod def transform_sdxl_controlnet_workflow(query: MLQueryObject): data_repo = DataRepo() @@ -110,6 +112,33 @@ def transform_sdxl_controlnet_workflow(query: MLQueryObject): return json.dumps(workflow), output_node_ids, [], [] + + @staticmethod + def transform_ipadapter_composition_workflow(query: MLQueryObject): + data_repo = DataRepo() + workflow, output_node_ids = ComfyDataTransform.get_workflow_json(ComfyWorkflow.IPADAPTER_COMPOSITION) + + # workflow params + width, height = query.width, query.height + # width, height = determine_dimensions_for_sdxl(width, height) + positive_prompt, negative_prompt = query.prompt, query.negative_prompt + steps, cfg = query.num_inference_steps, query.guidance_scale + # low_threshold, high_threshold = query.low_threshold, query.high_threshold + image = data_repo.get_file_from_uuid(query.image_uuid) + image_name = image.filename + + # updating params + workflow["9"]["inputs"]["seed"] = random_seed() + workflow["10"]["width"], workflow["10"]["height"] = width, height + # workflow["17"]["width"], workflow["17"]["height"] = width, height + workflow["7"]["inputs"]["text"], workflow["8"]["inputs"]["text"] = positive_prompt, negative_prompt + # workflow["12"]["inputs"]["low_threshold"], workflow["12"]["inputs"]["high_threshold"] = low_threshold, high_threshold + workflow["9"]["inputs"]["steps"], workflow["9"]["inputs"]["cfg"] = steps, cfg + workflow["6"]["inputs"]["image"] = image_name + workflow["28"]["inputs"]["weight"] = query.strength + + return json.dumps(workflow), output_node_ids, [], [] + @staticmethod def transform_sdxl_controlnet_openpose_workflow(query: MLQueryObject): data_repo = DataRepo() @@ -283,6 +312,62 @@ def transform_ipadaptor_face_plus_workflow(query: MLQueryObject): @staticmethod def transform_steerable_motion_workflow(query: MLQueryObject): + + def update_structure_control_image(json, image, weight): + # Integrate all updates including new nodes and modifications in a single step + data_repo = DataRepo() + image = data_repo.get_file_from_uuid(image) + image = image.filename + # image = os.path.basename(image) + + json.update({ + "560": { + "inputs": { + "image": image, + "upload": "image" + }, + "class_type": "LoadImage", + "_meta": { + "title": "Load Image" + } + }, + "563": { + "inputs": { + "weight": weight, + "noise": 0.3, + "weight_type": "original", + "start_at": 0, + "end_at": 1, + "short_side_tiles": 2, + "tile_weight": 0.6, + "ipadapter": ["564", 0], + "clip_vision": ["370", 0], + "image": ["560", 0], + "model": ["558", 3] + }, + "class_type": "IPAdapterTilesMasked", + "_meta": { + "title": "IPAdapter Masked Tiles (experimental)" + } + }, + "564": { + "inputs": { + "ipadapter_file": "ip_plus_composition_sd15.safetensors" + }, + "class_type": 
"IPAdapterModelLoader", + "_meta": { + "title": "Load IPAdapter Model" + } + } + }) + + # Update the "207" node's model pair to point to "563" + if "207" in json: + json["207"]["inputs"]["model"] = ["563", 0] + + return json + + def update_json_with_loras(json_data, loras): start_id = 536 new_ids = [] @@ -321,8 +406,7 @@ def update_json_with_loras(json_data, loras): sm_data = query.data.get('data', {}) workflow, output_node_ids = ComfyDataTransform.get_workflow_json(ComfyWorkflow.STEERABLE_MOTION) workflow = update_json_with_loras(workflow, sm_data.get('lora_data')) - - print(sm_data) + workflow['464']['inputs']['height'] = sm_data.get('height') workflow['464']['inputs']['width'] = sm_data.get('width') @@ -365,13 +449,40 @@ def update_json_with_loras(json_data, loras): workflow["543"]["inputs"]["max_frames"] = int(float(sm_data.get('max_frames'))) workflow["543"]["inputs"]["text"] = sm_data.get('individual_negative_prompts') - # download the json file as text.json - # with open("text.json", "w") as f: - # f.write(json.dumps(workflow)) + if sm_data.get('structure_control_image'): + + workflow = update_structure_control_image(workflow, sm_data.get('structure_control_image'), sm_data.get('strength_of_structure_control_image')) ignore_list = sm_data.get("lora_data", []) return json.dumps(workflow), output_node_ids, [], ignore_list + + @staticmethod + def transform_dynamicrafter_workflow(query: MLQueryObject): + data_repo = DataRepo() + workflow, output_node_ids = ComfyDataTransform.get_workflow_json(ComfyWorkflow.DYNAMICRAFTER) + sm_data = query.data.get('data', {}) + + image_1 = data_repo.get_file_from_uuid(sm_data.get('file_image_0001_uuid')) + image_2 = data_repo.get_file_from_uuid(sm_data.get('file_image_0002_uuid')) + + workflow['16']['inputs']['image'] = image_1.filename + workflow['17']['inputs']['image'] = image_2.filename + workflow['12']['inputs']['seed'] = random_seed() + workflow['12']['inputs']['steps'] = 50 + workflow['12']['inputs']['cfg'] = 4 + workflow['12']['inputs']['prompt'] = sm_data.get('prompt') + + + extra_models_list = [ + { + "filename": "dynamicrafter_512_interp_v1.ckpt", + "url": "https://huggingface.co/Kijai/DynamiCrafter_pruned/blob/resolve/dynamicrafter_512_interp_v1_bf16.safetensors?download=true", + "dest": "./ComfyUI/models/checkpoints/" + }] + + return json.dumps(workflow), output_node_ids, extra_models_list, [] + @staticmethod def transform_video_upscaler_workflow(query: MLQueryObject): data_repo = DataRepo() @@ -469,12 +580,14 @@ def transform_motion_lora_workflow(query: MLQueryObject): ML_MODEL.sdxl.workflow_name: ComfyDataTransform.transform_sdxl_workflow, ML_MODEL.sdxl_controlnet.workflow_name: ComfyDataTransform.transform_sdxl_controlnet_workflow, ML_MODEL.sdxl_controlnet_openpose.workflow_name: ComfyDataTransform.transform_sdxl_controlnet_openpose_workflow, + ML_MODEL.ipadapter_composition.workflow_name: ComfyDataTransform.transform_ipadapter_composition_workflow, ML_MODEL.llama_2_7b.workflow_name: ComfyDataTransform.transform_llama_2_7b_workflow, ML_MODEL.sdxl_inpainting.workflow_name: ComfyDataTransform.transform_sdxl_inpainting_workflow, ML_MODEL.ipadapter_plus.workflow_name: ComfyDataTransform.transform_ipadaptor_plus_workflow, ML_MODEL.ipadapter_face.workflow_name: ComfyDataTransform.transform_ipadaptor_face_workflow, ML_MODEL.ipadapter_face_plus.workflow_name: ComfyDataTransform.transform_ipadaptor_face_plus_workflow, ML_MODEL.ad_interpolation.workflow_name: ComfyDataTransform.transform_steerable_motion_workflow, + 
ML_MODEL.dynamicrafter.workflow_name: ComfyDataTransform.transform_dynamicrafter_workflow, ML_MODEL.sdxl_img2img.workflow_name: ComfyDataTransform.transform_sdxl_img2img_workflow, ML_MODEL.video_upscaler.workflow_name: ComfyDataTransform.transform_video_upscaler_workflow, ML_MODEL.motion_lora_trainer.workflow_name: ComfyDataTransform.transform_motion_lora_workflow diff --git a/utils/ml_processor/comfy_workflows/dynamicrafter_api.json b/utils/ml_processor/comfy_workflows/dynamicrafter_api.json new file mode 100644 index 00000000..a656278e --- /dev/null +++ b/utils/ml_processor/comfy_workflows/dynamicrafter_api.json @@ -0,0 +1,164 @@ +{ + "2": { + "inputs": { + "frame_rate": 12, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": true, + "images": [ + "34", + 0 + ] + }, + "class_type": "VHS_VideoCombine", + "_meta": { + "title": "Video Combine 🎥🅥🅗🅢" + } + }, + "11": { + "inputs": { + "ckpt_name": "dynamicrafter_512_interp_v1.ckpt", + "dtype": "auto" + }, + "class_type": "DynamiCrafterModelLoader", + "_meta": { + "title": "DynamiCrafterModelLoader" + } + }, + "12": { + "inputs": { + "steps": 50, + "cfg": 5, + "eta": 1, + "frames": 16, + "prompt": "dolly zoom out", + "seed": 262623773159722, + "fs": 10, + "keep_model_loaded": true, + "vae_dtype": "auto", + "cut_near_keyframes": 0, + "model": [ + "11", + 0 + ], + "images": [ + "15", + 0 + ] + }, + "class_type": "DynamiCrafterBatchInterpolation", + "_meta": { + "title": "DynamiCrafterBatchInterpolation" + } + }, + "15": { + "inputs": { + "image1": [ + "37", + 0 + ], + "image2": [ + "38", + 0 + ] + }, + "class_type": "ImageBatch", + "_meta": { + "title": "Batch Images" + } + }, + "16": { + "inputs": { + "image": "ea47a572b4e5b52ea7da22384232381b3e62048fa715f042b38b4da9 (1) (2).jpg", + "upload": "image" + }, + "class_type": "LoadImage", + "_meta": { + "title": "Load Image" + } + }, + "17": { + "inputs": { + "image": "2193d9ded46130b41d09133b4b1d2502f0eaa19ea1762252c6581e86 (1) (1).jpg", + "upload": "image" + }, + "class_type": "LoadImage", + "_meta": { + "title": "Load Image" + } + }, + "34": { + "inputs": { + "ckpt_name": "film_net_fp32.pt", + "clear_cache_after_n_frames": 10, + "multiplier": 3, + "frames": [ + "12", + 0 + ] + }, + "class_type": "FILM VFI", + "_meta": { + "title": "FILM VFI" + } + }, + "35": { + "inputs": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "image/gif", + "pingpong": false, + "save_output": true, + "images": [ + "12", + 0 + ] + }, + "class_type": "VHS_VideoCombine", + "_meta": { + "title": "Video Combine 🎥🅥🅗🅢" + } + }, + "37": { + "inputs": { + "mode": "rescale", + "supersample": "true", + "resampling": "lanczos", + "rescale_factor": 0.7000000000000001, + "resize_width": 1024, + "resize_height": 1536, + "image": [ + "16", + 0 + ] + }, + "class_type": "Image Resize", + "_meta": { + "title": "Image Resize" + } + }, + "38": { + "inputs": { + "mode": "rescale", + "supersample": "true", + "resampling": "lanczos", + "rescale_factor": 0.7000000000000001, + "resize_width": 1024, + "resize_height": 1536, + "image": [ + "17", + 0 + ] + }, + "class_type": "Image Resize", + "_meta": { + "title": "Image Resize" + } + } +} \ No newline at end of file diff --git a/utils/ml_processor/comfy_workflows/ipadapter_composition_workflow_api.json b/utils/ml_processor/comfy_workflows/ipadapter_composition_workflow_api.json new file mode 100644 index 00000000..cc5a4cea --- 
/dev/null +++ b/utils/ml_processor/comfy_workflows/ipadapter_composition_workflow_api.json @@ -0,0 +1,174 @@ +{ + "1": { + "inputs": { + "ckpt_name": "Realistic_Vision_V5.1.safetensors" + }, + "class_type": "CheckpointLoaderSimple", + "_meta": { + "title": "Load Checkpoint" + } + }, + "2": { + "inputs": { + "vae_name": "vae-ft-mse-840000-ema-pruned.safetensors" + }, + "class_type": "VAELoader", + "_meta": { + "title": "Load VAE" + } + }, + "3": { + "inputs": { + "ipadapter_file": "ip_plus_composition_sd15.safetensors" + }, + "class_type": "IPAdapterModelLoader", + "_meta": { + "title": "Load IPAdapter Model" + } + }, + "4": { + "inputs": { + "clip_name": "SD1.5/pytorch_model.bin" + }, + "class_type": "CLIPVisionLoader", + "_meta": { + "title": "Load CLIP Vision" + } + }, + "6": { + "inputs": { + "image": "Hulk_Hogan.jpg", + "upload": "image" + }, + "class_type": "LoadImage", + "_meta": { + "title": "Load Image" + } + }, + "7": { + "inputs": { + "text": "hulk hogan", + "clip": [ + "1", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } + }, + "8": { + "inputs": { + "text": "blurry, photo, malformed", + "clip": [ + "1", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } + }, + "9": { + "inputs": { + "seed": 16, + "steps": 30, + "cfg": 5, + "sampler_name": "dpmpp_2m_sde", + "scheduler": "exponential", + "denoise": 1, + "model": [ + "28", + 0 + ], + "positive": [ + "7", + 0 + ], + "negative": [ + "8", + 0 + ], + "latent_image": [ + "10", + 0 + ] + }, + "class_type": "KSampler", + "_meta": { + "title": "KSampler" + } + }, + "10": { + "inputs": { + "width": 512, + "height": 512, + "batch_size": 1 + }, + "class_type": "EmptyLatentImage", + "_meta": { + "title": "Empty Latent Image" + } + }, + "11": { + "inputs": { + "samples": [ + "9", + 0 + ], + "vae": [ + "2", + 0 + ] + }, + "class_type": "VAEDecode", + "_meta": { + "title": "VAE Decode" + } + }, + "27": { + "inputs": { + "filename_prefix": "ComfyUI", + "images": [ + "11", + 0 + ] + }, + "class_type": "SaveImage", + "_meta": { + "title": "Save Image" + } + }, + "28": { + "inputs": { + "weight": 1, + "noise": 0, + "weight_type": "original", + "start_at": 0, + "end_at": 1, + "unfold_batch": false, + "ipadapter": [ + "3", + 0 + ], + "clip_vision": [ + "4", + 0 + ], + "image": [ + "6", + 0 + ], + "model": [ + "1", + 0 + ] + }, + "class_type": "IPAdapterApply", + "_meta": { + "title": "Apply IPAdapter" + } + } +} \ No newline at end of file diff --git a/utils/ml_processor/constants.py b/utils/ml_processor/constants.py index 73ea4d1b..62a879b8 100644 --- a/utils/ml_processor/constants.py +++ b/utils/ml_processor/constants.py @@ -16,6 +16,8 @@ class ComfyWorkflow(ExtendedEnum): SDXL_IMG2IMG = "sdxl_img2img" UPSCALER = "upscale" MOTION_LORA = "motion_lora" + IPADAPTER_COMPOSITION = "ipadapter_composition" + DYNAMICRAFTER = "dynamicrafter" @dataclass class MLModel: @@ -82,6 +84,7 @@ class ML_MODEL: sdxl_controlnet = MLModel("lucataco/sdxl-controlnet", "db2ffdbdc7f6cb4d6dab512434679ee3366ae7ab84f89750f8947d5594b79a47", ComfyWorkflow.SDXL_CONTROLNET) realistic_vision_v5_img2img = MLModel("lucataco/realistic-vision-v5-img2img", "82bbb4595458d6be142450fc6d8c4d79c936b92bd184dd2d6dd71d0796159819") ad_interpolation = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.STEERABLE_MOTION) + dynamicrafter = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.DYNAMICRAFTER) # addition 17/10/2023 llama_2_7b = 
MLModel("meta/llama-2-7b", "527827021d8756c7ab79fde0abbfaac885c37a3ed5fe23c7465093f0878d55ef", ComfyWorkflow.LLAMA_2_7B) @@ -95,7 +98,8 @@ class ML_MODEL: ipadapter_face_plus = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.IP_ADAPTER_FACE_PLUS) video_upscaler = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.UPSCALER) motion_lora_trainer = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.MOTION_LORA) - + ipadapter_composition = MLModel(ComfyRunnerModel.name, ComfyRunnerModel.version, ComfyWorkflow.IPADAPTER_COMPOSITION) + @staticmethod def get_model_by_db_obj(model_db_obj): @@ -112,9 +116,11 @@ def get_model_by_db_obj(model_db_obj): ML_MODEL.sdxl_img2img, ML_MODEL.sdxl_inpainting, ML_MODEL.ad_interpolation, + ML_MODEL.dynamicrafter, ML_MODEL.ipadapter_face, ML_MODEL.ipadapter_face_plus, - ML_MODEL.ipadapter_plus + ML_MODEL.ipadapter_plus, + ML_MODEL.ipadapter_composition, ] DEFAULT_LORA_MODEL_URL = "https://replicate.delivery/pbxt/nWm6eP9ojwVvBCaWoWZVawOKRfgxPJmkVk13ES7PX36Y66kQA/tmpxuz6k_k2datazip.safetensors" From 65539af195b0a9ac755defc8fa24991926900c9a Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Thu, 21 Mar 2024 12:07:30 +0000 Subject: [PATCH 36/43] sm reference img and dynamic crafter fix --- ui_components/components/animate_shot_page.py | 24 +- .../components/video_rendering_page.py | 82 ++++++- .../methods/animation_style_methods.py | 6 +- ui_components/methods/video_methods.py | 10 +- ui_components/widgets/shot_view.py | 1 + .../widgets/sm_animation_style_element.py | 124 ++++++---- utils/media_processor/interpolator.py | 217 ++++++++---------- utils/ml_processor/comfy_data_transform.py | 47 ++-- .../comfy_workflows/dynamicrafter_api.json | 3 +- utils/ml_processor/gpu/gpu.py | 13 +- utils/ml_processor/replicate/utils.py | 3 +- 11 files changed, 321 insertions(+), 209 deletions(-) diff --git a/ui_components/components/animate_shot_page.py b/ui_components/components/animate_shot_page.py index 76956436..61e2c1f0 100644 --- a/ui_components/components/animate_shot_page.py +++ b/ui_components/components/animate_shot_page.py @@ -1,10 +1,11 @@ import json import streamlit as st from shared.constants import InternalFileType -from ui_components.components.video_rendering_page import sm_video_rendering_page +from ui_components.components.video_rendering_page import sm_video_rendering_page, two_img_realistic_interpolation_page from ui_components.models import InternalShotObject from ui_components.widgets.frame_selector import frame_selector_widget from ui_components.widgets.variant_comparison_grid import variant_comparison_grid +from utils import st_memory from utils.data_repo.data_repo import DataRepo from ui_components.widgets.sidebar_logger import sidebar_logger @@ -26,6 +27,13 @@ def animate_shot_page(shot_uuid: str, h2): st.markdown("***") selected_variant = variant_comparison_grid(shot_uuid, stage="Shots") + video_rendering_page(shot_uuid, selected_variant) + + +def video_rendering_page(shot_uuid, selected_variant): + data_repo = DataRepo() + shot = data_repo.get_shot_from_uuid(shot_uuid) + file_uuid_list = [] # loading images from a particular video variant if selected_variant: @@ -39,6 +47,18 @@ def animate_shot_page(shot_uuid: str, h2): file_uuid_list.append(timing.primary_image.uuid) img_list = data_repo.get_all_file_list(uuid__in=file_uuid_list, file_type=InternalFileType.IMAGE.value)[0] - sm_video_rendering_page(shot_uuid, img_list) + + headline1, _, headline3 = st.columns([1, 1, 1]) + with headline1: + 
st.markdown("### 🎥 Generate animations") + st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") + with headline3: + with st.expander("Type of animation", expanded=False): + type_of_animation = st_memory.radio("What type of animation would you like to generate?", options=["Batch Creative Interpolation", "2-Image Realistic Interpolation (beta)"],horizontal=True, help="**Batch Creative Interpolaton** lets you input multple images and control the motion and style of each frame - resulting in a fluid, surreal and highly-controllable motion. \n\n **2-Image Realistic Interpolation** is a simpler way to generate animations - it generates a video by interpolating between two images, and is best for realistic motion.",key=f"type_of_animation_{shot.uuid}") + + if type_of_animation == "Batch Creative Interpolation": + sm_video_rendering_page(shot_uuid, img_list) + else: + two_img_realistic_interpolation_page(shot_uuid, img_list) st.markdown("***") \ No newline at end of file diff --git a/ui_components/components/video_rendering_page.py b/ui_components/components/video_rendering_page.py index 6ee55f14..89712979 100644 --- a/ui_components/components/video_rendering_page.py +++ b/ui_components/components/video_rendering_page.py @@ -9,6 +9,7 @@ from ui_components.methods.animation_style_methods import toggle_generate_inference, transform_data, \ update_session_state_with_animation_details from ui_components.methods.video_methods import create_single_interpolated_clip +from utils import st_memory from utils.data_repo.data_repo import DataRepo default_model = "Deliberate_v2.safetensors" @@ -16,9 +17,6 @@ def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): data_repo = DataRepo() shot: InternalShotObject = data_repo.get_shot_from_uuid(shot_uuid) - st.markdown("### 🎥 Generate animations") - st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") - settings = { 'animation_tool': AnimationToolType.ANIMATEDIFF.value, } @@ -58,6 +56,7 @@ def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): motion_scale = 1.3 interpolation_style = 'ease-in-out' buffer = 4 + amount_of_motion = 1.3 (dynamic_strength_values, dynamic_key_frame_influence_values, dynamic_frame_distribution_values, context_length, context_stride, context_overlap, multipled_base_end_percent, multipled_base_adapter_strength, @@ -111,14 +110,16 @@ def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): animation_stype=AnimationStyleType.CREATIVE_INTERPOLATION.value, max_frames=str(dynamic_frame_distribution_values[-1]), lora_data=lora_data, - shot_data=shot_meta_data + shot_data=shot_meta_data, + pil_img_structure_control_image=st.session_state[f"structure_control_image_{shot.uuid}"], # this is a PIL object + strength_of_structure_control_image=st.session_state[f"strength_of_structure_control_image_{shot.uuid}"], ) position = "generate_vid" st.markdown("***") st.markdown("##### Generation Settings") - animate_col_1, animate_col_2, _ = st.columns([3, 1, 1]) + animate_col_1, _, _ = st.columns([3, 1, 1]) with animate_col_1: variant_count = st.number_input("How many variants?", min_value=1, max_value=5, value=1, step=1, key="variant_count") @@ -206,4 +207,73 @@ def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): individual_prompts, individual_negative_prompts, default_model - ) \ No newline at end of file + ) + + +def two_img_realistic_interpolation_page(shot_uuid, img_list): + if not (img_list and len(img_list) >= 2): + st.error("You need two images 
for this interpolation") + return + + data_repo = DataRepo() + shot = data_repo.get_shot_from_uuid(shot_uuid) + + settings = {} + col1, col2, col3 = st.columns([1, 1, 1]) + with col1: + st.image(img_list[0].location, use_column_width=True) + + with col3: + st.image(img_list[1].location, use_column_width=True) + + with col2: + description_of_motion = st_memory.text_area("Describe the motion you want between the frames:", key=f"description_of_motion_{shot.uuid}") + st.info("This is very important and will likely require some iteration.") + + variant_count = 1 # Assuming a default value for variant_count, adjust as necessary + vid_quality = "full" # Assuming full quality, adjust as necessary based on your requirements + position = "dynamiccrafter" + + if f"{position}_generate_inference" in st.session_state and st.session_state[f"{position}_generate_inference"]: + + st.success("Generating clip - see status in the Generation Log in the sidebar. Press 'Refresh log' to update.") + # Assuming the logic to generate the clip based on two images, the described motion, and fixed duration + duration = 4 # Fixed duration of 4 seconds + data_repo.update_shot(uuid=shot.uuid, duration=duration) + + project_settings = data_repo.get_project_setting(shot.project.uuid) + + settings.update( + duration= duration, + animation_style=AnimationStyleType.DIRECT_MORPHING.value, + output_format="video/h264-mp4", + width=project_settings.width, + height=project_settings.height, + prompt=description_of_motion + ) + + create_single_interpolated_clip( + shot_uuid, + vid_quality, + settings, + variant_count, + st.session_state[f'{shot_uuid}_backlog_enabled'] + ) + + backlog_update = {f'{shot_uuid}_backlog_enabled': False} + toggle_generate_inference(position, **backlog_update) + st.rerun() + + + # Placeholder for the logic to generate the clip and update session state as needed + # This should include calling the function that handles the interpolation process with the updated settings + + # Buttons for adding to queue or backlog, assuming these are still relevant + btn1, btn2, btn3 = st.columns([1, 1, 1]) + backlog_no_update = {f'{shot_uuid}_backlog_enabled': False} + with btn1: + st.button("Add to queue", key="generate_animation_clip", disabled=False, help="Generate the interpolation clip based on the two images and described motion.", on_click=lambda: toggle_generate_inference(position, **backlog_no_update), type="primary", use_container_width=True) + + backlog_update = {f'{shot_uuid}_backlog_enabled': True} + with btn2: + st.button("Add to backlog", key="generate_animation_clip_backlog", disabled=False, help="Add the 2-Image Realistic Interpolation to the backlog.", on_click=lambda: toggle_generate_inference(position, **backlog_update), type="secondary") diff --git a/ui_components/methods/animation_style_methods.py b/ui_components/methods/animation_style_methods.py index 72d050d2..bf681976 100644 --- a/ui_components/methods/animation_style_methods.py +++ b/ui_components/methods/animation_style_methods.py @@ -72,11 +72,11 @@ def format_frame_prompts_with_buffer(frame_numbers, individual_prompts, buffer): def plot_weights(weights_list, frame_numbers_list): plt.figure(figsize=(12, 6)) for i, weights in enumerate(weights_list): - frame_numbers = frame_numbers_list[i] + frame_numbers = [frame_number / 100 for frame_number in frame_numbers_list[i]] plt.plot(frame_numbers, weights, label=f'Frame {i + 1}') # Plot settings - plt.xlabel('Frame Number') + plt.xlabel('Seconds') plt.ylabel('Weight') plt.legend() plt.ylim(0, 1.0) @@ 
-398,7 +398,7 @@ def update_session_state_with_animation_details(shot_uuid, img_list: List[Intern main_setting_data[f"type_of_motion_context_index_{shot.uuid}"] = st.session_state["type_of_motion_context"] main_setting_data[f"positive_prompt_video_{shot.uuid}"] = st.session_state["overall_positive_prompt"] main_setting_data[f"negative_prompt_video_{shot.uuid}"] = st.session_state["overall_negative_prompt"] - main_setting_data[f"amount_of_motion_{shot.uuid}"] = st.session_state["amount_of_motion"] + # main_setting_data[f"amount_of_motion_{shot.uuid}"] = st.session_state["amount_of_motion"] checkpoints_dir = "ComfyUI/models/checkpoints" all_files = os.listdir(checkpoints_dir) diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index a98a2f0d..681c31fd 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -45,10 +45,18 @@ def create_single_interpolated_clip(shot_uuid, quality, settings={}, variant_cou img_list = [t.primary_image.location for t in timing_list] settings.update(interpolation_steps=interpolation_steps) settings.update(file_uuid_list=[t.primary_image.uuid for t in timing_list]) + + # converting PIL imgs to InternalFileObject + from ui_components.methods.common_methods import save_new_image + for key in settings.keys(): + if key.startswith("pil_img_") and settings[key]: + image = save_new_image(settings[key], shot.project.uuid) + del settings[key] + new_key = key.replace("pil_img_", "") + "_uuid" + settings[new_key] = image.uuid # res is an array of tuples (video_bytes, log) res = VideoInterpolator.create_interpolated_clip( - img_list, settings['animation_style'], settings, variant_count, diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 68862078..66041e6d 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -55,6 +55,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel } for idx, timing in enumerate(timing_list)] st.session_state[f"shot_data_{shot_uuid}"] = pd.DataFrame(shot_data) else: + st.info("Use this to move frames") st.session_state[f"shot_data_{shot_uuid}"] = None diff --git a/ui_components/widgets/sm_animation_style_element.py b/ui_components/widgets/sm_animation_style_element.py index 332f82f3..94242b7d 100644 --- a/ui_components/widgets/sm_animation_style_element.py +++ b/ui_components/widgets/sm_animation_style_element.py @@ -6,8 +6,10 @@ import random import string import tarfile +from PIL import Image import streamlit as st from shared.constants import InternalFileType +from ui_components.methods.common_methods import save_new_image from utils import st_memory from ui_components.constants import DEFAULT_SHOT_MOTION_VALUES from ui_components.methods.animation_style_methods import calculate_weights, extract_influence_values, \ @@ -48,37 +50,39 @@ def animation_sidebar(shot_uuid, img_list, type_of_frame_distribution, dynamic_f st.success("All frames have been reset to default values.") st.rerun() - editable_entity = st.selectbox("What would you like to edit?", options=["Seconds to next frames", "Speed of transitions", "Freedom between frames","Strength of frames","Motion during frames"], key="editable_entity") - if editable_entity == "Seconds to next frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=6.00, step=0.25, value=1.0, key="entity_new_val") - if editable_entity == "Strength of frames": - entity_new_val = 
st.slider("What would you like to change it to?", min_value=0.25, max_value=1.0, step=0.01, value=0.5, key="entity_new_val") - elif editable_entity == "Speed of transitions": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.45, max_value=0.7, step=0.01, value=0.6, key="entity_new_val") - elif editable_entity == "Freedom between frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.15, max_value=0.85, step=0.01, value=0.5, key="entity_new_val") - elif editable_entity == "Motion during frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.5, max_value=1.5, step=0.01, value=1.3, key="entity_new_val") + # New feature: Selecting a range to edit + range_to_edit = st.slider("Select the range of frames you would like to edit:", + min_value=1, max_value=len(img_list), + value=(1, len(img_list)), step=1, key="range_to_edit") + edit1, edit2 = st.columns([1, 1]) + with edit1: + editable_entity = st.selectbox("What would you like to edit?", options=["Seconds to next frames", "Speed of transitions", "Freedom between frames","Strength of frames","Motion during frames"], key="editable_entity") + with edit2: + if editable_entity == "Seconds to next frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=6.00, step=0.25, value=1.0, key="entity_new_val_seconds") + elif editable_entity == "Strength of frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=1.0, step=0.01, value=0.5, key="entity_new_val_strength") + elif editable_entity == "Speed of transitions": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.45, max_value=0.7, step=0.01, value=0.6, key="entity_new_val_speed") + elif editable_entity == "Freedom between frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.15, max_value=0.85, step=0.01, value=0.5, key="entity_new_val_freedom") + elif editable_entity == "Motion during frames": + entity_new_val = st.slider("What would you like to change it to?", min_value=0.5, max_value=1.5, step=0.01, value=1.3, key="entity_new_val_motion") - bulk1, bulk2 = st.columns([1, 1]) - with bulk1: - if st.button("Bulk edit", key="bulk_edit", use_container_width=True): + if st.button("Bulk edit", key="bulk_edit", use_container_width=True): + start_idx, end_idx = range_to_edit + for idx in range(start_idx - 1, end_idx): # Adjusting index to be 0-based if editable_entity == "Strength of frames": - for idx, _ in enumerate(img_list): - st.session_state[f'strength_of_frame_{shot_uuid}_{idx}'] = entity_new_val + st.session_state[f'strength_of_frame_{shot_uuid}_{idx}'] = entity_new_val elif editable_entity == "Seconds to next frames": - for idx, _ in enumerate(img_list): - st.session_state[f'distance_to_next_frame_{shot_uuid}_{idx}'] = entity_new_val + st.session_state[f'distance_to_next_frame_{shot_uuid}_{idx}'] = entity_new_val elif editable_entity == "Speed of transitions": - for idx, _ in enumerate(img_list): - st.session_state[f'speed_of_transition_{shot_uuid}_{idx}'] = entity_new_val + st.session_state[f'speed_of_transition_{shot_uuid}_{idx}'] = entity_new_val elif editable_entity == "Freedom between frames": - for idx, _ in enumerate(img_list): - st.session_state[f'freedom_between_frames_{shot_uuid}_{idx}'] = entity_new_val + st.session_state[f'freedom_between_frames_{shot_uuid}_{idx}'] = entity_new_val elif editable_entity == "Motion during frames": - for 
idx, _ in enumerate(img_list): - st.session_state[f'motion_during_frame_{shot_uuid}_{idx}'] = entity_new_val - st.rerun() + st.session_state[f'motion_during_frame_{shot_uuid}_{idx}'] = entity_new_val + st.rerun() st.markdown("***") st.markdown("### Save current settings") @@ -102,14 +106,15 @@ def animation_sidebar(shot_uuid, img_list, type_of_frame_distribution, dynamic_f def video_motion_settings(shot_uuid, img_list): + data_repo = DataRepo() + shot = data_repo.get_shot_from_uuid(shot_uuid) + st.markdown("***") st.markdown("##### Overall style settings") - e1, e2, e3 = st.columns([1, 1,1]) + e1, _, _ = st.columns([1, 1,1]) with e1: strength_of_adherence = st.slider("How much would you like to force adherence to the input images?", min_value=0.0, max_value=1.0, step=0.01, key="strength_of_adherence", value=st.session_state[f"strength_of_adherence_value_{shot_uuid}"]) - with e2: - st.info("Higher values may cause flickering and sudden changes in the video. Lower values may cause the video to be less influenced by the input images but can lead to smoother motion and better colours.") f1, f2, f3 = st.columns([1, 1, 1]) with f1: @@ -138,31 +143,54 @@ def update_prompt(): st.markdown("***") st.markdown("##### Overall motion settings") - h1, h2, h3 = st.columns([0.5, 1.5, 1]) + h1, h2, h3 = st.columns([1, 0.5, 1.0]) with h1: # will fix this later + def update_motion_for_all_frames(shot_uuid, timing_list): + amount_of_motion = st.session_state.get("amount_of_motion_overall", 1.0) # Default to 1.0 if not set + for idx, _ in enumerate(timing_list): + st.session_state[f'motion_during_frame_{shot_uuid}_{idx}'] = amount_of_motion + if f"type_of_motion_context_index_{shot_uuid}" in st.session_state and isinstance(st.session_state[f"type_of_motion_context_index_{shot_uuid}"], str): st.session_state[f"type_of_motion_context_index_{shot_uuid}"] = ["Low", "Standard", "High"].index(st.session_state[f"type_of_motion_context_index_{shot_uuid}"]) - type_of_motion_context = st.radio("Type of motion context:", options=["Low", "Standard", "High"], key="type_of_motion_context", horizontal=False, index=st.session_state[f"type_of_motion_context_index_{shot_uuid}"]) - - with h2: - st.info("This is how much the motion will be informed by the previous and next frames. 'High' can make it smoother but increase artifacts - while 'Low' make the motion less smooth but removes artifacts. Naturally, we recommend Standard.") - st.write("") - i1, i3,_ = st.columns([1,2,1]) + type_of_motion_context = st.radio("Type of motion context:", options=["Low", "Standard", "High"], key="type_of_motion_context", horizontal=True, index=st.session_state[f"type_of_motion_context_index_{shot.uuid}"], help="This is how much the motion will be informed by the previous and next frames. 'High' can make it smoother but increase artifacts - while 'Low' make the motion less smooth but removes artifacts. 
Naturally, we recommend Standard.") + st.session_state[f"amount_of_motion_{shot_uuid}"] = st.slider("Amount of motion:", min_value=0.5, max_value=1.5, step=0.01,value=1.3, key="amount_of_motion_overall", on_change=lambda: update_motion_for_all_frames(shot.uuid, img_list), help="You can also tweak this on an individual frame level in the advanced settings above.") + + i1, i2, i3 = st.columns([1, 0.5, 1.5]) with i1: - amount_of_motion = st.slider("Amount of motion:", min_value=0.5, max_value=1.5, step=0.01, key="amount_of_motion", value=st.session_state[f"amount_of_motion_{shot_uuid}"]) - st.write("") - if st.button("Bulk update amount of motion", key="update_motion", help="This will update this value in all the frames"): - for idx, _ in enumerate(img_list): - st.session_state[f'motion_during_frame_{shot_uuid}_{idx}'] = amount_of_motion - st.success("Updated amount of motion") - time.sleep(0.3) - st.rerun() - with i3: - st.write("") - st.write("") - st.info("This actually updates the motion during frames in the advanced settings above - but we put it here because it has a big impact on the video. You can scroll up to see the changes and tweak for individual frames.") - + if f'structure_control_image_{shot_uuid}' not in st.session_state: + st.session_state[f"structure_control_image_{shot_uuid}"] = None + + if f"strength_of_structure_control_image_{shot_uuid}" not in st.session_state: + st.session_state[f"strength_of_structure_control_image_{shot_uuid}"] = None + control_motion_with_image = st_memory.toggle("Control motion with an image", help="This will allow you to upload images to control the motion of the video.",key=f"control_motion_with_image_{shot_uuid}") + + if control_motion_with_image: + uploaded_image = st.file_uploader("Upload images to control motion", type=["png", "jpg", "jpeg"], accept_multiple_files=False) + if st.button("Add image", key="add_images"): + if uploaded_image: + project_settings = data_repo.get_project_setting(shot.project.uuid) + width, height = project_settings.width, project_settings.height + # Convert the uploaded image file to PIL Image + uploaded_image_pil = Image.open(uploaded_image) + uploaded_image_pil = uploaded_image_pil.resize((width, height)) + st.session_state[f"structure_control_image_{shot.uuid}"] = uploaded_image_pil + st.rerun() + else: + st.warning("No images uploaded") + else: + st.session_state[f"structure_control_image_{shot_uuid}"] = None + + with i2: + if f"structure_control_image_{shot_uuid}" in st.session_state and st.session_state[f"structure_control_image_{shot_uuid}"]: + st.info("Control image:") + st.image(st.session_state[f"structure_control_image_{shot_uuid}"]) + st.session_state[f"strength_of_structure_control_image_{shot_uuid}"] = st.slider("Strength of control image:", min_value=0.0, max_value=1.0, step=0.01, key="strength_of_structure_control_image", value=0.5, help="This is how much the control image will influence the motion of the video.") + if st.button("Remove image", key="remove_images"): + st.session_state[f"structure_control_image_{shot_uuid}"] = None + st.success("Image removed") + st.rerun() + return strength_of_adherence, overall_positive_prompt, overall_negative_prompt, type_of_motion_context def select_motion_lora_element(shot_uuid, model_files): diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index b38c558c..dc91060b 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -1,17 +1,7 @@ -import os -import time -import cv2 -import 
streamlit as st -import requests as r -import numpy as np from shared.constants import QUEUE_INFERENCE_QUERIES, AnimationStyleType, AnimationToolType from ui_components.constants import DefaultTimingStyleParams -from ui_components.methods.file_methods import generate_temp_file, zip_images -from ui_components.models import InferenceLogObject from utils.common_utils import padded_integer from utils.constants import MLQueryObject - -from utils.data_repo.data_repo import DataRepo from utils.ml_processor.ml_interface import get_ml_client from utils.ml_processor.constants import ML_MODEL @@ -37,13 +27,12 @@ def calculate_dynamic_interpolations_steps(clip_duration): return interpolation_steps @staticmethod - def create_interpolated_clip(img_location_list, animation_style, settings, variant_count=1, queue_inference=False, backlog=False): + def create_interpolated_clip(animation_style, settings, variant_count=1, queue_inference=False, backlog=False): if not animation_style: animation_style = DefaultTimingStyleParams.animation_style if animation_style == AnimationStyleType.CREATIVE_INTERPOLATION.value: return VideoInterpolator.video_through_frame_interpolation( - img_location_list, settings, variant_count, queue_inference, @@ -52,7 +41,6 @@ def create_interpolated_clip(img_location_list, animation_style, settings, varia elif animation_style == AnimationStyleType.DIRECT_MORPHING.value: return VideoInterpolator.video_through_direct_morphing( - img_location_list, settings, variant_count, queue_inference, @@ -62,128 +50,115 @@ def create_interpolated_clip(img_location_list, animation_style, settings, varia # returns a video bytes generated through interpolating frames between the given list of frames @staticmethod - def video_through_frame_interpolation(img_location_list, settings, variant_count, queue_inference=False, backlog=False): + def video_through_frame_interpolation(settings, variant_count, queue_inference=False, backlog=False): ml_client = get_ml_client() final_res = [] for _ in range(variant_count): - - if True: - # NOTE: @Peter these are all the settings you passed in from the UI - sm_data = { - "ckpt": settings['ckpt'], - "width": settings['width'], # "width": "512", - "height": settings['height'], # "height": "512", - "buffer": settings['buffer'], - "motion_scale": settings['motion_scale'], # "motion_scale": "1.0", - "motion_scales": settings['motion_scales'], - "image_dimension": settings["image_dimension"], - "output_format": settings['output_format'], - "prompt": settings["prompt"], - "negative_prompt": settings["negative_prompt"], - # "image_prompt_list": settings["image_prompt_list"], - "interpolation_type": settings["interpolation_type"], - "stmfnet_multiplier": settings["stmfnet_multiplier"], - "relative_ipadapter_strength": settings["relative_ipadapter_strength"], - "relative_cn_strength": settings["relative_cn_strength"], - "type_of_strength_distribution": settings["type_of_strength_distribution"], - "linear_strength_value": settings["linear_strength_value"], - "dynamic_strength_values": settings["dynamic_strength_values"], - "linear_frame_distribution_value": settings["linear_frame_distribution_value"], - "dynamic_frame_distribution_values": settings["dynamic_frame_distribution_values"], - "type_of_frame_distribution": settings["type_of_frame_distribution"], - "type_of_key_frame_influence": settings["type_of_key_frame_influence"], - "linear_key_frame_influence_value": settings["linear_key_frame_influence_value"], - "dynamic_key_frame_influence_values": 
settings["dynamic_key_frame_influence_values"], - "normalise_speed": settings["normalise_speed"], - "ipadapter_noise": settings["ipadapter_noise"], - "queue_inference": True, - "context_length": settings["context_length"], - "context_stride": settings["context_stride"], - "context_overlap": settings["context_overlap"], - "multipled_base_end_percent": settings["multipled_base_end_percent"], - "multipled_base_adapter_strength": settings["multipled_base_adapter_strength"], - "individual_prompts": settings["individual_prompts"], - "individual_negative_prompts": settings["individual_negative_prompts"], - "max_frames": settings["max_frames"], - "lora_data": settings["lora_data"], - "shot_data": settings["shot_data"], - "structure_control_image": settings["structure_control_image"], - "strength_of_structure_control_image": settings["strength_of_structure_control_image"] - } - - # adding the input images - for idx, img_uuid in enumerate(settings['file_uuid_list']): - sm_data[f"file_image_{padded_integer(idx+1)}" + "_uuid"] = img_uuid - - - if settings["structure_control_image"] is not None: - # add to file_image__{padded_integer(idx+1)}_uuid - sm_data[f"file_image_{padded_integer(len(settings['file_uuid_list'])+1)}" + "_uuid"] = settings["structure_control_image"] - - - # NOTE: @Peter all the above settings are put in the 'data' parameter below - ml_query_object = MLQueryObject( - prompt="SM", # hackish fix - timing_uuid=None, - model_uuid=None, - guidance_scale=None, - seed=None, - num_inference_steps=None, - strength=None, - adapter_type=None, - negative_prompt="", - height=512, - width=512, - image_uuid=settings["structure_control_image"], - low_threshold=100, - high_threshold=200, - mask_uuid=None, - data=sm_data - ) - res = ml_client.predict_model_output_standardized(ML_MODEL.ad_interpolation, ml_query_object, QUEUE_INFERENCE_QUERIES, backlog) - + sm_data = { + "ckpt": settings['ckpt'], + "width": settings['width'], # "width": "512", + "height": settings['height'], # "height": "512", + "buffer": settings['buffer'], + "motion_scale": settings['motion_scale'], # "motion_scale": "1.0", + "motion_scales": settings['motion_scales'], + "image_dimension": settings["image_dimension"], + "output_format": settings['output_format'], + "prompt": settings["prompt"], + "negative_prompt": settings["negative_prompt"], + # "image_prompt_list": settings["image_prompt_list"], + "interpolation_type": settings["interpolation_type"], + "stmfnet_multiplier": settings["stmfnet_multiplier"], + "relative_ipadapter_strength": settings["relative_ipadapter_strength"], + "relative_cn_strength": settings["relative_cn_strength"], + "type_of_strength_distribution": settings["type_of_strength_distribution"], + "linear_strength_value": settings["linear_strength_value"], + "dynamic_strength_values": settings["dynamic_strength_values"], + "linear_frame_distribution_value": settings["linear_frame_distribution_value"], + "dynamic_frame_distribution_values": settings["dynamic_frame_distribution_values"], + "type_of_frame_distribution": settings["type_of_frame_distribution"], + "type_of_key_frame_influence": settings["type_of_key_frame_influence"], + "linear_key_frame_influence_value": settings["linear_key_frame_influence_value"], + "dynamic_key_frame_influence_values": settings["dynamic_key_frame_influence_values"], + "normalise_speed": settings["normalise_speed"], + "ipadapter_noise": settings["ipadapter_noise"], + "queue_inference": True, + "context_length": settings["context_length"], + "context_stride": 
settings["context_stride"], + "context_overlap": settings["context_overlap"], + "multipled_base_end_percent": settings["multipled_base_end_percent"], + "multipled_base_adapter_strength": settings["multipled_base_adapter_strength"], + "individual_prompts": settings["individual_prompts"], + "individual_negative_prompts": settings["individual_negative_prompts"], + "max_frames": settings["max_frames"], + "lora_data": settings["lora_data"], + "shot_data": settings["shot_data"], + "strength_of_structure_control_image": settings["strength_of_structure_control_image"] + } + + # adding the input images + for idx, img_uuid in enumerate(settings['file_uuid_list']): + sm_data[f"file_image_{padded_integer(idx+1)}" + "_uuid"] = img_uuid + + # adding structure control img + if "structure_control_image_uuid" in settings and settings["structure_control_image_uuid"] is not None: + sm_data[f"file_structure_control_img_uuid"] = settings["structure_control_image_uuid"] + + ml_query_object = MLQueryObject( + prompt="SM", # hackish fix + timing_uuid=None, + model_uuid=None, + guidance_scale=None, + seed=None, + num_inference_steps=None, + strength=None, + adapter_type=None, + negative_prompt="", + height=512, + width=512, + low_threshold=100, + high_threshold=200, + mask_uuid=None, + data=sm_data + ) + res = ml_client.predict_model_output_standardized(ML_MODEL.ad_interpolation, ml_query_object, QUEUE_INFERENCE_QUERIES, backlog) final_res.append(res) return final_res @staticmethod - def video_through_direct_morphing(img_location_list, settings, variant_count, queue_inference=False, backlog=False): + def video_through_direct_morphing(settings, variant_count, queue_inference=False, backlog=False): ml_client = get_ml_client() final_res = [] for _ in range(variant_count): - - if True: - # NOTE: @Peter these are all the settings you passed in from the UI - sm_data = { - "width": settings['width'], - "height": settings['height'], - "prompt": settings["prompt"] - } - - for idx, img_uuid in enumerate(settings['file_uuid_list']): - sm_data[f"file_image_{padded_integer(idx+1)}" + "_uuid"] = img_uuid - - ml_query_object = MLQueryObject( - prompt="SM", # hackish fix - timing_uuid=None, - model_uuid=None, - guidance_scale=None, - seed=None, - num_inference_steps=None, - strength=None, - adapter_type=None, - negative_prompt="", - height=settings['height'], - width=settings['width'], - image_uuid=None, - mask_uuid=None, - data=sm_data - ) - res = ml_client.predict_model_output_standardized(ML_MODEL.dynamicrafter, ml_query_object, QUEUE_INFERENCE_QUERIES, backlog) - + sm_data = { + "width": settings['width'], + "height": settings['height'], + "prompt": settings["prompt"] + } + + for idx, img_uuid in enumerate(settings['file_uuid_list']): + sm_data[f"file_image_{padded_integer(idx+1)}" + "_uuid"] = img_uuid + + ml_query_object = MLQueryObject( + prompt="Dynamicrafter", # hackish fix + timing_uuid=None, + model_uuid=None, + guidance_scale=None, + seed=None, + num_inference_steps=None, + strength=None, + adapter_type=None, + negative_prompt="", + height=settings['height'], + width=settings['width'], + image_uuid=None, + mask_uuid=None, + data=sm_data + ) + res = ml_client.predict_model_output_standardized(ML_MODEL.dynamicrafter, ml_query_object, QUEUE_INFERENCE_QUERIES, backlog) final_res.append(res) return final_res diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index 54293311..b38c0c72 100644 --- a/utils/ml_processor/comfy_data_transform.py +++ 
b/utils/ml_processor/comfy_data_transform.py @@ -33,6 +33,12 @@ # these methods return the workflow along with the output node class name class ComfyDataTransform: + # there are certain files which need to be stored in a subfolder + # creating a dict of filename <-> subfolder_name + filename_subfolder_dict = { + "structure_control_img": "sci" + } + @staticmethod def get_workflow_json(model: ComfyWorkflow): json_file_path = "./utils/ml_processor/" + MODEL_PATH_DICT[model]["workflow_path"] @@ -86,7 +92,6 @@ def transform_sdxl_img2img_workflow(query: MLQueryObject): return json.dumps(workflow), output_node_ids, [], [] - @staticmethod def transform_sdxl_controlnet_workflow(query: MLQueryObject): data_repo = DataRepo() @@ -112,7 +117,6 @@ def transform_sdxl_controlnet_workflow(query: MLQueryObject): return json.dumps(workflow), output_node_ids, [], [] - @staticmethod def transform_ipadapter_composition_workflow(query: MLQueryObject): data_repo = DataRepo() @@ -311,19 +315,15 @@ def transform_ipadaptor_face_plus_workflow(query: MLQueryObject): @staticmethod def transform_steerable_motion_workflow(query: MLQueryObject): - - - def update_structure_control_image(json, image, weight): + def update_structure_control_image(json, image_uuid, weight): # Integrate all updates including new nodes and modifications in a single step data_repo = DataRepo() - image = data_repo.get_file_from_uuid(image) - image = image.filename - # image = os.path.basename(image) + image = data_repo.get_file_from_uuid(image_uuid) json.update({ "560": { "inputs": { - "image": image, + "image": "sci/" + image.filename, # TODO: hardcoding for now, pick a proper flow later "upload": "image" }, "class_type": "LoadImage", @@ -367,7 +367,6 @@ def update_structure_control_image(json, image, weight): return json - def update_json_with_loras(json_data, loras): start_id = 536 new_ids = [] @@ -449,13 +448,11 @@ def update_json_with_loras(json_data, loras): workflow["543"]["inputs"]["max_frames"] = int(float(sm_data.get('max_frames'))) workflow["543"]["inputs"]["text"] = sm_data.get('individual_negative_prompts') - if sm_data.get('structure_control_image'): - - workflow = update_structure_control_image(workflow, sm_data.get('structure_control_image'), sm_data.get('strength_of_structure_control_image')) + if sm_data.get('file_structure_control_img_uuid'): + workflow = update_structure_control_image(workflow, sm_data.get('file_structure_control_img_uuid'), sm_data.get('strength_of_structure_control_image')) ignore_list = sm_data.get("lora_data", []) return json.dumps(workflow), output_node_ids, [], ignore_list - @staticmethod def transform_dynamicrafter_workflow(query: MLQueryObject): @@ -477,7 +474,7 @@ def transform_dynamicrafter_workflow(query: MLQueryObject): extra_models_list = [ { "filename": "dynamicrafter_512_interp_v1.ckpt", - "url": "https://huggingface.co/Kijai/DynamiCrafter_pruned/blob/resolve/dynamicrafter_512_interp_v1_bf16.safetensors?download=true", + "url": "https://huggingface.co/Doubiiu/DynamiCrafter_512_Interp/resolve/main/model.ckpt?download=true", "dest": "./ComfyUI/models/checkpoints/" }] @@ -614,18 +611,28 @@ def get_workflow_json_url(workflow_json): def get_file_list_from_query_obj(query_obj: MLQueryObject): file_uuid_list = [] - + custom_dest = {} + if query_obj.image_uuid: file_uuid_list.append(query_obj.image_uuid) if query_obj.mask_uuid: file_uuid_list.append(query_obj.mask_uuid) - for k, v in query_obj.data.get('data', {}).items(): - if k.startswith("file_"): - file_uuid_list.append(v) + for file_key, file_uuid 
in query_obj.data.get('data', {}).items(): + if file_key.startswith("file_"): + dest = "" + for filename in ComfyDataTransform.filename_subfolder_dict.keys(): + if filename in file_key: + dest = ComfyDataTransform.filename_subfolder_dict[filename] + break + + if dest: + custom_dest[str(file_uuid)] = dest + + file_uuid_list.append(file_uuid) - return file_uuid_list + return file_uuid_list, custom_dest # returns the zip file which can be passed to the comfy_runner replicate endpoint def get_file_zip_url(file_uuid_list, index_files=False) -> str: diff --git a/utils/ml_processor/comfy_workflows/dynamicrafter_api.json b/utils/ml_processor/comfy_workflows/dynamicrafter_api.json index a656278e..046cacf7 100644 --- a/utils/ml_processor/comfy_workflows/dynamicrafter_api.json +++ b/utils/ml_processor/comfy_workflows/dynamicrafter_api.json @@ -23,7 +23,8 @@ "11": { "inputs": { "ckpt_name": "dynamicrafter_512_interp_v1.ckpt", - "dtype": "auto" + "dtype": "auto", + "fp8_unet": false }, "class_type": "DynamiCrafterModelLoader", "_meta": { diff --git a/utils/ml_processor/gpu/gpu.py b/utils/ml_processor/gpu/gpu.py index 0a323225..1f2c0a73 100644 --- a/utils/ml_processor/gpu/gpu.py +++ b/utils/ml_processor/gpu/gpu.py @@ -36,7 +36,7 @@ def predict_model_output_standardized(self, model: MLModel, query_obj: MLQueryOb workflow_json, output_node_ids, extra_model_list, ignore_list = get_model_workflow_from_query(model, query_obj) file_uuid_list = [] - file_uuid_list = get_file_list_from_query_obj(query_obj) + file_uuid_list, custom_dest = get_file_list_from_query_obj(query_obj) file_list = data_repo.get_image_list_from_uuid_list(file_uuid_list) models_using_sdxl = [ @@ -55,7 +55,6 @@ def predict_model_output_standardized(self, model: MLModel, query_obj: MLQueryOb res = [] for file in file_list: new_width, new_height = determine_dimensions_for_sdxl(query_obj.width, query_obj.height) - # although the new_file created using create_new_file has the same location as the original file, it is # scaled to the original resolution after inference save (so resize has no effect) new_file = normalize_size_internal_file_obj(file, dim=[new_width, new_height], create_new_file=True) @@ -67,10 +66,14 @@ def predict_model_output_standardized(self, model: MLModel, query_obj: MLQueryOb file_path_list = [] for idx, file in enumerate(file_list): _, filename = os.path.split(file.local_path) - new_filename = f"{padded_integer(idx+1)}_" + filename \ - if model.display_name() == ComfyWorkflow.STEERABLE_MOTION.value else filename + if str(file.uuid) not in custom_dest: + new_filename = f"{padded_integer(idx+1)}_" + filename \ + if model.display_name() == ComfyWorkflow.STEERABLE_MOTION.value else filename + file_path_list.append("videos/temp/" + new_filename) + else: + new_filename = filename + file_path_list.append({"filepath": "videos/temp/" + new_filename, "dest_folder": custom_dest[str(file.uuid)]}) copy_local_file(file.local_path, "videos/temp/", new_filename) - file_path_list.append("videos/temp/" + new_filename) # replacing old files with resized files # if len(new_file_map.keys()): diff --git a/utils/ml_processor/replicate/utils.py b/utils/ml_processor/replicate/utils.py index ed25e32d..e7e1ebeb 100644 --- a/utils/ml_processor/replicate/utils.py +++ b/utils/ml_processor/replicate/utils.py @@ -34,7 +34,6 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): # handling comfy_runner workflows if model.name == ComfyRunnerModel.name: - # TODO: add custom model download option in the replicate cog workflow_json, 
output_node_ids, extra_model_list, ignore_list = get_model_workflow_from_query(model, query_obj) workflow_file = get_workflow_json_url(workflow_json) @@ -49,7 +48,7 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): ] # resizing image for sdxl - file_uuid_list = get_file_list_from_query_obj(query_obj) + file_uuid_list, custom_dest = get_file_list_from_query_obj(query_obj) # TODO: handle custom_dest if model.display_name() in models_using_sdxl and len(file_uuid_list): new_uuid_list = [] for file_uuid in file_uuid_list: From 3acd53bbff2ecce6e0e27e9252336b58cb15b705 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Fri, 22 Mar 2024 11:48:40 +0530 Subject: [PATCH 37/43] state reloading fixed --- sample_assets/interpolation_workflow.json | 1286 ----------------- scripts/entrypoint.bat | 2 + scripts/entrypoint.sh | 1 + scripts/linux_setup.sh | 35 + scripts/windows_setup.bat | 22 + ui_components/components/animate_shot_page.py | 40 +- .../components/video_rendering_page.py | 36 +- ui_components/constants.py | 3 +- .../methods/animation_style_methods.py | 105 +- ui_components/methods/video_methods.py | 10 - .../widgets/sm_animation_style_element.py | 51 +- .../widgets/variant_comparison_grid.py | 43 +- utils/constants.py | 3 + utils/media_processor/interpolator.py | 4 +- 14 files changed, 260 insertions(+), 1381 deletions(-) delete mode 100644 sample_assets/interpolation_workflow.json create mode 100644 scripts/entrypoint.bat create mode 100644 scripts/entrypoint.sh create mode 100644 scripts/linux_setup.sh create mode 100644 scripts/windows_setup.bat diff --git a/sample_assets/interpolation_workflow.json b/sample_assets/interpolation_workflow.json deleted file mode 100644 index f96fb13c..00000000 --- a/sample_assets/interpolation_workflow.json +++ /dev/null @@ -1,1286 +0,0 @@ -{ - "last_node_id": 366, - "last_link_id": 667, - "nodes": [ - { - "id": 354, - "type": "VHS_SplitImages", - "pos": [ - 2529.475905889869, - -1142.7707149068153 - ], - "size": { - "0": 315, - "1": 118 - }, - "flags": {}, - "order": 19, - "mode": 0, - "inputs": [ - { - "name": "images", - "type": "IMAGE", - "link": 653 - } - ], - "outputs": [ - { - "name": "IMAGE_A", - "type": "IMAGE", - "links": null, - "shape": 3 - }, - { - "name": "A_count", - "type": "INT", - "links": null, - "shape": 3 - }, - { - "name": "IMAGE_B", - "type": "IMAGE", - "links": [ - 654 - ], - "shape": 3, - "slot_index": 2 - }, - { - "name": "B_count", - "type": "INT", - "links": null, - "shape": 3, - "slot_index": 3 - } - ], - "properties": { - "Node name for S&R": "VHS_SplitImages" - }, - "widgets_values": { - "split_index": 4 - } - }, - { - "id": 292, - "type": "STMFNet VFI", - "pos": [ - 2568.475905889869, - -932.7707149068144 - ], - "size": { - "0": 443.4000244140625, - "1": 150 - }, - "flags": {}, - "order": 20, - "mode": 0, - "inputs": [ - { - "name": "frames", - "type": "IMAGE", - "link": 654 - }, - { - "name": "optional_interpolation_states", - "type": "INTERPOLATION_STATES", - "link": null - } - ], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 656 - ], - "shape": 3, - "slot_index": 0 - } - ], - "properties": { - "Node name for S&R": "STMFNet VFI" - }, - "widgets_values": [ - "stmfnet.pth", - 14, - 2, - true - ] - }, - { - "id": 281, - "type": "VHS_VideoCombine", - "pos": [ - 3351.7515134107794, - -1039.9403187565451 - ], - "size": [ - 320, - 496 - ], - "flags": {}, - "order": 21, - "mode": 0, - "inputs": [ - { - "name": "images", - "type": "IMAGE", - "link": 656 - } - ], - "outputs": [], - 
"properties": { - "Node name for S&R": "VHS_VideoCombine" - }, - "widgets_values": { - "frame_rate": 14, - "loop_count": 0, - "filename_prefix": "creative_interpolation_results/AD_", - "format": "video/h264-mp4", - "pingpong": false, - "save_image": true, - "crf": 20, - "videopreview": { - "hidden": false, - "paused": false, - "params": { - "filename": "AD__00059.mp4", - "subfolder": "creative_interpolation_results", - "type": "output", - "format": "video/h264-mp4" - } - } - } - }, - { - "id": 364, - "type": "Note", - "pos": [ - -416, - -964 - ], - "size": { - "0": 312.1289367675781, - "1": 245.289306640625 - }, - "flags": {}, - "order": 0, - "mode": 0, - "properties": { - "text": "" - }, - "widgets_values": [ - "Please set batch_size at frames_per_key_frame * number_of_key_frames + 4 - the 4 is to give additional buffer to solve issues with the uniform context scheduling. A hacky solution for now." - ], - "color": "#432", - "bgcolor": "#653" - }, - { - "id": 361, - "type": "Note", - "pos": [ - 682.4829725972701, - -283.2324170759726 - ], - "size": { - "0": 312.1289367675781, - "1": 245.289306640625 - }, - "flags": {}, - "order": 1, - "mode": 0, - "properties": { - "text": "" - }, - "widgets_values": [ - "Ipadapter weight also has a huge impact on the outcome - more = more adherence to the original images. Sometimes this can be too much control." - ], - "color": "#432", - "bgcolor": "#653" - }, - { - "id": 342, - "type": "ADE_AnimateDiffUniformContextOptions", - "pos": [ - 1465, - -1439 - ], - "size": { - "0": 315, - "1": 154 - }, - "flags": {}, - "order": 2, - "mode": 0, - "outputs": [ - { - "name": "CONTEXT_OPTIONS", - "type": "CONTEXT_OPTIONS", - "links": [ - 627 - ], - "shape": 3, - "slot_index": 0 - } - ], - "properties": { - "Node name for S&R": "ADE_AnimateDiffUniformContextOptions" - }, - "widgets_values": [ - 16, - 2, - 2, - "uniform", - false - ] - }, - { - "id": 362, - "type": "Note", - "pos": [ - 1884, - -1504 - ], - "size": { - "0": 312.1289367675781, - "1": 245.289306640625 - }, - "flags": {}, - "order": 3, - "mode": 0, - "properties": { - "text": "" - }, - "widgets_values": [ - "motion_scale can also be really useful - tuning it down can remove jolty motion." - ], - "color": "#432", - "bgcolor": "#653" - }, - { - "id": 363, - "type": "Note", - "pos": [ - 2651.475905889869, - -676.7707149068145 - ], - "size": { - "0": 312.1289367675781, - "1": 245.289306640625 - }, - "flags": {}, - "order": 4, - "mode": 0, - "properties": { - "text": "" - }, - "widgets_values": [ - "Increasing the multipler makes the video longer by creating new frames - it can result in some weird effects >2 but worth experimenting with for additional motion/fluidity." - ], - "color": "#432", - "bgcolor": "#653" - }, - { - "id": 359, - "type": "Note", - "pos": [ - -1737, - -576 - ], - "size": { - "0": 261.6278991699219, - "1": 118.72222137451172 - }, - "flags": {}, - "order": 5, - "mode": 0, - "properties": { - "text": "" - }, - "widgets_values": [ - "## You'll need to create this folder in ComfyUI/inputs and place your images inside it in numerical order - 1.png, 2.png, etc." 
- ], - "color": "#432", - "bgcolor": "#653" - }, - { - "id": 294, - "type": "IPAdapterModelLoader", - "pos": [ - 170, - -61 - ], - "size": { - "0": 315, - "1": 58 - }, - "flags": {}, - "order": 6, - "mode": 0, - "outputs": [ - { - "name": "IPADAPTER", - "type": "IPADAPTER", - "links": [ - 564 - ], - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "IPAdapterModelLoader" - }, - "widgets_values": [ - "ip-adapter_sd15.bin" - ] - }, - { - "id": 301, - "type": "IPAdapterApplyEncoded", - "pos": [ - 288, - -337 - ], - "size": { - "0": 315, - "1": 142 - }, - "flags": {}, - "order": 15, - "mode": 0, - "inputs": [ - { - "name": "ipadapter", - "type": "IPADAPTER", - "link": 564, - "slot_index": 0 - }, - { - "name": "embeds", - "type": "EMBEDS", - "link": 563, - "slot_index": 1 - }, - { - "name": "model", - "type": "MODEL", - "link": 571 - }, - { - "name": "attn_mask", - "type": "MASK", - "link": null - } - ], - "outputs": [ - { - "name": "MODEL", - "type": "MODEL", - "links": [ - 570 - ], - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "IPAdapterApplyEncoded" - }, - "widgets_values": [ - 0.6900000000000001 - ] - }, - { - "id": 300, - "type": "IPAdapterEncoder", - "pos": [ - -168, - -384 - ], - "size": { - "0": 315, - "1": 258 - }, - "flags": {}, - "order": 12, - "mode": 0, - "inputs": [ - { - "name": "clip_vision", - "type": "CLIP_VISION", - "link": 566, - "slot_index": 0 - }, - { - "name": "image_1", - "type": "IMAGE", - "link": 657 - }, - { - "name": "image_2", - "type": "IMAGE", - "link": null - }, - { - "name": "image_3", - "type": "IMAGE", - "link": null - }, - { - "name": "image_4", - "type": "IMAGE", - "link": null - } - ], - "outputs": [ - { - "name": "EMBEDS", - "type": "EMBEDS", - "links": [ - 563 - ], - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "IPAdapterEncoder" - }, - "widgets_values": [ - false, - 0, - 1, - 1, - 1, - 1 - ] - }, - { - "id": 323, - "type": "VHS_LoadImagesPath", - "pos": [ - -1775, - -829 - ], - "size": { - "0": 315, - "1": 170 - }, - "flags": {}, - "order": 7, - "mode": 0, - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 619, - 657, - 665 - ], - "shape": 3, - "slot_index": 0 - }, - { - "name": "MASK", - "type": "MASK", - "links": null, - "shape": 3 - }, - { - "name": "INT", - "type": "INT", - "links": [], - "shape": 3, - "slot_index": 2 - } - ], - "properties": { - "Node name for S&R": "VHS_LoadImagesPath" - }, - "widgets_values": { - "directory": "input", - "image_load_cap": 0, - "skip_first_images": 0, - "select_every_nth": 1 - }, - "color": "#332922", - "bgcolor": "#593930" - }, - { - "id": 360, - "type": "Note", - "pos": [ - 631, - -1677 - ], - "size": { - "0": 508.23260498046875, - "1": 385.82281494140625 - }, - "flags": {}, - "order": 8, - "mode": 0, - "properties": { - "text": "" - }, - "widgets_values": [ - "Key frames are **distributed either linearly of dynamically**. \n\nIf you set type_of_frame_distribution to linear, you need to set linear_frame_distribution_value to the gap you wish to have between each key frame - e.g. 16 would mean the frames are at 0, 16, 32, 48, etc.\n\nAlternatively, if you set it to dynamic, you can input the positions of the key frames in the text box below this.\n\nSimilarly, type_of_key_frame_influence determines how the frames influence the next ones. If you set it to linear, linear_key_frame_influence_value will determine how long every frame influences - 1.0 will make it normal, higher values will make it longer. 
If you set type_of_key_frame_influence to dynamic, you can input distinct values in linear_key_frame_influence_value for each frame.\n\nOther than this, please experiment with the different settings to achieve your desired effect:\n\n- frames_per_key_frame: How many frames to generate between each main key frame you provide.\n- cn_strength: How strong the control of the ControlNet should overall.\n- soft_scaled_cn_weights_multiplier: kinda similar to cn_strength but also different - please experiment!\n- interpolation: the type of interpolation from one image to the next.\n- buffer is the number of buffer frames places before your video - this is to prevent the end of the generation influencing the beginning due to how the context scheduler works." - ], - "color": "#432", - "bgcolor": "#653" - }, - { - "id": 352, - "type": "CLIPTextEncode", - "pos": [ - 10, - -962 - ], - "size": { - "0": 483.6921691894531, - "1": 182.32534790039062 - }, - "flags": { - "pinned": false - }, - "order": 14, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 644 - } - ], - "outputs": [ - { - "name": "CONDITIONING", - "type": "CONDITIONING", - "links": [ - 664 - ], - "slot_index": 0 - } - ], - "properties": { - "Node name for S&R": "CLIPTextEncode" - }, - "widgets_values": [ - "hands, hand, (worst quality, low quality:1.2)" - ], - "color": "#322", - "bgcolor": "#533" - }, - { - "id": 297, - "type": "CLIPVisionLoader", - "pos": [ - -505, - -29 - ], - "size": { - "0": 315, - "1": 58 - }, - "flags": {}, - "order": 9, - "mode": 0, - "outputs": [ - { - "name": "CLIP_VISION", - "type": "CLIP_VISION", - "links": [ - 566 - ], - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CLIPVisionLoader" - }, - "widgets_values": [ - "SD1.5/pytorch_model.bin" - ] - }, - { - "id": 207, - "type": "KSampler Adv. (Efficient)", - "pos": [ - 1510, - -1040 - ], - "size": { - "0": 325, - "1": 658 - }, - "flags": { - "pinned": false - }, - "order": 18, - "mode": 0, - "inputs": [ - { - "name": "model", - "type": "MODEL", - "link": 543 - }, - { - "name": "positive", - "type": "CONDITIONING", - "link": 666 - }, - { - "name": "negative", - "type": "CONDITIONING", - "link": 667 - }, - { - "name": "latent_image", - "type": "LATENT", - "link": 633 - }, - { - "name": "optional_vae", - "type": "VAE", - "link": 632 - }, - { - "name": "script", - "type": "SCRIPT", - "link": null, - "slot_index": 5 - } - ], - "outputs": [ - { - "name": "MODEL", - "type": "MODEL", - "links": [], - "shape": 3, - "slot_index": 0 - }, - { - "name": "CONDITIONING+", - "type": "CONDITIONING", - "links": [], - "shape": 3, - "slot_index": 1 - }, - { - "name": "CONDITIONING-", - "type": "CONDITIONING", - "links": [], - "shape": 3, - "slot_index": 2 - }, - { - "name": "LATENT", - "type": "LATENT", - "links": [], - "shape": 3, - "slot_index": 3 - }, - { - "name": "VAE", - "type": "VAE", - "links": [], - "shape": 3, - "slot_index": 4 - }, - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 653 - ], - "shape": 3, - "slot_index": 5 - } - ], - "title": "KSampler Adv. (Efficient), CN sampler", - "properties": { - "Node name for S&R": "KSampler Adv. 
(Efficient)" - }, - "widgets_values": [ - "enable", - 6, - null, - 20, - 10, - "ddpm", - "karras", - 5, - 20, - "disable", - "auto", - "true" - ], - "color": "#222233", - "bgcolor": "#333355", - "shape": 1 - }, - { - "id": 187, - "type": "ADE_AnimateDiffLoaderWithContext", - "pos": [ - 1916, - -840 - ], - "size": { - "0": 315, - "1": 190 - }, - "flags": { - "pinned": false - }, - "order": 17, - "mode": 0, - "inputs": [ - { - "name": "model", - "type": "MODEL", - "link": 570, - "slot_index": 0 - }, - { - "name": "context_options", - "type": "CONTEXT_OPTIONS", - "link": 627, - "slot_index": 1 - }, - { - "name": "motion_lora", - "type": "MOTION_LORA", - "link": null - }, - { - "name": "motion_model_settings", - "type": "MOTION_MODEL_SETTINGS", - "link": null, - "slot_index": 3 - } - ], - "outputs": [ - { - "name": "MODEL", - "type": "MODEL", - "links": [ - 543 - ], - "shape": 3, - "slot_index": 0 - } - ], - "properties": { - "Node name for S&R": "ADE_AnimateDiffLoaderWithContext" - }, - "widgets_values": [ - "mm_sd_v15_v2.ckpt", - "sqrt_linear (AnimateDiff)", - 0.8, - false - ], - "color": "#232", - "bgcolor": "#353" - }, - { - "id": 347, - "type": "BatchPromptSchedule", - "pos": [ - 95, - -1565 - ], - "size": { - "0": 418.2182922363281, - "1": 459.75811767578125 - }, - "flags": {}, - "order": 13, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 634 - } - ], - "outputs": [ - { - "name": "POS", - "type": "CONDITIONING", - "links": [ - 663 - ], - "shape": 3, - "slot_index": 0 - }, - { - "name": "NEG", - "type": "CONDITIONING", - "links": [], - "shape": 3, - "slot_index": 1 - } - ], - "properties": { - "Node name for S&R": "BatchPromptSchedule" - }, - "widgets_values": [ - "\"0\" :\"\",\n\"16\" :\"\",\n\"32\" :\"\",\n\"48\" :\"\",\n\"64\" :\"\",\n\"80\" :\"\",\n\"96\" :\"\"\n", - 324, - false, - "PRE", - "APP", - 0, - 0, - 0, - 0 - ] - }, - { - "id": 189, - "type": "Efficient Loader", - "pos": [ - -510, - -1656 - ], - "size": { - "0": 448.8115539550781, - "1": 605.9432983398438 - }, - "flags": { - "pinned": false - }, - "order": 10, - "mode": 0, - "inputs": [ - { - "name": "lora_stack", - "type": "LORA_STACK", - "link": null - }, - { - "name": "cnet_stack", - "type": "CONTROL_NET_STACK", - "link": null, - "slot_index": 1 - } - ], - "outputs": [ - { - "name": "MODEL", - "type": "MODEL", - "links": [ - 571 - ], - "shape": 3, - "slot_index": 0 - }, - { - "name": "CONDITIONING+", - "type": "CONDITIONING", - "links": [], - "shape": 3, - "slot_index": 1 - }, - { - "name": "CONDITIONING-", - "type": "CONDITIONING", - "links": [], - "shape": 3, - "slot_index": 2 - }, - { - "name": "LATENT", - "type": "LATENT", - "links": [ - 633 - ], - "shape": 3, - "slot_index": 3 - }, - { - "name": "VAE", - "type": "VAE", - "links": [ - 632 - ], - "shape": 3, - "slot_index": 4 - }, - { - "name": "CLIP", - "type": "CLIP", - "links": [ - 634, - 644 - ], - "shape": 3, - "slot_index": 5 - }, - { - "name": "DEPENDENCIES", - "type": "DEPENDENCIES", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "Efficient Loader" - }, - "widgets_values": [ - "Realistic_Vision_V5.0.safetensors", - "vae-ft-mse-840000-ema-pruned.safetensors", - -2, - "None", - 1, - 1, - "", - "", - "none", - "comfy", - 512, - 512, - 68 - ], - "color": "#223333", - "bgcolor": "#335555", - "shape": 1 - }, - { - "id": 365, - "type": "BatchCreativeInterpolation", - "pos": [ - 624, - -1205 - ], - "size": { - "0": 488.5699157714844, - "1": 456 - }, - "flags": {}, - "order": 16, - "mode": 0, - "inputs": [ - 
{ - "name": "positive", - "type": "CONDITIONING", - "link": 663 - }, - { - "name": "negative", - "type": "CONDITIONING", - "link": 664 - }, - { - "name": "images", - "type": "IMAGE", - "link": 665 - } - ], - "outputs": [ - { - "name": "positive", - "type": "CONDITIONING", - "links": [ - 666 - ], - "shape": 3, - "slot_index": 0 - }, - { - "name": "negative", - "type": "CONDITIONING", - "links": [ - 667 - ], - "shape": 3, - "slot_index": 1 - } - ], - "properties": { - "Node name for S&R": "BatchCreativeInterpolation" - }, - "widgets_values": [ - "control_v11f1e_sd15_tile_fp16.safetensors", - "linear", - 16, - "0,10,26,40,46", - "linear", - 1.1, - "0.5,0.5,2.0,0.5", - "dynamic", - 0.5, - "0.8,0.2,0.2,0.8", - 0.85, - "ease-in-out", - 4 - ] - }, - { - "id": 332, - "type": "PreviewImage", - "pos": [ - -1460, - -1290 - ], - "size": { - "0": 380, - "1": 250 - }, - "flags": {}, - "order": 11, - "mode": 0, - "inputs": [ - { - "name": "images", - "type": "IMAGE", - "link": 619 - } - ], - "properties": { - "Node name for S&R": "PreviewImage" - } - } - ], - "links": [ - [ - 543, - 187, - 0, - 207, - 0, - "MODEL" - ], - [ - 563, - 300, - 0, - 301, - 1, - "EMBEDS" - ], - [ - 564, - 294, - 0, - 301, - 0, - "IPADAPTER" - ], - [ - 566, - 297, - 0, - 300, - 0, - "CLIP_VISION" - ], - [ - 570, - 301, - 0, - 187, - 0, - "MODEL" - ], - [ - 571, - 189, - 0, - 301, - 2, - "MODEL" - ], - [ - 619, - 323, - 0, - 332, - 0, - "IMAGE" - ], - [ - 627, - 342, - 0, - 187, - 1, - "CONTEXT_OPTIONS" - ], - [ - 632, - 189, - 4, - 207, - 4, - "VAE" - ], - [ - 633, - 189, - 3, - 207, - 3, - "LATENT" - ], - [ - 634, - 189, - 5, - 347, - 0, - "CLIP" - ], - [ - 644, - 189, - 5, - 352, - 0, - "CLIP" - ], - [ - 653, - 207, - 5, - 354, - 0, - "IMAGE" - ], - [ - 654, - 354, - 2, - 292, - 0, - "IMAGE" - ], - [ - 656, - 292, - 0, - 281, - 0, - "IMAGE" - ], - [ - 657, - 323, - 0, - 300, - 1, - "IMAGE" - ], - [ - 663, - 347, - 0, - 365, - 0, - "CONDITIONING" - ], - [ - 664, - 352, - 0, - 365, - 1, - "CONDITIONING" - ], - [ - 665, - 323, - 0, - 365, - 2, - "IMAGE" - ], - [ - 666, - 365, - 0, - 207, - 1, - "CONDITIONING" - ], - [ - 667, - 365, - 1, - 207, - 2, - "CONDITIONING" - ] - ], - "groups": [ - { - "title": "IPAdapter", - "bounding": [ - -570, - -485, - 1674, - 582 - ], - "color": "#3f789e", - "font_size": 24 - }, - { - "title": "First sampler, interpolation", - "bounding": [ - 1418, - -1603, - 862, - 1341 - ], - "color": "#8A8", - "font_size": 24 - }, - { - "title": "Increase Framerate", - "bounding": [ - 2458, - -1259, - 657, - 898 - ], - "color": "#8A8", - "font_size": 24 - }, - { - "title": "Saving", - "bounding": [ - 3325, - -1153, - 375, - 657 - ], - "color": "#a1309b", - "font_size": 24 - }, - { - "title": "Group", - "bounding": [ - -602, - -1770, - 1783, - 1088 - ], - "color": "#3f789e", - "font_size": 24 - }, - { - "title": "Group", - "bounding": [ - -1849, - -1462, - 914, - 1124 - ], - "color": "#3f789e", - "font_size": 24 - } - ], - "config": {}, - "extra": {}, - "version": 0.4 - } \ No newline at end of file diff --git a/scripts/entrypoint.bat b/scripts/entrypoint.bat new file mode 100644 index 00000000..c6ccf2ae --- /dev/null +++ b/scripts/entrypoint.bat @@ -0,0 +1,2 @@ +@echo off +streamlit run app.py --runner.fastReruns false --server.port 5500 \ No newline at end of file diff --git a/scripts/entrypoint.sh b/scripts/entrypoint.sh new file mode 100644 index 00000000..ec0ff19e --- /dev/null +++ b/scripts/entrypoint.sh @@ -0,0 +1 @@ +streamlit run app.py --runner.fastReruns false --server.port 5500 \ No newline at end of 
file diff --git a/scripts/linux_setup.sh b/scripts/linux_setup.sh new file mode 100644 index 00000000..cfc91fd9 --- /dev/null +++ b/scripts/linux_setup.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Store the current directory path +current_dir="$(pwd)" + +# Define the project directory path +project_dir="$current_dir/Dough" + +# Check if the "Dough" directory doesn't exist and we're not already inside it +if [ ! -d "$project_dir" ] && [ "$(basename "$current_dir")" != "Dough" ]; then + # Clone the git repo + git clone --depth 1 -b main https://github.com/banodoco/Dough.git "$project_dir" + cd "$project_dir" + git clone --depth 1 -b feature/package https://github.com/piyushK52/comfy_runner.git + git clone https://github.com/comfyanonymous/ComfyUI.git + + # Create virtual environment + python3 -m venv "dough-env" + + # Install system dependencies + if command -v sudo &> /dev/null; then + sudo apt-get update && sudo apt-get install -y libpq-dev python3.10-dev + else + apt-get update && apt-get install -y libpq-dev python3.10-dev + fi + + # Install Python dependencies + echo $(pwd) + . ./dough-env/bin/activate && pip install -r "requirements.txt" + . ./dough-env/bin/activate && pip install -r "comfy_runner/requirements.txt" + . ./dough-env/bin/activate && pip install -r "ComfyUI/requirements.txt" + + # Copy the environment file + cp "$project_dir/.env.sample" "$project_dir/.env" +fi diff --git a/scripts/windows_setup.bat b/scripts/windows_setup.bat new file mode 100644 index 00000000..858b9e87 --- /dev/null +++ b/scripts/windows_setup.bat @@ -0,0 +1,22 @@ +@echo off +set "folderName=Dough" +if not exist "%folderName%\" ( + if /i not "%CD%"=="%~dp0%folderName%\" ( + git clone --depth 1 -b main https://github.com/banodoco/Dough.git + cd Dough + git clone --depth 1 -b feature/package https://github.com/piyushK52/comfy_runner.git + git clone https://github.com/comfyanonymous/ComfyUI.git + python -m venv dough-env + call dough-env\Scripts\activate.bat + python.exe -m pip install --upgrade pip + pip install -r requirements.txt + pip install websocket + pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 + pip install -r comfy_runner\requirements.txt + pip install -r ComfyUI\requirements.txt + call dough-env\Scripts\deactivate.bat + copy .env.sample .env + cd .. 
+ pause + ) +) \ No newline at end of file diff --git a/ui_components/components/animate_shot_page.py b/ui_components/components/animate_shot_page.py index 61e2c1f0..8bc7964f 100644 --- a/ui_components/components/animate_shot_page.py +++ b/ui_components/components/animate_shot_page.py @@ -6,8 +6,11 @@ from ui_components.widgets.frame_selector import frame_selector_widget from ui_components.widgets.variant_comparison_grid import variant_comparison_grid from utils import st_memory +from utils.constants import AnimateShotMethod from utils.data_repo.data_repo import DataRepo from ui_components.widgets.sidebar_logger import sidebar_logger +from utils.enum import ExtendedEnum + def animate_shot_page(shot_uuid: str, h2): data_repo = DataRepo() @@ -35,16 +38,27 @@ def video_rendering_page(shot_uuid, selected_variant): shot = data_repo.get_shot_from_uuid(shot_uuid) file_uuid_list = [] - # loading images from a particular video variant - if selected_variant: - log = data_repo.get_inference_log_from_uuid(selected_variant) - shot_data = json.loads(log.input_params) - file_uuid_list = shot_data.get('origin_data', json.dumps({})).get('settings', {}).get('file_uuid_list', []) - # picking current images if no variant is selected - else: - for timing in shot.timing_list: - if timing.primary_image and timing.primary_image.location: - file_uuid_list.append(timing.primary_image.uuid) + if f"type_of_animation_{shot.uuid}" not in st.session_state: + st.session_state[f"type_of_animation_{shot.uuid}"] = 0 + if st.session_state[f"type_of_animation_{shot.uuid}"] == 0: # AnimateShotMethod.BATCH_CREATIVE_INTERPOLATION.value + # loading images from a particular video variant + if selected_variant: + log = data_repo.get_inference_log_from_uuid(selected_variant) + shot_data = json.loads(log.input_params) + file_uuid_list = shot_data.get('origin_data', json.dumps({})).get('settings', {}).get('file_uuid_list', []) + # picking current images if no variant is selected + else: + for timing in shot.timing_list: + if timing.primary_image and timing.primary_image.location: + file_uuid_list.append(timing.primary_image.uuid) + else: # AnimateShotMethod.DYNAMICRAFTER_INTERPOLATION.value + # hackish sol, will fix later + for idx in range(2): + if f'img{idx+1}_uuid_{shot_uuid}' in st.session_state and st.session_state[f'img{idx+1}_uuid_{shot_uuid}']: + file_uuid_list.append(st.session_state[f'img{idx+1}_uuid_{shot_uuid}']) + + if not (f'video_desc_{shot_uuid}' in st.session_state and st.session_state[f'video_desc_{shot_uuid}']): + st.session_state[f'video_desc_{shot_uuid}'] = "" img_list = data_repo.get_all_file_list(uuid__in=file_uuid_list, file_type=InternalFileType.IMAGE.value)[0] @@ -54,9 +68,11 @@ def video_rendering_page(shot_uuid, selected_variant): st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") with headline3: with st.expander("Type of animation", expanded=False): - type_of_animation = st_memory.radio("What type of animation would you like to generate?", options=["Batch Creative Interpolation", "2-Image Realistic Interpolation (beta)"],horizontal=True, help="**Batch Creative Interpolaton** lets you input multple images and control the motion and style of each frame - resulting in a fluid, surreal and highly-controllable motion. 
\n\n **2-Image Realistic Interpolation** is a simpler way to generate animations - it generates a video by interpolating between two images, and is best for realistic motion.",key=f"type_of_animation_{shot.uuid}") + type_of_animation = st_memory.radio("What type of animation would you like to generate?", \ + options=AnimateShotMethod.value_list(), horizontal=True, \ + help="**Batch Creative Interpolaton** lets you input multple images and control the motion and style of each frame - resulting in a fluid, surreal and highly-controllable motion. \n\n **2-Image Realistic Interpolation** is a simpler way to generate animations - it generates a video by interpolating between two images, and is best for realistic motion.",key=f"type_of_animation_{shot.uuid}") - if type_of_animation == "Batch Creative Interpolation": + if type_of_animation == AnimateShotMethod.BATCH_CREATIVE_INTERPOLATION.value: sm_video_rendering_page(shot_uuid, img_list) else: two_img_realistic_interpolation_page(shot_uuid, img_list) diff --git a/ui_components/components/video_rendering_page.py b/ui_components/components/video_rendering_page.py index 89712979..5e05f84f 100644 --- a/ui_components/components/video_rendering_page.py +++ b/ui_components/components/video_rendering_page.py @@ -7,7 +7,7 @@ select_motion_lora_element, select_sd_model_element, video_motion_settings from ui_components.models import InternalFileObject, InternalShotObject from ui_components.methods.animation_style_methods import toggle_generate_inference, transform_data, \ - update_session_state_with_animation_details + update_session_state_with_animation_details, update_session_state_with_dc_details from ui_components.methods.video_methods import create_single_interpolated_clip from utils import st_memory from utils.data_repo.data_repo import DataRepo @@ -39,7 +39,7 @@ def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): # ----------- OTHER SETTINGS ------------ strength_of_adherence, overall_positive_prompt, \ - overall_negative_prompt, type_of_motion_context = video_motion_settings(shot_uuid, img_list) + overall_negative_prompt, type_of_motion_context, amount_of_motion = video_motion_settings(shot_uuid, img_list) type_of_frame_distribution = "dynamic" type_of_key_frame_influence = "dynamic" @@ -56,7 +56,6 @@ def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): motion_scale = 1.3 interpolation_style = 'ease-in-out' buffer = 4 - amount_of_motion = 1.3 (dynamic_strength_values, dynamic_key_frame_influence_values, dynamic_frame_distribution_values, context_length, context_stride, context_overlap, multipled_base_end_percent, multipled_base_adapter_strength, @@ -109,6 +108,7 @@ def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): individual_negative_prompts=negative_prompt_travel, animation_stype=AnimationStyleType.CREATIVE_INTERPOLATION.value, max_frames=str(dynamic_frame_distribution_values[-1]), + amount_of_motion=amount_of_motion, lora_data=lora_data, shot_data=shot_meta_data, pil_img_structure_control_image=st.session_state[f"structure_control_image_{shot.uuid}"], # this is a PIL object @@ -127,6 +127,17 @@ def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): # last keyframe position * 16 duration = float(dynamic_frame_distribution_values[-1] / 16) data_repo.update_shot(uuid=shot_uuid, duration=duration) + + # converting PIL imgs to InternalFileObject + from ui_components.methods.common_methods import save_new_image + key = "pil_img_structure_control_image" + 
image = None + if settings[key]: + image = save_new_image(settings[key], shot.project.uuid) + del settings[key] + new_key = key.replace("pil_img_", "") + "_uuid" + settings[new_key] = image.uuid + shot_data = update_session_state_with_animation_details( shot_uuid, img_list, @@ -138,7 +149,9 @@ def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): individual_prompts, individual_negative_prompts, lora_data, - default_model + default_model, + image.uuid if image else None, + settings["strength_of_structure_control_image"] ) settings.update(shot_data=shot_data) vid_quality = "full" @@ -227,7 +240,8 @@ def two_img_realistic_interpolation_page(shot_uuid, img_list): st.image(img_list[1].location, use_column_width=True) with col2: - description_of_motion = st_memory.text_area("Describe the motion you want between the frames:", key=f"description_of_motion_{shot.uuid}") + description_of_motion = st.text_area("Describe the motion you want between the frames:", \ + key=f"description_of_motion_{shot.uuid}", value=st.session_state[f'video_desc_{shot_uuid}']) st.info("This is very important and will likely require some iteration.") variant_count = 1 # Assuming a default value for variant_count, adjust as necessary @@ -235,14 +249,18 @@ def two_img_realistic_interpolation_page(shot_uuid, img_list): position = "dynamiccrafter" if f"{position}_generate_inference" in st.session_state and st.session_state[f"{position}_generate_inference"]: - st.success("Generating clip - see status in the Generation Log in the sidebar. Press 'Refresh log' to update.") # Assuming the logic to generate the clip based on two images, the described motion, and fixed duration duration = 4 # Fixed duration of 4 seconds data_repo.update_shot(uuid=shot.uuid, duration=duration) project_settings = data_repo.get_project_setting(shot.project.uuid) - + meta_data = update_session_state_with_dc_details( + shot_uuid, + img_list, + description_of_motion, + ) + settings.update(shot_data=meta_data) settings.update( duration= duration, animation_style=AnimationStyleType.DIRECT_MORPHING.value, @@ -263,10 +281,6 @@ def two_img_realistic_interpolation_page(shot_uuid, img_list): backlog_update = {f'{shot_uuid}_backlog_enabled': False} toggle_generate_inference(position, **backlog_update) st.rerun() - - - # Placeholder for the logic to generate the clip and update session state as needed - # This should include calling the function that handles the interpolation process with the updated settings # Buttons for adding to queue or backlog, assuming these are still relevant btn1, btn2, btn3 = st.columns([1, 1, 1]) diff --git a/ui_components/constants.py b/ui_components/constants.py index 64f2d2ee..b839dd58 100644 --- a/ui_components/constants.py +++ b/ui_components/constants.py @@ -17,7 +17,8 @@ class CreativeProcessType(ExtendedEnum): MOTION = "Shots" class ShotMetaData(ExtendedEnum): - MOTION_DATA = "motion_data" # {"timing_data": [...]} + MOTION_DATA = "motion_data" # {"timing_data": [...], "main_setting_data": {}} + DYNAMICRAFTER_DATA = "dynamicrafter_data" class GalleryImageViewType(ExtendedEnum): EXPLORER_ONLY = "explorer" diff --git a/ui_components/methods/animation_style_methods.py b/ui_components/methods/animation_style_methods.py index bf681976..059591bf 100644 --- a/ui_components/methods/animation_style_methods.py +++ b/ui_components/methods/animation_style_methods.py @@ -6,6 +6,7 @@ from backend.models import InternalFileObject from shared.constants import InferenceParamType from ui_components.constants import 
DEFAULT_SHOT_MOTION_VALUES, ShotMetaData +from utils.constants import AnimateShotMethod from utils.data_repo.data_repo import DataRepo import numpy as np import matplotlib.pyplot as plt @@ -17,10 +18,15 @@ def get_generation_settings_from_log(log_uuid=None): input_params = json.loads(log.input_params) if log.input_params else {} query_obj = json.loads(input_params.get(InferenceParamType.QUERY_DICT.value, json.dumps({}))) shot_meta_data = query_obj['data'].get('data', {}).get("shot_data", {}) - shot_meta_data = json.loads(shot_meta_data.get("motion_data")) if shot_meta_data \ - and "motion_data" in shot_meta_data else None + data_type = None + if shot_meta_data and ShotMetaData.MOTION_DATA.value in shot_meta_data: + data_type = ShotMetaData.MOTION_DATA.value + elif shot_meta_data and ShotMetaData.DYNAMICRAFTER_DATA.value in shot_meta_data: + data_type = ShotMetaData.DYNAMICRAFTER_DATA.value - return shot_meta_data + shot_meta_data = json.loads(shot_meta_data.get(data_type)) if data_type else None + + return shot_meta_data, data_type def load_shot_settings(shot_uuid, log_uuid=None): data_repo = DataRepo() @@ -31,29 +37,41 @@ def load_shot_settings(shot_uuid, log_uuid=None): if not log_uuid: shot_meta_data = shot.meta_data_dict.get(ShotMetaData.MOTION_DATA.value, json.dumps({})) shot_meta_data = json.loads(shot_meta_data) + data_type = None st.session_state[f"{shot_uuid}_selected_variant_log_uuid"] = None # loading settings from that particular log else: - shot_meta_data = get_generation_settings_from_log(log_uuid) + shot_meta_data, data_type = get_generation_settings_from_log(log_uuid) st.session_state[f"{shot_uuid}_selected_variant_log_uuid"] = log_uuid if shot_meta_data: - # updating timing data - timing_data = shot_meta_data.get("timing_data", []) - for idx, _ in enumerate(shot.timing_list): # fix: check how the image list is being stored here and use that instead - # setting default parameters (fetching data from the shot if it's present) - if timing_data and len(timing_data) >= idx + 1: - motion_data = timing_data[idx] - - for k, v in motion_data.items(): - st.session_state[f'{k}_{shot_uuid}_{idx}'] = v - - # updating other settings main settings - main_setting_data = shot_meta_data.get("main_setting_data", {}) - for key in main_setting_data: + if not data_type or data_type == ShotMetaData.MOTION_DATA.value: + st.session_state[f"type_of_animation_{shot.uuid}"] = 0 + # updating timing data + timing_data = shot_meta_data.get("timing_data", []) + for idx, _ in enumerate(shot.timing_list): # fix: check how the image list is being stored here and use that instead + # setting default parameters (fetching data from the shot if it's present) + if timing_data and len(timing_data) >= idx + 1: + motion_data = timing_data[idx] + + for k, v in motion_data.items(): + st.session_state[f'{k}_{shot_uuid}_{idx}'] = v + + # updating other settings main settings + main_setting_data = shot_meta_data.get("main_setting_data", {}) + for key in main_setting_data: + st.session_state[key] = main_setting_data[key] + if key == f"structure_control_image_uuid_{shot_uuid}" and not main_setting_data[key]: # hackish sol, will fix later + st.session_state[f"structure_control_image_{shot_uuid}"] = None + + st.rerun() + elif data_type == ShotMetaData.DYNAMICRAFTER_DATA.value: + st.session_state[f"type_of_animation_{shot.uuid}"] = 1 + main_setting_data = shot_meta_data.get("main_setting_data", {}) + for key in main_setting_data: st.session_state[key] = main_setting_data[key] - st.rerun() + st.rerun() else: for idx, _ in 
enumerate(shot.timing_list): # fix: check how the image list is being stored here for k, v in DEFAULT_SHOT_MOTION_VALUES.items(): @@ -363,7 +381,20 @@ def format_frame_prompts_with_buffer(frame_numbers, individual_prompts, buffer): return output_strength, output_speeds, cumulative_distances, context_length, context_stride, context_overlap, multipled_base_end_percent, multipled_base_adapter_strength, formatted_individual_prompts, formatted_individual_negative_prompts,motions_during_frames -def update_session_state_with_animation_details(shot_uuid, img_list: List[InternalFileObject], strength_of_frames, distances_to_next_frames, speeds_of_transitions, freedoms_between_frames, motions_during_frames, individual_prompts, individual_negative_prompts, lora_data, default_model): +def update_session_state_with_animation_details(shot_uuid, + img_list: List[InternalFileObject], + strength_of_frames, + distances_to_next_frames, + speeds_of_transitions, + freedoms_between_frames, + motions_during_frames, + individual_prompts, + individual_negative_prompts, + lora_data, + default_model, + structure_control_img_uuid = None, + strength_of_structure_control_img = None + ): data_repo = DataRepo() shot = data_repo.get_shot_from_uuid(shot_uuid) meta_data = shot.meta_data_dict @@ -398,7 +429,9 @@ def update_session_state_with_animation_details(shot_uuid, img_list: List[Intern main_setting_data[f"type_of_motion_context_index_{shot.uuid}"] = st.session_state["type_of_motion_context"] main_setting_data[f"positive_prompt_video_{shot.uuid}"] = st.session_state["overall_positive_prompt"] main_setting_data[f"negative_prompt_video_{shot.uuid}"] = st.session_state["overall_negative_prompt"] - # main_setting_data[f"amount_of_motion_{shot.uuid}"] = st.session_state["amount_of_motion"] + main_setting_data[f"amount_of_motion_{shot.uuid}"] = st.session_state["amount_of_motion_overall"] + main_setting_data[f"structure_control_image_uuid_{shot.uuid}"] = structure_control_img_uuid + main_setting_data[f"saved_strength_of_structure_control_image_{shot.uuid}"] = strength_of_structure_control_img checkpoints_dir = "ComfyUI/models/checkpoints" all_files = os.listdir(checkpoints_dir) @@ -411,8 +444,7 @@ def update_session_state_with_animation_details(shot_uuid, img_list: List[Intern else: main_setting_data[f'ckpt_{shot.uuid}'] = default_model - meta_data.update( - { + update_data = { ShotMetaData.MOTION_DATA.value : json.dumps( { "timing_data": timing_data, @@ -420,7 +452,32 @@ def update_session_state_with_animation_details(shot_uuid, img_list: List[Intern } ) } - ) + meta_data.update(update_data) + data_repo.update_shot(**{"uuid": shot_uuid, "meta_data": json.dumps(meta_data)}) + return update_data + +# saving dynamic crafter generation details +def update_session_state_with_dc_details( + shot_uuid, + img_list, + video_desc +): + data_repo = DataRepo() + shot = data_repo.get_shot_from_uuid(shot_uuid) + meta_data = shot.meta_data_dict + main_setting_data = {} + for idx, img in enumerate(img_list): + main_setting_data[f'img{idx+1}_uuid_{shot_uuid}'] = img.uuid + + main_setting_data[f'video_desc_{shot_uuid}'] = video_desc + update_data = { + ShotMetaData.DYNAMICRAFTER_DATA.value : json.dumps( + { + "main_setting_data": main_setting_data + } + ) + } + meta_data.update(update_data) data_repo.update_shot(**{"uuid": shot_uuid, "meta_data": json.dumps(meta_data)}) - return meta_data + return update_data \ No newline at end of file diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index 
681c31fd..1215a4c7 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -42,18 +42,8 @@ def create_single_interpolated_clip(shot_uuid, quality, settings={}, variant_cou elif quality == 'preview': interpolation_steps = 3 - img_list = [t.primary_image.location for t in timing_list] settings.update(interpolation_steps=interpolation_steps) settings.update(file_uuid_list=[t.primary_image.uuid for t in timing_list]) - - # converting PIL imgs to InternalFileObject - from ui_components.methods.common_methods import save_new_image - for key in settings.keys(): - if key.startswith("pil_img_") and settings[key]: - image = save_new_image(settings[key], shot.project.uuid) - del settings[key] - new_key = key.replace("pil_img_", "") + "_uuid" - settings[new_key] = image.uuid # res is an array of tuples (video_bytes, log) res = VideoInterpolator.create_interpolated_clip( diff --git a/ui_components/widgets/sm_animation_style_element.py b/ui_components/widgets/sm_animation_style_element.py index 94242b7d..8f9dd4aa 100644 --- a/ui_components/widgets/sm_animation_style_element.py +++ b/ui_components/widgets/sm_animation_style_element.py @@ -154,7 +154,7 @@ def update_motion_for_all_frames(shot_uuid, timing_list): if f"type_of_motion_context_index_{shot_uuid}" in st.session_state and isinstance(st.session_state[f"type_of_motion_context_index_{shot_uuid}"], str): st.session_state[f"type_of_motion_context_index_{shot_uuid}"] = ["Low", "Standard", "High"].index(st.session_state[f"type_of_motion_context_index_{shot_uuid}"]) type_of_motion_context = st.radio("Type of motion context:", options=["Low", "Standard", "High"], key="type_of_motion_context", horizontal=True, index=st.session_state[f"type_of_motion_context_index_{shot.uuid}"], help="This is how much the motion will be informed by the previous and next frames. 'High' can make it smoother but increase artifacts - while 'Low' make the motion less smooth but removes artifacts. 
Naturally, we recommend Standard.") - st.session_state[f"amount_of_motion_{shot_uuid}"] = st.slider("Amount of motion:", min_value=0.5, max_value=1.5, step=0.01,value=1.3, key="amount_of_motion_overall", on_change=lambda: update_motion_for_all_frames(shot.uuid, img_list), help="You can also tweak this on an individual frame level in the advanced settings above.") + amount_of_motion = st.slider("Amount of motion:", min_value=0.5, max_value=1.5, step=0.01, value=st.session_state[f"amount_of_motion_{shot_uuid}"], key="amount_of_motion_overall", on_change=lambda: update_motion_for_all_frames(shot.uuid, img_list), help="You can also tweak this on an individual frame level in the advanced settings above.") i1, i2, i3 = st.columns([1, 0.5, 1.5]) with i1: @@ -166,18 +166,27 @@ def update_motion_for_all_frames(shot_uuid, timing_list): control_motion_with_image = st_memory.toggle("Control motion with an image", help="This will allow you to upload images to control the motion of the video.",key=f"control_motion_with_image_{shot_uuid}") if control_motion_with_image: - uploaded_image = st.file_uploader("Upload images to control motion", type=["png", "jpg", "jpeg"], accept_multiple_files=False) - if st.button("Add image", key="add_images"): - if uploaded_image: - project_settings = data_repo.get_project_setting(shot.project.uuid) - width, height = project_settings.width, project_settings.height - # Convert the uploaded image file to PIL Image - uploaded_image_pil = Image.open(uploaded_image) - uploaded_image_pil = uploaded_image_pil.resize((width, height)) - st.session_state[f"structure_control_image_{shot.uuid}"] = uploaded_image_pil - st.rerun() - else: - st.warning("No images uploaded") + project_settings = data_repo.get_project_setting(shot.project.uuid) + width, height = project_settings.width, project_settings.height + if f"structure_control_image_uuid_{shot_uuid}" in st.session_state and st.session_state[f"structure_control_image_uuid_{shot_uuid}"]: + uploaded_image = data_repo.get_file_from_uuid(st.session_state[f"structure_control_image_uuid_{shot_uuid}"]) + uploaded_image_pil = Image.open(uploaded_image.location) + uploaded_image_pil = uploaded_image_pil.resize((width, height)) + st.session_state[f"structure_control_image_{shot.uuid}"] = uploaded_image_pil + else: + st.session_state[f"structure_control_image_uuid_{shot_uuid}"] = None + st.session_state[f"saved_strength_of_structure_control_image_{shot_uuid}"] = None + uploaded_image = st.file_uploader("Upload images to control motion", type=["png", "jpg", "jpeg"], accept_multiple_files=False) + + if st.button("Add image", key="add_images"): + if uploaded_image: + # Convert the uploaded image file to PIL Image + uploaded_image_pil = Image.open(uploaded_image) if not isinstance(uploaded_image, Image.Image) else uploaded_image + uploaded_image_pil = uploaded_image_pil.resize((width, height)) + st.session_state[f"structure_control_image_{shot.uuid}"] = uploaded_image_pil + st.rerun() + else: + st.warning("No images uploaded") else: st.session_state[f"structure_control_image_{shot_uuid}"] = None @@ -185,13 +194,22 @@ def update_motion_for_all_frames(shot_uuid, timing_list): if f"structure_control_image_{shot_uuid}" in st.session_state and st.session_state[f"structure_control_image_{shot_uuid}"]: st.info("Control image:") st.image(st.session_state[f"structure_control_image_{shot_uuid}"]) - st.session_state[f"strength_of_structure_control_image_{shot_uuid}"] = st.slider("Strength of control image:", min_value=0.0, max_value=1.0, step=0.01, 
key="strength_of_structure_control_image", value=0.5, help="This is how much the control image will influence the motion of the video.") + st.session_state[f"strength_of_structure_control_image_{shot_uuid}"] = st.slider( + "Strength of control image:", + min_value=0.0, + max_value=1.0, + step=0.01, + key="strength_of_structure_control_image", + value=st.session_state[f"saved_strength_of_structure_control_image_{shot_uuid}"], + help="This is how much the control image will influence the motion of the video." + ) if st.button("Remove image", key="remove_images"): st.session_state[f"structure_control_image_{shot_uuid}"] = None + st.session_state[f"structure_control_image_uuid_{shot_uuid}"] = None st.success("Image removed") st.rerun() - return strength_of_adherence, overall_positive_prompt, overall_negative_prompt, type_of_motion_context + return strength_of_adherence, overall_positive_prompt, overall_negative_prompt, type_of_motion_context, amount_of_motion def select_motion_lora_element(shot_uuid, model_files): data_repo = DataRepo() @@ -399,7 +417,8 @@ def select_sd_model_element(shot_uuid, default_model): else: model_files = [file for file in all_files if file.endswith('.safetensors') or file.endswith('.ckpt')] - model_files = [file for file in model_files if "xl" not in file] + ignored_model_list = ["dynamicrafter_512_interp_v1.ckpt"] + model_files = [file for file in model_files if "xl" not in file and file not in ignored_model_list] sd_model_dict = { "Anything V3 FP16 Pruned": { diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index 3ffc9f70..45d30625 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -6,7 +6,7 @@ import os from PIL import Image from shared.constants import AIModelCategory, InferenceParamType, InternalFileTag -from ui_components.constants import CreativeProcessType +from ui_components.constants import CreativeProcessType, ShotMetaData from ui_components.methods.animation_style_methods import get_generation_settings_from_log, load_shot_settings from ui_components.methods.common_methods import promote_image_variant, promote_video_variant from ui_components.methods.file_methods import create_duplicate_file @@ -205,28 +205,31 @@ def variant_inference_detail_element(variant: InternalFileObject, stage, shot_uu if open_data: with st.expander("Settings", expanded=False): - shot_meta_data = get_generation_settings_from_log(variant.inference_log.uuid) + shot_meta_data, data_type = get_generation_settings_from_log(variant.inference_log.uuid) if shot_meta_data and shot_meta_data.get("main_setting_data", None): st.markdown("##### Main settings ---") - for k, v in shot_meta_data.get("main_setting_data", {}).items(): - # Bold the title - title = f"**{k.split(str(shot.uuid))[0][:-1]}:**" - - # Check if the key starts with 'lora_data' - if k.startswith('lora_data'): - if isinstance(v, list) and len(v) > 0: # Check if v is a list and has more than 0 items - # Handle lora_data differently to format each item in the list - lora_items = [f"- {item.get('filename', 'No filename')} - {item.get('lora_strength', 'No strength')} strength" for item in v] - lora_data_formatted = "\n".join(lora_items) - st.markdown(f"{title} \n{lora_data_formatted}", unsafe_allow_html=True) - # If there are no items in the list, do not display anything for lora_data - else: - # For other keys, display as before but with the title in bold and using a colon - if v: # Check if v is not 
empty or None - st.markdown(f"{title} {v}", unsafe_allow_html=True) + if data_type == ShotMetaData.MOTION_DATA.value: + for k, v in shot_meta_data.get("main_setting_data", {}).items(): + # Bold the title + title = f"**{k.split(str(shot.uuid))[0][:-1]}:**" + + # Check if the key starts with 'lora_data' + if k.startswith('lora_data'): + if isinstance(v, list) and len(v) > 0: # Check if v is a list and has more than 0 items + # Handle lora_data differently to format each item in the list + lora_items = [f"- {item.get('filename', 'No filename')} - {item.get('lora_strength', 'No strength')} strength" for item in v] + lora_data_formatted = "\n".join(lora_items) + st.markdown(f"{title} \n{lora_data_formatted}", unsafe_allow_html=True) + # If there are no items in the list, do not display anything for lora_data else: - # Optionally handle empty or None values differently here - pass + # For other keys, display as before but with the title in bold and using a colon + if v: # Check if v is not empty or None + st.markdown(f"{title} {v}", unsafe_allow_html=True) + else: + # Optionally handle empty or None values differently here + pass + elif data_type == ShotMetaData.DYNAMICRAFTER_DATA.value: + st.markdown(shot_meta_data.get("main_setting_data", {}).get(f"video_desc_{shot_uuid}", "")) st.markdown("##### Frame settings ---") st.write("To see the settings for each frame, click on the 'Boot up settings' button above and they'll load below.") diff --git a/utils/constants.py b/utils/constants.py index df2a5464..dfa75d21 100644 --- a/utils/constants.py +++ b/utils/constants.py @@ -16,6 +16,9 @@ class ImageStage(ExtendedEnum): MAIN_VARIANT = 'Main Variant' NONE = 'None' +class AnimateShotMethod(ExtendedEnum): # remove this and have a common nomenclature throughout + BATCH_CREATIVE_INTERPOLATION = "Batch Creative Interpolation" + DYNAMICRAFTER_INTERPOLATION = "2-Image Realistic Interpolation (beta)" # single template for passing query params class MLQueryObject: diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index dc91060b..511da85e 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -90,6 +90,7 @@ def video_through_frame_interpolation(settings, variant_count, queue_inference=F "multipled_base_adapter_strength": settings["multipled_base_adapter_strength"], "individual_prompts": settings["individual_prompts"], "individual_negative_prompts": settings["individual_negative_prompts"], + "amount_of_motion": settings["amount_of_motion"], "max_frames": settings["max_frames"], "lora_data": settings["lora_data"], "shot_data": settings["shot_data"], @@ -136,7 +137,8 @@ def video_through_direct_morphing(settings, variant_count, queue_inference=False sm_data = { "width": settings['width'], "height": settings['height'], - "prompt": settings["prompt"] + "prompt": settings["prompt"], + "shot_data": settings["shot_data"] } for idx, img_uuid in enumerate(settings['file_uuid_list']): From 7525bc95816bab5d476d60b8e2d9e25a394a51e9 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Fri, 22 Mar 2024 18:45:05 +0000 Subject: [PATCH 38/43] ad lcm added --- .../components/video_rendering_page.py | 19 ++++++- .../methods/animation_style_methods.py | 8 ++- utils/media_processor/interpolator.py | 3 +- utils/ml_processor/comfy_data_transform.py | 51 ++++++++++++++++++- 4 files changed, 76 insertions(+), 5 deletions(-) diff --git a/ui_components/components/video_rendering_page.py b/ui_components/components/video_rendering_page.py index 5e05f84f..dd7f61c2 
100644 --- a/ui_components/components/video_rendering_page.py +++ b/ui_components/components/video_rendering_page.py @@ -119,6 +119,20 @@ def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): st.markdown("***") st.markdown("##### Generation Settings") + if f"type_of_generation_index_{shot.uuid}" not in st.session_state or \ + not isinstance(st.session_state[f"type_of_generation_index_{shot.uuid}"], int): + st.session_state[f"type_of_generation_index_{shot.uuid}"] = 0 + + generation_types = ["Fast", "Detailed"] + type_of_generation = st.radio( + "Type of generation:", + options=generation_types, + key="creative_interpolation_type", + horizontal=True, + index=st.session_state[f"type_of_generation_index_{shot.uuid}"], + help="" + ) + animate_col_1, _, _ = st.columns([3, 1, 1]) with animate_col_1: variant_count = st.number_input("How many variants?", min_value=1, max_value=5, value=1, step=1, key="variant_count") @@ -151,9 +165,12 @@ def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): lora_data, default_model, image.uuid if image else None, - settings["strength_of_structure_control_image"] + settings["strength_of_structure_control_image"], + generation_types.index(st.session_state['creative_interpolation_type']) ) settings.update(shot_data=shot_data) + settings.update(type_of_generation=type_of_generation) + vid_quality = "full" st.success("Generating clip - see status in the Generation Log in the sidebar. Press 'Refresh log' to update.") diff --git a/ui_components/methods/animation_style_methods.py b/ui_components/methods/animation_style_methods.py index 059591bf..f939f2e1 100644 --- a/ui_components/methods/animation_style_methods.py +++ b/ui_components/methods/animation_style_methods.py @@ -64,6 +64,10 @@ def load_shot_settings(shot_uuid, log_uuid=None): st.session_state[key] = main_setting_data[key] if key == f"structure_control_image_uuid_{shot_uuid}" and not main_setting_data[key]: # hackish sol, will fix later st.session_state[f"structure_control_image_{shot_uuid}"] = None + elif key == f"type_of_generation_index_{shot.uuid}": + if not isinstance(st.session_state[key], int): + st.session_state[key] = 0 + st.session_state["creative_interpolation_type"] = ["Fast", "Detailed"][st.session_state[key]] st.rerun() elif data_type == ShotMetaData.DYNAMICRAFTER_DATA.value: @@ -393,7 +397,8 @@ def update_session_state_with_animation_details(shot_uuid, lora_data, default_model, structure_control_img_uuid = None, - strength_of_structure_control_img = None + strength_of_structure_control_img = None, + type_of_generation_index = 0 ): data_repo = DataRepo() shot = data_repo.get_shot_from_uuid(shot_uuid) @@ -432,6 +437,7 @@ def update_session_state_with_animation_details(shot_uuid, main_setting_data[f"amount_of_motion_{shot.uuid}"] = st.session_state["amount_of_motion_overall"] main_setting_data[f"structure_control_image_uuid_{shot.uuid}"] = structure_control_img_uuid main_setting_data[f"saved_strength_of_structure_control_image_{shot.uuid}"] = strength_of_structure_control_img + main_setting_data[f"type_of_generation_index_{shot.uuid}"] = type_of_generation_index checkpoints_dir = "ComfyUI/models/checkpoints" all_files = os.listdir(checkpoints_dir) diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index 511da85e..df2173cc 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -94,7 +94,8 @@ def video_through_frame_interpolation(settings, variant_count, queue_inference=F 
"max_frames": settings["max_frames"], "lora_data": settings["lora_data"], "shot_data": settings["shot_data"], - "strength_of_structure_control_image": settings["strength_of_structure_control_image"] + "strength_of_structure_control_image": settings["strength_of_structure_control_image"], + "use_ad_lcm": True if settings["type_of_generation"] == "Fast" else False } # adding the input images diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index b38c0c72..059a2e12 100644 --- a/utils/ml_processor/comfy_data_transform.py +++ b/utils/ml_processor/comfy_data_transform.py @@ -401,7 +401,44 @@ def update_json_with_loras(json_data, loras): json_data["545"]["inputs"]["motion_lora"][0] = "536" return json_data - + + def convert_to_animate_lcm(json_data): + json_data.update( + { + "565": { + "inputs": { + "lora_name": "AnimateLCM_sd15_t2v_lora.safetensors", + "strength_model": 0.8, + "strength_clip": 1, + "model": [ + "470", + 0 + ], + "clip": [ + "470", + 1 + ] + }, + "class_type": "LoraLoader", + "_meta": { + "title": "Load LoRA" + } + } + } + ) + + json_data["558"]["inputs"]["model"] = ["565", 0] + json_data["541"]["inputs"]["clip"] = ["565", 1] + json_data["547"]["inputs"]["beta_schedule"] = "lcm avg(sqrt_linear,linear)" + + json_data["207"]["inputs"]["sample_name"] = "lcm" + json_data["207"]["inputs"]["steps"] = 8 + json_data["207"]["inputs"]["cfg"] = 2.2 + json_data["207"]["inputs"]["sample_name"] = "sgm_uniform" + + return json_data + + extra_models_list = [] sm_data = query.data.get('data', {}) workflow, output_node_ids = ComfyDataTransform.get_workflow_json(ComfyWorkflow.STEERABLE_MOTION) workflow = update_json_with_loras(workflow, sm_data.get('lora_data')) @@ -451,8 +488,18 @@ def update_json_with_loras(json_data, loras): if sm_data.get('file_structure_control_img_uuid'): workflow = update_structure_control_image(workflow, sm_data.get('file_structure_control_img_uuid'), sm_data.get('strength_of_structure_control_image')) + if sm_data.get("use_ad_lcm", False): + workflow = convert_to_animate_lcm(workflow) + extra_models_list = [ + { + "filename": "AnimateLCM_sd15_t2v_lora.safetensors", + "url": "https://huggingface.co/wangfuyun/AnimateLCM/resolve/main/AnimateLCM_sd15_t2v_lora.safetensors?download=true", + "dest": "./ComfyUI/models/loras/" + } + ] + ignore_list = sm_data.get("lora_data", []) - return json.dumps(workflow), output_node_ids, [], ignore_list + return json.dumps(workflow), output_node_ids, extra_models_list, ignore_list @staticmethod def transform_dynamicrafter_workflow(query: MLQueryObject): From 4b97bd8ca07187151101509bf5704cedf49865bc Mon Sep 17 00:00:00 2001 From: peteromallet Date: Sat, 23 Mar 2024 03:37:41 +0100 Subject: [PATCH 39/43] Smoke 'em up --- ui_components/components/animate_shot_page.py | 32 +- .../components/video_rendering_page.py | 13 +- .../widgets/animation_style_element.py | 1237 ----------------- ui_components/widgets/display_element.py | 17 +- ui_components/widgets/image_zoom_widgets.py | 4 +- ui_components/widgets/shot_view.py | 59 +- .../widgets/sm_animation_style_element.py | 55 +- 7 files changed, 100 insertions(+), 1317 deletions(-) delete mode 100644 ui_components/widgets/animation_style_element.py diff --git a/ui_components/components/animate_shot_page.py b/ui_components/components/animate_shot_page.py index 8bc7964f..92a4ad92 100644 --- a/ui_components/components/animate_shot_page.py +++ b/ui_components/components/animate_shot_page.py @@ -40,27 +40,19 @@ def video_rendering_page(shot_uuid, 
selected_variant): file_uuid_list = [] if f"type_of_animation_{shot.uuid}" not in st.session_state: st.session_state[f"type_of_animation_{shot.uuid}"] = 0 - if st.session_state[f"type_of_animation_{shot.uuid}"] == 0: # AnimateShotMethod.BATCH_CREATIVE_INTERPOLATION.value + # AnimateShotMethod.BATCH_CREATIVE_INTERPOLATION.value # loading images from a particular video variant - if selected_variant: - log = data_repo.get_inference_log_from_uuid(selected_variant) - shot_data = json.loads(log.input_params) - file_uuid_list = shot_data.get('origin_data', json.dumps({})).get('settings', {}).get('file_uuid_list', []) - # picking current images if no variant is selected - else: - for timing in shot.timing_list: - if timing.primary_image and timing.primary_image.location: - file_uuid_list.append(timing.primary_image.uuid) - else: # AnimateShotMethod.DYNAMICRAFTER_INTERPOLATION.value - # hackish sol, will fix later - for idx in range(2): - if f'img{idx+1}_uuid_{shot_uuid}' in st.session_state and st.session_state[f'img{idx+1}_uuid_{shot_uuid}']: - file_uuid_list.append(st.session_state[f'img{idx+1}_uuid_{shot_uuid}']) - - if not (f'video_desc_{shot_uuid}' in st.session_state and st.session_state[f'video_desc_{shot_uuid}']): - st.session_state[f'video_desc_{shot_uuid}'] = "" + if selected_variant: + log = data_repo.get_inference_log_from_uuid(selected_variant) + shot_data = json.loads(log.input_params) + file_uuid_list = shot_data.get('origin_data', json.dumps({})).get('settings', {}).get('file_uuid_list', []) + # picking current images if no variant is selected + else: + for timing in shot.timing_list: + if timing.primary_image and timing.primary_image.location: + file_uuid_list.append(timing.primary_image.uuid) - img_list = data_repo.get_all_file_list(uuid__in=file_uuid_list, file_type=InternalFileType.IMAGE.value)[0] + img_list = data_repo.get_all_file_list(uuid__in=file_uuid_list, file_type=InternalFileType.IMAGE.value)[0] headline1, _, headline3 = st.columns([1, 1, 1]) with headline1: @@ -74,7 +66,7 @@ def video_rendering_page(shot_uuid, selected_variant): if type_of_animation == AnimateShotMethod.BATCH_CREATIVE_INTERPOLATION.value: sm_video_rendering_page(shot_uuid, img_list) - else: + else: two_img_realistic_interpolation_page(shot_uuid, img_list) st.markdown("***") \ No newline at end of file diff --git a/ui_components/components/video_rendering_page.py b/ui_components/components/video_rendering_page.py index dd7f61c2..365c01c8 100644 --- a/ui_components/components/video_rendering_page.py +++ b/ui_components/components/video_rendering_page.py @@ -123,19 +123,19 @@ def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): not isinstance(st.session_state[f"type_of_generation_index_{shot.uuid}"], int): st.session_state[f"type_of_generation_index_{shot.uuid}"] = 0 - generation_types = ["Fast", "Detailed"] + generation_types = ["Detailed", "Fast"] type_of_generation = st.radio( "Type of generation:", options=generation_types, key="creative_interpolation_type", horizontal=True, index=st.session_state[f"type_of_generation_index_{shot.uuid}"], - help="" + help="Detailed generation will around twice as long but provide more detailed results." 
) animate_col_1, _, _ = st.columns([3, 1, 1]) with animate_col_1: - variant_count = st.number_input("How many variants?", min_value=1, max_value=5, value=1, step=1, key="variant_count") + variant_count = 1 if "generate_vid_generate_inference" in st.session_state and st.session_state["generate_vid_generate_inference"]: # last keyframe position * 16 @@ -240,7 +240,7 @@ def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): ) -def two_img_realistic_interpolation_page(shot_uuid, img_list): +def two_img_realistic_interpolation_page(shot_uuid, img_list: List[InternalFileObject]): if not (img_list and len(img_list) >= 2): st.error("You need two images for this interpolation") return @@ -249,6 +249,7 @@ def two_img_realistic_interpolation_page(shot_uuid, img_list): shot = data_repo.get_shot_from_uuid(shot_uuid) settings = {} + st.markdown("***") col1, col2, col3 = st.columns([1, 1, 1]) with col1: st.image(img_list[0].location, use_column_width=True) @@ -257,6 +258,8 @@ def two_img_realistic_interpolation_page(shot_uuid, img_list): st.image(img_list[1].location, use_column_width=True) with col2: + if f'video_desc_{shot_uuid}' not in st.session_state: + st.session_state[f'video_desc_{shot_uuid}'] = "" description_of_motion = st.text_area("Describe the motion you want between the frames:", \ key=f"description_of_motion_{shot.uuid}", value=st.session_state[f'video_desc_{shot_uuid}']) st.info("This is very important and will likely require some iteration.") @@ -300,8 +303,10 @@ def two_img_realistic_interpolation_page(shot_uuid, img_list): st.rerun() # Buttons for adding to queue or backlog, assuming these are still relevant + st.markdown("***") btn1, btn2, btn3 = st.columns([1, 1, 1]) backlog_no_update = {f'{shot_uuid}_backlog_enabled': False} + with btn1: st.button("Add to queue", key="generate_animation_clip", disabled=False, help="Generate the interpolation clip based on the two images and described motion.", on_click=lambda: toggle_generate_inference(position, **backlog_no_update), type="primary", use_container_width=True) diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py deleted file mode 100644 index 4f3fbcf4..00000000 --- a/ui_components/widgets/animation_style_element.py +++ /dev/null @@ -1,1237 +0,0 @@ -import json -import tarfile -import time -import zipfile -import streamlit as st -from typing import List -from shared.constants import AnimationStyleType, AnimationToolType, InferenceParamType -from ui_components.constants import DEFAULT_SHOT_MOTION_VALUES, DefaultProjectSettingParams, ShotMetaData -from ui_components.methods.animation_style_methods import load_shot_settings -from ui_components.methods.common_methods import save_new_image -from ui_components.methods.video_methods import create_single_interpolated_clip -from utils.data_repo.data_repo import DataRepo -from utils.ml_processor.motion_module import AnimateDiffCheckpoint -from ui_components.models import InternalFrameTimingObject, InternalShotObject -from ui_components.methods.file_methods import save_or_host_file -from utils import st_memory -import numpy as np -import matplotlib.pyplot as plt -import os -import requests -from PIL import Image -# import re -import uuid -import re - -default_model = "Deliberate_v2.safetensors" - -def animation_style_element(shot_uuid): - disable_generate = False - help = "" - backlog_help = "This will add the new video generation in the backlog" - motion_modules = AnimateDiffCheckpoint.get_name_list() - 
variant_count = 1 - current_animation_style = AnimationStyleType.CREATIVE_INTERPOLATION.value # setting a default value - data_repo = DataRepo() - - shot: InternalShotObject = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) - st.session_state['project_uuid'] = str(shot.project.uuid) - timing_list: List[InternalFrameTimingObject] = shot.timing_list - - settings = { - 'animation_tool': AnimationToolType.ANIMATEDIFF.value, - } - - headline1, headline2, headline3 = st.columns([1, 1, 1]) - with headline1: - - st.markdown("### 🎥 Generate animations") - st.write("##### _\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_") - with headline3: - with st.expander("", expanded=False): - type_of_animation = st_memory.radio("What type of animation would you like to generate?", options=["Batch Creative Interpolation", "2-Image Realistic Interpolation (beta)"],horizontal=True, help="**Batch Creative Interpolaton** lets you input multple images and control the motion and style of each frame - resulting in a fluid, surreal and highly-controllable motion. \n\n **2-Image Realistic Interpolation** is a simpler way to generate animations - it generates a video by interpolating between two images, and is best for realistic motion.",key=f"type_of_animation_{shot.uuid}") - - if type_of_animation == "Batch Creative Interpolation": - - with st.container(): - advanced1, advanced2, advanced3 = st.columns([1.0,1.5, 1.0]) - - with advanced1: - st.markdown("##### Individual frame settings") - - items_per_row = 3 - strength_of_frames = [] - distances_to_next_frames = [] - speeds_of_transitions = [] - freedoms_between_frames = [] - individual_prompts = [] - individual_negative_prompts = [] - motions_during_frames = [] - shot_meta_data = {} - - if len(timing_list) <= 1: - st.warning("You need at least two frames to generate a video.") - st.stop() - - open_advanced_settings = st_memory.toggle("Open all advanced settings", key="advanced_settings", value=False) - - # setting default values to main shot settings - if f'lora_data_{shot.uuid}' not in st.session_state: - st.session_state[f'lora_data_{shot.uuid}'] = [] - - if f'strength_of_adherence_value_{shot.uuid}' not in st.session_state: - st.session_state[f'strength_of_adherence_value_{shot.uuid}'] = 0.10 - - if f'type_of_motion_context_index_{shot.uuid}' not in st.session_state: - st.session_state[f'type_of_motion_context_index_{shot.uuid}'] = 1 - - if f'positive_prompt_video_{shot.uuid}' not in st.session_state: - st.session_state[f"positive_prompt_video_{shot.uuid}"] = "" - - if f'negative_prompt_video_{shot.uuid}' not in st.session_state: - st.session_state[f"negative_prompt_video_{shot.uuid}"] = "" - - if f'ckpt_{shot.uuid}' not in st.session_state: - st.session_state[f'ckpt_{shot.uuid}'] = "" - - if f"amount_of_motion_{shot.uuid}" not in st.session_state: - st.session_state[f"amount_of_motion_{shot.uuid}"] = 1.3 - - # loading settings of the last shot (if this shot is being loaded for the first time) - if f'strength_of_frame_{shot_uuid}_0' not in st.session_state: - load_shot_settings(shot.uuid) - - # ------------- Timing Frame and their settings ------------------- - for i in range(0, len(timing_list) , items_per_row): - with st.container(): - grid = st.columns([2 if j%2==0 else 1 for j in range(2*items_per_row)]) # Adjust the column widths - for j in range(items_per_row): - - idx = i + j - if idx < len(timing_list): - with grid[2*j]: # Adjust the index for image column - timing = timing_list[idx] - if timing.primary_image and timing.primary_image.location: - - 
st.info(f"**Frame {idx + 1}**") - - st.image(timing.primary_image.location, use_column_width=True) - - # settings control - with st.expander("Advanced settings:", expanded=open_advanced_settings): - # checking for newly added frames - if f'individual_prompt_{shot.uuid}_{idx}' not in st.session_state: - for k, v in DEFAULT_SHOT_MOTION_VALUES.items(): - st.session_state[f"{k}_{shot_uuid}_{idx}"] = v - - individual_prompt = st.text_input("What to include:", key=f"individual_prompt_widget_{idx}_{timing.uuid}", value=st.session_state[f'individual_prompt_{shot.uuid}_{idx}'], help="Use this sparingly, as it can have a large impact on the video and cause weird distortions.") - individual_prompts.append(individual_prompt) - individual_negative_prompt = st.text_input("What to avoid:", key=f"negative_prompt_widget_{idx}_{timing.uuid}", value=st.session_state[f'individual_negative_prompt_{shot.uuid}_{idx}'],help="Use this sparingly, as it can have a large impact on the video and cause weird distortions.") - individual_negative_prompts.append(individual_negative_prompt) - strength1, strength2 = st.columns([1, 1]) - with strength1: - strength_of_frame = st.slider("Strength of current frame:", min_value=0.25, max_value=1.0, step=0.01, key=f"strength_of_frame_widget_{shot.uuid}_{idx}", value=st.session_state[f'strength_of_frame_{shot.uuid}_{idx}']) - strength_of_frames.append(strength_of_frame) - with strength2: - motion_during_frame = st.slider("Motion during frame:", min_value=0.5, max_value=1.5, step=0.01, key=f"motion_during_frame_widget_{idx}_{timing.uuid}", value=st.session_state[f'motion_during_frame_{shot.uuid}_{idx}']) - motions_during_frames.append(motion_during_frame) - else: - st.warning("No primary image present.") - - # distance, speed and freedom settings (also aggregates them into arrays) - with grid[2*j+1]: # Add the new column after the image column - if idx < len(timing_list) - 1: - - # if st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] is a int, make it a float - if isinstance(st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'], int): - st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] = float(st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}']) - distance_to_next_frame = st.slider("Seconds to next frame:", min_value=0.25, max_value=6.00, step=0.25, key=f"distance_to_next_frame_widget_{idx}_{timing.uuid}", value=st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}']) - distances_to_next_frames.append(distance_to_next_frame/2) - speed_of_transition = st.slider("Speed of transition:", min_value=0.45, max_value=0.7, step=0.01, key=f"speed_of_transition_widget_{idx}_{timing.uuid}", value=st.session_state[f'speed_of_transition_{shot.uuid}_{idx}']) - speeds_of_transitions.append(speed_of_transition) - freedom_between_frames = st.slider("Freedom between frames:", min_value=0.15, max_value=0.85, step=0.01, key=f"freedom_between_frames_widget_{idx}_{timing.uuid}", value=st.session_state[f'freedom_between_frames_{shot.uuid}_{idx}']) - freedoms_between_frames.append(freedom_between_frames) - - if (i < len(timing_list) - 1) or (len(timing_list) % items_per_row != 0): - st.markdown("***") - - - st.markdown("##### Style model") - tab1, tab2 = st.tabs(["Choose Model","Download Models"]) - - checkpoints_dir = "ComfyUI/models/checkpoints" - all_files = os.listdir(checkpoints_dir) - if len(all_files) == 0: - model_files = [default_model] - - else: - # Filter files to only include those with .safetensors and .ckpt extensions - model_files = [file for 
file in all_files if file.endswith('.safetensors') or file.endswith('.ckpt')] - # drop all files that contain xl - model_files = [file for file in model_files if "xl" not in file] - - # Mapping of model names to their download URLs - sd_model_dict = { - "Anything V3 FP16 Pruned": { - "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/anything-v3-fp16-pruned.safetensors.tar", - "filename": "anything-v3-fp16-pruned.safetensors.tar" - }, - "Deliberate V2": { - "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/Deliberate_v2.safetensors.tar", - "filename": "Deliberate_v2.safetensors.tar" - }, - "Dreamshaper 8": { - "url": "https://weights.replicate.delivery/default/comfy-ui/checkpoints/dreamshaper_8.safetensors.tar", - "filename": "dreamshaper_8.safetensors.tar" - }, - "epicrealism_pureEvolutionV5": { - "url": "https://civitai.com/api/download/models/134065", - "filename": "epicrealism_pureEvolutionv5.safetensors" - }, - "majicmixRealistic_v6": { - "url": "https://civitai.com/api/download/models/94640", - "filename": "majicmixRealistic_v6.safetensors" - }, - } - - cur_model = st.session_state[f'ckpt_{shot.uuid}'] - current_model_index = model_files.index(cur_model) if (cur_model and cur_model in model_files) else 0 - # st.session_state['sd_model_video'] = current_model_index - # ---------------- SELECT CKPT -------------- - with tab1: - model1, model2 = st.columns([1, 1]) - with model1: - sd_model = "" - def update_model(): - global sd_model - sd_model = checkpoints_dir + "/" + st.session_state['sd_model_video'] - - if model_files and len(model_files): - sd_model = st.selectbox( - label="Which model would you like to use?", - options=model_files, - key="sd_model_video", - index=current_model_index, - on_change=update_model - ) - else: - st.write("") - st.info("Default model Deliberate V2 would be selected") - with model2: - if len(all_files) == 0: - st.write("") - st.info("This is the default model - to download more, go to the Download Models tab.") - else: - st.write("") - st.info("To download more models, go to the Download Models tab.") - - # if it's in sd_model-list, just pass the name. 
If not, stick checkpoints_dir in front of it - # sd_model = checkpoints_dir + "/" + sd_model - - # ---------------- ADD CKPT --------------- - with tab2: - where_to_get_model = st.radio("Where would you like to get the model from?", options=["Our list", "Upload a model", "From a URL"], key="where_to_get_model") - - if where_to_get_model == "Our list": - # Use the keys (model names) for the selection box - model_name_selected = st.selectbox("Which model would you like to download?", options=list(sd_model_dict.keys()), key="model_to_download") - - if st.button("Download Model", key="download_model"): - with st.spinner("Downloading model..."): - download_bar = st.progress(0, text="") - save_directory = "ComfyUI/models/checkpoints" - os.makedirs(save_directory, exist_ok=True) # Create the directory if it doesn't exist - - # Retrieve the URL using the selected model name - model_url = sd_model_dict[model_name_selected]["url"] - - # Download the model and save it to the directory - response = requests.get(model_url, stream=True) - zip_filename = sd_model_dict[model_name_selected]["filename"] - filepath = os.path.join(save_directory, zip_filename) - print("filepath: ", filepath) - if response.status_code == 200: - total_size = int(response.headers.get('content-length', 0)) - - with open(filepath, 'wb') as f: - received_bytes = 0 - - for data in response.iter_content(chunk_size=8192): - f.write(data) - received_bytes += len(data) - progress = received_bytes / total_size - download_bar.progress(progress) - - st.success(f"Downloaded {model_name_selected} to {save_directory}") - download_bar.empty() - - if model_url.endswith(".zip") or model_url.endswith(".tar"): - st.success("Extracting the zip file. Please wait...") - new_filepath = filepath.replace(zip_filename, "") - if model_url.endswith(".zip"): - with zipfile.ZipFile(f"{filepath}", "r") as zip_ref: - zip_ref.extractall(new_filepath) - else: - with tarfile.open(f"{filepath}", "r") as tar_ref: - tar_ref.extractall(new_filepath) - - os.remove(filepath) - st.rerun() - - elif where_to_get_model == "Upload a model": - st.info("It's simpler to just drop this into the ComfyUI/models/checkpoints directory.") - - elif where_to_get_model == "From a URL": - text1, text2 = st.columns([1, 1]) - with text1: - - text_input = st.text_input("Enter the URL of the model", key="text_input") - with text2: - st.info("Make sure to get the download url of the model. \n\n For example, from Civit, this should look like this: https://civitai.com/api/download/models/179446. 
\n\n While from Hugging Face, it should look like this: https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors") - if st.button("Download Model", key="download_model"): - with st.spinner("Downloading model..."): - save_directory = "ComfyUI/models/checkpoints" - os.makedirs(save_directory, exist_ok=True) - response = requests.get(text_input) - if response.status_code == 200: - with open(os.path.join(save_directory, text_input.split("/")[-1]), 'wb') as f: - f.write(response.content) - st.success(f"Downloaded model to {save_directory}") - else: - st.error("Failed to download model") - - # if it's in local DEVELOPMENT ENVIRONMENT - st.markdown("***") - st.markdown("##### Motion guidance") - tab1, tab2, tab3 = st.tabs(["Apply LoRAs","Download LoRAs","Train LoRAs"]) - - lora_data = [] - lora_file_dest = "ComfyUI/models/animatediff_motion_lora" - - # ---------------- ADD LORA ----------------- - with tab1: - # Initialize a single list to hold dictionaries for LoRA data - # Check if the directory exists and list files, or use a default list - if os.path.exists(lora_file_dest): - files = os.listdir(lora_file_dest) - # remove files that start with a dot - files = [file for file in files if not file.startswith(".")] - else: - files = [] - - # Iterate through each current LoRA in session state - if len(files) == 0: - st.error("No LoRAs found in the directory - go to Explore to download some, or drop them into ComfyUI/models/animatediff_motion_lora") - if st.button("Check again", key="check_again"): - st.rerun() - else: - # cleaning empty lora vals - for idx, lora in enumerate(st.session_state[f"lora_data_{shot.uuid}"]): - if not lora: - st.session_state[f"lora_data_{shot.uuid}"].pop(idx) - - for idx, lora in enumerate(st.session_state[f"lora_data_{shot.uuid}"]): - if not lora: - continue - h1, h2, h3, h4 = st.columns([1, 1, 1, 0.5]) - with h1: - file_idx = files.index(lora["filename"]) - which_lora = st.selectbox("Which LoRA would you like to use?", options=files, key=f"which_lora_{idx}", index=file_idx) - - with h2: - strength_of_lora = st.slider("How strong would you like the LoRA to be?", min_value=0.0, max_value=1.0, value=lora["lora_strength"], step=0.01, key=f"strength_of_lora_{idx}") - lora_data.append({"filename": which_lora, "lora_strength": strength_of_lora, "filepath": lora_file_dest + "/" + which_lora}) - - with h3: - when_to_apply_lora = st.slider("When to apply the LoRA?", min_value=0, max_value=100, value=(0,100), step=1, key=f"when_to_apply_lora_{idx}",disabled=True,help="This feature is not yet available.") - - with h4: - st.write("") - if st.button("Remove", key=f"remove_lora_{idx}"): - st.session_state[f"lora_data_{shot.uuid}"].pop(idx) - st.rerun() - - if len(st.session_state[f"lora_data_{shot.uuid}"]) == 0: - text = "Add a LoRA" - else: - text = "Add another LoRA" - if st.button(text, key="add_motion_guidance"): - if files and len(files): - st.session_state[f"lora_data_{shot.uuid}"].append({ - "filename": files[0], - "lora_strength": 0.5, - "filepath": lora_file_dest + "/" + files[0] - }) - st.rerun() - # ---------------- DOWNLOAD LORA --------------- - with tab2: - text1, text2 = st.columns([1, 1]) - with text1: - where_to_download_from = st.radio("Where would you like to get the LoRA from?", options=["Our list", "From a URL","Upload a LoRA"], key="where_to_download_from", horizontal=True) - - if where_to_download_from == "Our list": - with text1: - file_links = [ - 
"https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_tony_stark_r64_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_train_r128_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/300_car_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_desert_48_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_jeep_driving_r32_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_man_running_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_rotation_temporal_unet.safetensors", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/750_jeep_driving_r32_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/300_zooming_in_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_cat_walking_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_playing_banjo_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_woman_dancing_temporal_unet.safetensors", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_zooming_out_temporal_unet.safetensors" - ] - - selected_lora_optn = st.selectbox("Which LoRA would you like to download?", options=[a.split("/")[-1] for a in file_links], key="selected_lora") - if st.button("Download LoRA", key="download_lora"): - with st.spinner("Downloading LoRA..."): - save_directory = "ComfyUI/models/animatediff_motion_lora" - os.makedirs(save_directory, exist_ok=True) # Create the directory if it doesn't exist - - # Extract the filename from the URL - selected_lora = next((ele for idx, ele in enumerate(file_links) if selected_lora_optn in ele), None) - filename = selected_lora.split("/")[-1] - save_path = os.path.join(save_directory, filename) - - # Download the file - download_lora_bar = st.progress(0, text="") - response = requests.get(selected_lora, stream=True) - if response.status_code == 200: - total_size = int(response.headers.get('content-length', 0)) - with open(save_path, 'wb') as f: - received_bytes = 0 - - for data in response.iter_content(chunk_size=8192): - f.write(data) - received_bytes += len(data) - progress = received_bytes / total_size - download_lora_bar.progress(progress) - - st.success(f"Downloaded LoRA to {save_path}") - download_lora_bar.empty() - st.rerun() - else: - st.error("Failed to download LoRA") - - elif where_to_download_from == "From a URL": - - with text1: - text_input = st.text_input("Enter the URL of the LoRA", key="text_input_lora") - with text2: - st.write("") - st.write("") - st.write("") - st.info("Make sure to get the download url of the LoRA. 
\n\n For example, from Hugging Face, it should look like this: https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors") - with text1: - if st.button("Download LoRA", key="download_lora"): - with st.spinner("Downloading LoRA..."): - save_directory = "ComfyUI/models/animatediff_motion_lora" - os.makedirs(save_directory, exist_ok=True) - response = requests.get(text_input) - if response.status_code == 200: - with open(os.path.join(save_directory, text_input.split("/")[-1]), 'wb') as f: - f.write(response.content) - st.success(f"Downloaded LoRA to {save_directory}") - else: - st.error("Failed to download LoRA") - elif where_to_download_from == "Upload a LoRA": - st.info("It's simpler to just drop this into the ComfyUI/models/animatediff_motion_lora directory.") - # ---------------- TRAIN LORA -------------- - with tab3: - b1, b2 = st.columns([1, 1]) - with b1: - st.error("This feature is not yet available.") - name_this_lora = st.text_input("Name this LoRA", key="name_this_lora") - describe_the_motion = st.text_area("Describe the motion", key="describe_the_motion") - training_video = st.file_uploader("Upload a video to train a new LoRA", type=["mp4"]) - - if st.button("Train LoRA", key="train_lora", use_container_width=True): - st.write("Training LoRA") - - st.markdown("***") - st.markdown("##### Overall style settings") - - e1, e2, e3 = st.columns([1, 1,1]) - with e1: - strength_of_adherence = st.slider("How much would you like to force adherence to the input images?", min_value=0.0, max_value=1.0, step=0.01, key="strength_of_adherence", value=st.session_state[f"strength_of_adherence_value_{shot.uuid}"], help="Higher values may cause flickering and sudden changes in the video. Lower values may cause the video to be less influenced by the input images but can lead to smoother motion and better colours.") - - f1, f2, f3 = st.columns([1, 1, 1]) - with f1: - overall_positive_prompt = "" - def update_prompt(): - global overall_positive_prompt - overall_positive_prompt = st.session_state[f"positive_prompt_video_{shot.uuid}"] - - overall_positive_prompt = st.text_area( - "What would you like to see in the videos?", - key="overall_positive_prompt", - value=st.session_state[f"positive_prompt_video_{shot.uuid}"], - on_change=update_prompt - ) - with f2: - overall_negative_prompt = st.text_area( - "What would you like to avoid in the videos?", - key="overall_negative_prompt", - value=st.session_state[f"negative_prompt_video_{shot.uuid}"] - ) - - with f3: - st.write("") - st.write("") - st.info("Use these sparingly, as they can have a large impact on the video. You can also edit them for individual frames in the advanced settings above.") - - st.markdown("***") - st.markdown("##### Overall motion settings") - h1, h2, h3 = st.columns([1, 0.5, 1.0]) - with h1: - # will fix this later - if f"type_of_motion_context_index_{shot.uuid}" in st.session_state and isinstance(st.session_state[f"type_of_motion_context_index_{shot.uuid}"], str): - st.session_state[f"type_of_motion_context_index_{shot.uuid}"] = ["Low", "Standard", "High"].index(st.session_state[f"type_of_motion_context_index_{shot.uuid}"]) - type_of_motion_context = st.radio("Type of motion context:", options=["Low", "Standard", "High"], key="type_of_motion_context", horizontal=True, index=st.session_state[f"type_of_motion_context_index_{shot.uuid}"], help="This is how much the motion will be informed by the previous and next frames. 
'High' can make it smoother but increase artifacts - while 'Low' make the motion less smooth but removes artifacts. Naturally, we recommend Standard.") - - st.session_state[f"amount_of_motion_{shot.uuid}"] = st.slider("Amount of motion:", min_value=0.5, max_value=1.5, step=0.01,value=1.3, key="amount_of_motion_overall", on_change=lambda: update_motion_for_all_frames(shot.uuid, timing_list), help="You can also tweak this on an individual frame level in the advanced settings above.") - - - i1, i2, i3 = st.columns([1, 0.5, 1.5]) - - with i1: - if f'structure_control_image_{shot.uuid}' not in st.session_state: - st.session_state[f"structure_control_image_{shot.uuid}"] = None - - if f"strength_of_structure_control_image_{shot.uuid}" not in st.session_state: - st.session_state[f"strength_of_structure_control_image_{shot.uuid}"] = None - control_motion_with_image = st_memory.toggle("Control motion with an image", help="This will allow you to upload images to control the motion of the video.",key=f"control_motion_with_image_{shot.uuid}") - - if control_motion_with_image: - uploaded_image = st.file_uploader("Upload images to control motion", type=["png", "jpg", "jpeg"], accept_multiple_files=False) - if st.button("Add image", key="add_images"): - if uploaded_image: - - - project_settings = data_repo.get_project_setting(shot.project.uuid) - - width, height = project_settings.width, project_settings.height - # Convert the uploaded image file to PIL Image - uploaded_image_pil = Image.open(uploaded_image) - uploaded_image_pil = uploaded_image_pil.resize((width, height)) - image = save_new_image(uploaded_image_pil, shot.project.uuid) - # image_location = image.local_path - - # Update session state with the URL of the uploaded image - st.success("Image uploaded") - st.session_state[f"structure_control_image_{shot.uuid}"] = image.uuid - st.rerun() - - - else: - st.warning("No images uploaded") - else: - st.session_state[f"structure_control_image_{shot.uuid}"] = None - with i2: - if st.session_state[f"structure_control_image_{shot.uuid}"]: - st.info("Control image:") - file = data_repo.get_file_from_uuid(st.session_state[f"structure_control_image_{shot.uuid}"]) - image = file.local_path - st.image(image) - - st.session_state[f"strength_of_structure_control_image_{shot.uuid}"] = st.slider("Strength of control image:", min_value=0.0, max_value=1.0, step=0.01, key="strength_of_structure_control_image", value=0.5, help="This is how much the control image will influence the motion of the video.") - if st.button("Remove image", key="remove_images"): - st.session_state[f"structure_control_image_{shot.uuid}"] = None - st.success("Image removed") - st.rerun() - - - - type_of_frame_distribution = "dynamic" - type_of_key_frame_influence = "dynamic" - type_of_strength_distribution = "dynamic" - linear_frame_distribution_value = 16 - linear_key_frame_influence_value = 1.0 - linear_cn_strength_value = 1.0 - relative_ipadapter_strength = 1.0 - relative_cn_strength = 0.0 - project_settings = data_repo.get_project_setting(shot.project.uuid) - width = project_settings.width - height = project_settings.height - img_dimension = f"{width}x{height}" - motion_scale = 1.3 - interpolation_style = 'ease-in-out' - buffer = 4 - amount_of_motion = 1.3 - - - (dynamic_strength_values, dynamic_key_frame_influence_values, dynamic_frame_distribution_values, - context_length, context_stride, context_overlap, multipled_base_end_percent, multipled_base_adapter_strength, - prompt_travel, negative_prompt_travel, motion_scales) = 
transform_data(strength_of_frames, - freedoms_between_frames, speeds_of_transitions, distances_to_next_frames, type_of_motion_context, - strength_of_adherence,individual_prompts, individual_negative_prompts, buffer, motions_during_frames) - - - - settings.update( - ckpt=sd_model, - width=width, - height=height, - buffer=4, - motion_scale=motion_scale, - motion_scales=motion_scales, - image_dimension=img_dimension, - output_format="video/h264-mp4", - prompt=overall_positive_prompt, - negative_prompt=overall_negative_prompt, - interpolation_type=interpolation_style, - stmfnet_multiplier=2, - relative_ipadapter_strength=relative_ipadapter_strength, - relative_cn_strength=relative_cn_strength, - type_of_strength_distribution=type_of_strength_distribution, - linear_strength_value=str(linear_cn_strength_value), - dynamic_strength_values=str(dynamic_strength_values), - linear_frame_distribution_value=linear_frame_distribution_value, - dynamic_frame_distribution_values=dynamic_frame_distribution_values, - type_of_frame_distribution=type_of_frame_distribution, - type_of_key_frame_influence=type_of_key_frame_influence, - linear_key_frame_influence_value=float(linear_key_frame_influence_value), - dynamic_key_frame_influence_values=dynamic_key_frame_influence_values, - normalise_speed=True, - ipadapter_noise=0.3, - animation_style=AnimationStyleType.CREATIVE_INTERPOLATION.value, - context_length=context_length, - context_stride=context_stride, - context_overlap=context_overlap, - multipled_base_end_percent=multipled_base_end_percent, - multipled_base_adapter_strength=multipled_base_adapter_strength, - individual_prompts=prompt_travel, - individual_negative_prompts=negative_prompt_travel, - animation_stype=AnimationStyleType.CREATIVE_INTERPOLATION.value, - max_frames=str(dynamic_frame_distribution_values[-1]), - lora_data=lora_data, - shot_data=shot_meta_data, - structure_control_image=st.session_state[f"structure_control_image_{shot.uuid}"], - strength_of_structure_control_image=st.session_state[f"strength_of_structure_control_image_{shot.uuid}"], - - - ) - - position = "generate_vid" - st.markdown("***") - st.markdown("##### Generation Settings") - - animate_col_1, animate_col_2, _ = st.columns([3, 1, 1]) - with animate_col_1: - variant_count = st.number_input("How many variants?", min_value=1, max_value=5, value=1, step=1, key="variant_count") - - if "generate_vid_generate_inference" in st.session_state and st.session_state["generate_vid_generate_inference"]: - - st.success("Generating clip - see status in the Generation Log in the sidebar. 
Press 'Refresh log' to update.") - # last keyframe position * 16 - duration = float(dynamic_frame_distribution_values[-1] / 16) - data_repo.update_shot(uuid=shot.uuid, duration=duration) - shot_data = update_session_state_with_animation_details( - shot.uuid, - timing_list, - strength_of_frames, - distances_to_next_frames, - speeds_of_transitions, - freedoms_between_frames, - motions_during_frames, - individual_prompts, - individual_negative_prompts, - lora_data - ) - settings.update(shot_data=shot_data) - vid_quality = "full" # TODO: add this if video_resolution == "Full Resolution" else "preview" - - - positive_prompt = "" - append_to_prompt = "" # TODO: add this - for idx, timing in enumerate(timing_list): - if timing.primary_image and timing.primary_image.location: - b = timing.primary_image.inference_params - prompt = b.get("prompt", "") if b else "" - prompt += append_to_prompt - frame_prompt = f"{idx * linear_frame_distribution_value}_" + prompt - positive_prompt += ":" + frame_prompt if positive_prompt else frame_prompt - else: - st.error("Please generate primary images") - time.sleep(0.7) - st.rerun() - - if f'{shot_uuid}_backlog_enabled' not in st.session_state: - st.session_state[f'{shot_uuid}_backlog_enabled'] = False - - create_single_interpolated_clip( - shot_uuid, - vid_quality, - settings, - variant_count, - st.session_state[f'{shot_uuid}_backlog_enabled'] - ) - - backlog_update = {f'{shot_uuid}_backlog_enabled': False} - toggle_generate_inference(position, **backlog_update) - st.rerun() - - btn1, btn2, btn3 = st.columns([1, 1, 1]) - backlog_no_update = {f'{shot_uuid}_backlog_enabled': False} - with btn1: - st.button("Add to queue", key="generate_animation_clip", disabled=disable_generate, help=help, on_click=lambda: toggle_generate_inference(position, **backlog_no_update),type="primary",use_container_width=True) - - backlog_update = {f'{shot_uuid}_backlog_enabled': True} - with btn2: - st.button("Add to backlog", key="generate_animation_clip_backlog", disabled=disable_generate, help=backlog_help, on_click=lambda: toggle_generate_inference(position, **backlog_update),type="secondary") - - - with st.sidebar: - with st.expander("⚙️ Animation settings", expanded=True): - if st_memory.toggle("Open", key="open_motion_data"): - - st.markdown("### Visualisation of current motion") - keyframe_positions = get_keyframe_positions(type_of_frame_distribution, dynamic_frame_distribution_values, timing_list, linear_frame_distribution_value) - keyframe_positions = [int(kf * 16) for kf in keyframe_positions] - last_key_frame_position = (keyframe_positions[-1]) - strength_values = extract_strength_values(type_of_strength_distribution, dynamic_strength_values, keyframe_positions, linear_cn_strength_value) - key_frame_influence_values = extract_influence_values(type_of_key_frame_influence, dynamic_key_frame_influence_values, keyframe_positions, linear_key_frame_influence_value) - weights_list, frame_numbers_list = calculate_weights(keyframe_positions, strength_values, 4, key_frame_influence_values,last_key_frame_position) - # s - - plot_weights(weights_list, frame_numbers_list) - - st.markdown("***") - - bulk1, bulk2 = st.columns([1, 1]) - with bulk1: - st.markdown("### Bulk edit frame settings") - with bulk2: - if st.button("Reset to Default", use_container_width=True, key="reset_to_default"): - for idx, timing in enumerate(timing_list): - for k, v in DEFAULT_SHOT_MOTION_VALUES.items(): - st.session_state[f'{k}_{shot.uuid}_{idx}'] = v - - st.success("All frames have been reset to default 
values.") - st.rerun() - - # New feature: Selecting a range to edit - range_to_edit = st.slider("Select the range of frames you would like to edit:", - min_value=1, max_value=len(timing_list), - value=(1, len(timing_list)), step=1, key="range_to_edit") - edit1, edit2 = st.columns([1, 1]) - with edit1: - editable_entity = st.selectbox("What would you like to edit?", options=["Seconds to next frames", "Speed of transitions", "Freedom between frames","Strength of frames","Motion during frames"], key="editable_entity") - with edit2: - if editable_entity == "Seconds to next frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=6.00, step=0.25, value=1.0, key="entity_new_val_seconds") - elif editable_entity == "Strength of frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.25, max_value=1.0, step=0.01, value=0.5, key="entity_new_val_strength") - elif editable_entity == "Speed of transitions": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.45, max_value=0.7, step=0.01, value=0.6, key="entity_new_val_speed") - elif editable_entity == "Freedom between frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.15, max_value=0.85, step=0.01, value=0.5, key="entity_new_val_freedom") - elif editable_entity == "Motion during frames": - entity_new_val = st.slider("What would you like to change it to?", min_value=0.5, max_value=1.5, step=0.01, value=1.3, key="entity_new_val_motion") - - if st.button("Bulk edit", key="bulk_edit", use_container_width=True): - start_idx, end_idx = range_to_edit - for idx in range(start_idx - 1, end_idx): # Adjusting index to be 0-based - if editable_entity == "Strength of frames": - st.session_state[f'strength_of_frame_{shot.uuid}_{idx}'] = entity_new_val - elif editable_entity == "Seconds to next frames": - st.session_state[f'distance_to_next_frame_{shot.uuid}_{idx}'] = entity_new_val - elif editable_entity == "Speed of transitions": - st.session_state[f'speed_of_transition_{shot.uuid}_{idx}'] = entity_new_val - elif editable_entity == "Freedom between frames": - st.session_state[f'freedom_between_frames_{shot.uuid}_{idx}'] = entity_new_val - elif editable_entity == "Motion during frames": - st.session_state[f'motion_during_frame_{shot.uuid}_{idx}'] = entity_new_val - st.rerun() - - - st.markdown("***") - st.markdown("### Save current settings") - if st.button("Save current settings", key="save_current_settings",use_container_width=True,help="Settings will also be saved when you generate the animation."): - update_session_state_with_animation_details(shot.uuid, timing_list, strength_of_frames, distances_to_next_frames, speeds_of_transitions, freedoms_between_frames, motions_during_frames, individual_prompts, individual_negative_prompts) - st.success("Settings saved successfully.") - time.sleep(0.7) - st.rerun() - - elif type_of_animation == "2-Image Realistic Interpolation (beta)": - - col1, col2, col3 = st.columns([1, 1, 1]) - for i in range(0, 2, 2): # Iterate two items at a time - if i < len(timing_list): - timing_first = timing_list[i] - if timing_first.primary_image and timing_first.primary_image.location: - with col1: - st.image(timing_first.primary_image.location, use_column_width=True) - - if i + 1 < len(timing_list): - timing_second = timing_list[i + 1] - if timing_second.primary_image and timing_second.primary_image.location: - with col3: - st.image(timing_second.primary_image.location, use_column_width=True) - - 
with col2: - description_of_motion = st_memory.text_area("Describe the motion you want between the frames:", key=f"description_of_motion_{shot.uuid}") - st.info("This is very important and will likely require some iteration.") - - variant_count = 1 # Assuming a default value for variant_count, adjust as necessary - vid_quality = "full" # Assuming full quality, adjust as necessary based on your requirements - position = "generate_vid" - - if "generate_vid_generate_inference" in st.session_state and st.session_state["generate_vid_generate_inference"]: - - st.success("Generating clip - see status in the Generation Log in the sidebar. Press 'Refresh log' to update.") - # Assuming the logic to generate the clip based on two images, the described motion, and fixed duration - duration = 4 # Fixed duration of 4 seconds - data_repo.update_shot(uuid=shot.uuid, duration=duration) - - project_settings = data_repo.get_project_setting(shot.project.uuid) - - settings.update( - duration= duration, - animation_style=AnimationStyleType.DIRECT_MORPHING.value, - output_format="video/h264-mp4", - width=project_settings.width, - height=project_settings.height, - prompt=description_of_motion - ) - - create_single_interpolated_clip( - shot_uuid, - vid_quality, - settings, - variant_count, - st.session_state[f'{shot_uuid}_backlog_enabled'] - ) - - backlog_update = {f'{shot_uuid}_backlog_enabled': False} - toggle_generate_inference(position, **backlog_update) - # settings.update(shot_data=shot_data) # Save compiled shot_data into settings - st.rerun() - - - # Placeholder for the logic to generate the clip and update session state as needed - # This should include calling the function that handles the interpolation process with the updated settings - - # Buttons for adding to queue or backlog, assuming these are still relevant - btn1, btn2, btn3 = st.columns([1, 1, 1]) - backlog_no_update = {f'{shot_uuid}_backlog_enabled': False} - with btn1: - st.button("Add to queue", key="generate_animation_clip", disabled=False, help="Generate the interpolation clip based on the two images and described motion.", on_click=lambda: toggle_generate_inference(position, **backlog_no_update), type="primary", use_container_width=True) - - backlog_update = {f'{shot_uuid}_backlog_enabled': True} - with btn2: - st.button("Add to backlog", key="generate_animation_clip_backlog", disabled=False, help="Add the 2-Image Realistic Interpolation to the backlog.", on_click=lambda: toggle_generate_inference(position, **backlog_update), type="secondary") - - -# --------------------- METHODS ----------------------- -def toggle_generate_inference(position, **kwargs): - for k,v in kwargs.items(): - st.session_state[k] = v - if position + '_generate_inference' not in st.session_state: - st.session_state[position + '_generate_inference'] = True - else: - st.session_state[position + '_generate_inference'] = not st.session_state[position + '_generate_inference'] - -def update_session_state_with_animation_details(shot_uuid, timing_list, strength_of_frames, distances_to_next_frames, speeds_of_transitions, freedoms_between_frames, motions_during_frames, individual_prompts, individual_negative_prompts, lora_data): - data_repo = DataRepo() - shot = data_repo.get_shot_from_uuid(shot_uuid) - meta_data = shot.meta_data_dict - timing_data = [] - for idx, timing in enumerate(timing_list): - if idx < len(timing_list): - st.session_state[f'strength_of_frame_{shot_uuid}_{idx}'] = strength_of_frames[idx] - st.session_state[f'individual_prompt_{shot_uuid}_{idx}'] = 
individual_prompts[idx] - st.session_state[f'individual_negative_prompt_{shot_uuid}_{idx}'] = individual_negative_prompts[idx] - st.session_state[f'motion_during_frame_{shot_uuid}_{idx}'] = motions_during_frames[idx] - if idx < len(timing_list) - 1: - st.session_state[f'distance_to_next_frame_{shot_uuid}_{idx}'] = distances_to_next_frames[idx] * 2 - st.session_state[f'speed_of_transition_{shot_uuid}_{idx}'] = speeds_of_transitions[idx] - st.session_state[f'freedom_between_frames_{shot_uuid}_{idx}'] = freedoms_between_frames[idx] - - # adding into the meta-data - state_data = { - "strength_of_frame" : strength_of_frames[idx], - "individual_prompt" : individual_prompts[idx], - "individual_negative_prompt" : individual_negative_prompts[idx], - "motion_during_frame" : motions_during_frames[idx], - "distance_to_next_frame" : distances_to_next_frames[idx] * 2 if idx < len(timing_list) - 1 else DEFAULT_SHOT_MOTION_VALUES["distance_to_next_frame"], - "speed_of_transition" : speeds_of_transitions[idx] if idx < len(timing_list) - 1 else DEFAULT_SHOT_MOTION_VALUES["speed_of_transition"], - "freedom_between_frames" : freedoms_between_frames[idx] if idx < len(timing_list) - 1 else DEFAULT_SHOT_MOTION_VALUES["freedom_between_frames"], - } - - timing_data.append(state_data) - - main_setting_data = {} - main_setting_data[f'lora_data_{shot.uuid}'] = lora_data - main_setting_data[f"strength_of_adherence_value_{shot.uuid}"] = st.session_state["strength_of_adherence"] - main_setting_data[f"type_of_motion_context_index_{shot.uuid}"] = st.session_state["type_of_motion_context"] - main_setting_data[f"positive_prompt_video_{shot.uuid}"] = st.session_state["overall_positive_prompt"] - main_setting_data[f"negative_prompt_video_{shot.uuid}"] = st.session_state["overall_negative_prompt"] - # main_setting_data[f"amount_of_motion_{shot.uuid}"] = st.session_state["amount_of_motion"] - - checkpoints_dir = "ComfyUI/models/checkpoints" - all_files = os.listdir(checkpoints_dir) - model_files = [file for file in all_files if file.endswith('.safetensors') or file.endswith('.ckpt')] - model_files = [file for file in model_files if "xl" not in file] - - if 'sd_model_video' in st.session_state and len(model_files): - idx = model_files.index(st.session_state["sd_model_video"]) if st.session_state["sd_model_video"] in model_files else 0 - main_setting_data[f'ckpt_{shot.uuid}'] = model_files[idx] - else: - main_setting_data[f'ckpt_{shot.uuid}'] = default_model - - meta_data.update( - { - ShotMetaData.MOTION_DATA.value : json.dumps( - { - "timing_data": timing_data, - "main_setting_data": main_setting_data - } - ) - } - ) - - data_repo.update_shot(**{"uuid": shot_uuid, "meta_data": json.dumps(meta_data)}) - return meta_data - - -def update_motion_for_all_frames(shot_uuid, timing_list): - amount_of_motion = st.session_state.get("amount_of_motion_overall", 1.0) # Default to 1.0 if not set - for idx, _ in enumerate(timing_list): - st.session_state[f'motion_during_frame_{shot_uuid}_{idx}'] = amount_of_motion - - -def format_frame_prompts_with_buffer(frame_numbers, individual_prompts, buffer): - adjusted_frame_numbers = [frame + buffer for frame in frame_numbers] - - # Preprocess prompts to remove any '/' or '"' from the values - processed_prompts = [prompt.replace("/", "").replace('"', '') for prompt in individual_prompts] - - # Format the adjusted frame numbers and processed prompts - formatted = ', '.join(f'"{int(frame)}": "{prompt}"' for frame, prompt in zip(adjusted_frame_numbers, processed_prompts)) - return formatted - -def 
extract_strength_values(type_of_key_frame_influence, dynamic_key_frame_influence_values, keyframe_positions, linear_key_frame_influence_value): - - if type_of_key_frame_influence == "dynamic": - # Process the dynamic_key_frame_influence_values depending on its format - if isinstance(dynamic_key_frame_influence_values, str): - dynamic_values = eval(dynamic_key_frame_influence_values) - else: - dynamic_values = dynamic_key_frame_influence_values - - # Iterate through the dynamic values and convert tuples with two values to three values - dynamic_values_corrected = [] - for value in dynamic_values: - if len(value) == 2: - value = (value[0], value[1], value[0]) - dynamic_values_corrected.append(value) - - return dynamic_values_corrected - else: - # Process for linear or other types - if len(linear_key_frame_influence_value) == 2: - linear_key_frame_influence_value = (linear_key_frame_influence_value[0], linear_key_frame_influence_value[1], linear_key_frame_influence_value[0]) - return [linear_key_frame_influence_value for _ in range(len(keyframe_positions) - 1)] - -def update_interpolation_settings(values=None, timing_list=None): - default_values = { - 'type_of_frame_distribution': 0, - 'frames_per_keyframe': 16, - 'type_of_key_frame_influence': 0, - 'length_of_key_frame_influence': 1.0, - 'type_of_cn_strength_distribution': 0, - 'linear_cn_strength_value': (0.0,0.7), - 'linear_frame_distribution_value': 16, - 'linear_key_frame_influence_value': 1.0, - 'interpolation_style': 0, - 'motion_scale': 1.0, - 'negative_prompt_video': 'bad image, worst quality', - 'ip_adapter_strength': 1.0, - 'ip_adapter_influence': 1.0, - 'soft_scaled_cn_weights_multiple_video': 0.85 - } - - for idx in range(0, len(timing_list)): - default_values[f'dynamic_frame_distribution_values_{idx}'] = (idx) * 16 - default_values[f'dynamic_key_frame_influence_values_{idx}'] = 1.0 - default_values[f'dynamic_strength_values_{idx}'] = (0.0,0.7) - - for key, default_value in default_values.items(): - st.session_state[key] = values.get(key, default_value) if values and values.get(key) is not None else default_value - # print(f"{key}: {st.session_state[key]}") - -def extract_influence_values(type_of_key_frame_influence, dynamic_key_frame_influence_values, keyframe_positions, linear_key_frame_influence_value): - # Check and convert linear_key_frame_influence_value if it's a float or string float - # if it's a string that starts with a parenthesis, convert it to a tuple - if isinstance(linear_key_frame_influence_value, str) and linear_key_frame_influence_value[0] == "(": - linear_key_frame_influence_value = eval(linear_key_frame_influence_value) - - - if not isinstance(linear_key_frame_influence_value, tuple): - if isinstance(linear_key_frame_influence_value, (float, str)): - try: - value = float(linear_key_frame_influence_value) - linear_key_frame_influence_value = (value, value) - except ValueError: - raise ValueError("linear_key_frame_influence_value must be a float or a string representing a float") - - number_of_outputs = len(keyframe_positions) - - if type_of_key_frame_influence == "dynamic": - # Convert list of individual float values into tuples - if all(isinstance(x, float) for x in dynamic_key_frame_influence_values): - dynamic_values = [(value, value) for value in dynamic_key_frame_influence_values] - elif isinstance(dynamic_key_frame_influence_values[0], str) and dynamic_key_frame_influence_values[0] == "(": - string_representation = ''.join(dynamic_key_frame_influence_values) - dynamic_values = 
eval(f'[{string_representation}]') - else: - dynamic_values = dynamic_key_frame_influence_values if isinstance(dynamic_key_frame_influence_values, list) else [dynamic_key_frame_influence_values] - return dynamic_values[:number_of_outputs] - else: - return [linear_key_frame_influence_value for _ in range(number_of_outputs)] - -def get_keyframe_positions(type_of_frame_distribution, dynamic_frame_distribution_values, images, linear_frame_distribution_value): - if type_of_frame_distribution == "dynamic": - # Check if the input is a string or a list - if isinstance(dynamic_frame_distribution_values, str): - # Sort the keyframe positions in numerical order - return sorted([int(kf.strip()) for kf in dynamic_frame_distribution_values.split(',')]) - elif isinstance(dynamic_frame_distribution_values, list): - return sorted(dynamic_frame_distribution_values) - else: - # Calculate the number of keyframes based on the total duration and linear_frames_per_keyframe - return [i * linear_frame_distribution_value for i in range(len(images))] - -def calculate_weights(keyframe_positions, strength_values, buffer, key_frame_influence_values,last_key_frame_position): - - def calculate_influence_frame_number(key_frame_position, next_key_frame_position, distance): - # Calculate the absolute distance between key frames - key_frame_distance = abs(next_key_frame_position - key_frame_position) - - # Apply the distance multiplier - extended_distance = key_frame_distance * distance - - # Determine the direction of influence based on the positions of the key frames - if key_frame_position < next_key_frame_position: - # Normal case: influence extends forward - influence_frame_number = key_frame_position + extended_distance - else: - # Reverse case: influence extends backward - influence_frame_number = key_frame_position - extended_distance - - # Return the result rounded to the nearest integer - return round(influence_frame_number) - - def find_curve(batch_index_from, batch_index_to, strength_from, strength_to, interpolation,revert_direction_at_midpoint, last_key_frame_position,i, number_of_items,buffer): - # Initialize variables based on the position of the keyframe - range_start = batch_index_from - range_end = batch_index_to - # if it's the first value, set influence range from 1.0 to 0.0 - if i == number_of_items - 1: - range_end = last_key_frame_position - - steps = range_end - range_start - diff = strength_to - strength_from - - # Calculate index for interpolation - index = np.linspace(0, 1, steps // 2 + 1) if revert_direction_at_midpoint else np.linspace(0, 1, steps) - - # Calculate weights based on interpolation type - if interpolation == "linear": - weights = np.linspace(strength_from, strength_to, len(index)) - elif interpolation == "ease-in": - weights = diff * np.power(index, 2) + strength_from - elif interpolation == "ease-out": - weights = diff * (1 - np.power(1 - index, 2)) + strength_from - elif interpolation == "ease-in-out": - weights = diff * ((1 - np.cos(index * np.pi)) / 2) + strength_from - - if revert_direction_at_midpoint: - weights = np.concatenate([weights, weights[::-1]]) - - # Generate frame numbers - frame_numbers = np.arange(range_start, range_start + len(weights)) - - # "Dropper" component: For keyframes with negative start, drop the weights - if range_start < 0 and i > 0: - drop_count = abs(range_start) - weights = weights[drop_count:] - frame_numbers = frame_numbers[drop_count:] - - # Dropper component: for keyframes a range_End is greater than last_key_frame_position, drop the weights - 
if range_end > last_key_frame_position and i < number_of_items - 1: - drop_count = range_end - last_key_frame_position - weights = weights[:-drop_count] - frame_numbers = frame_numbers[:-drop_count] - - return weights, frame_numbers - - weights_list = [] - frame_numbers_list = [] - - for i in range(len(keyframe_positions)): - keyframe_position = keyframe_positions[i] - interpolation = "ease-in-out" - # strength_from = strength_to = 1.0 - - if i == 0: # first image - # GET IMAGE AND KEYFRAME INFLUENCE VALUES - key_frame_influence_from, key_frame_influence_to = key_frame_influence_values[i] - start_strength, mid_strength, end_strength = strength_values[i] - keyframe_position = keyframe_positions[i] - next_key_frame_position = keyframe_positions[i+1] - batch_index_from = keyframe_position - batch_index_to_excl = calculate_influence_frame_number(keyframe_position, next_key_frame_position, key_frame_influence_to) - weights, frame_numbers = find_curve(batch_index_from, batch_index_to_excl, mid_strength, end_strength, interpolation, False, last_key_frame_position, i, len(keyframe_positions), buffer) - # interpolation = "ease-in" - - elif i == len(keyframe_positions) - 1: # last image - # GET IMAGE AND KEYFRAME INFLUENCE VALUES - key_frame_influence_from,key_frame_influence_to = key_frame_influence_values[i] - start_strength, mid_strength, end_strength = strength_values[i] - # strength_from, strength_to = cn_strength_values[i-1] - keyframe_position = keyframe_positions[i] - previous_key_frame_position = keyframe_positions[i-1] - batch_index_from = calculate_influence_frame_number(keyframe_position, previous_key_frame_position, key_frame_influence_from) - batch_index_to_excl = keyframe_position - weights, frame_numbers = find_curve(batch_index_from, batch_index_to_excl, start_strength, mid_strength, interpolation, False, last_key_frame_position, i, len(keyframe_positions), buffer) - # interpolation = "ease-out" - - else: # middle images - # GET IMAGE AND KEYFRAME INFLUENCE VALUES - key_frame_influence_from,key_frame_influence_to = key_frame_influence_values[i] - start_strength, mid_strength, end_strength = strength_values[i] - keyframe_position = keyframe_positions[i] - - # CALCULATE WEIGHTS FOR FIRST HALF - previous_key_frame_position = keyframe_positions[i-1] - batch_index_from = calculate_influence_frame_number(keyframe_position, previous_key_frame_position, key_frame_influence_from) - batch_index_to_excl = keyframe_position - first_half_weights, first_half_frame_numbers = find_curve(batch_index_from, batch_index_to_excl, start_strength, mid_strength, interpolation, False, last_key_frame_position, i, len(keyframe_positions), buffer) - - # CALCULATE WEIGHTS FOR SECOND HALF - next_key_frame_position = keyframe_positions[i+1] - batch_index_from = keyframe_position - batch_index_to_excl = calculate_influence_frame_number(keyframe_position, next_key_frame_position, key_frame_influence_to) - second_half_weights, second_half_frame_numbers = find_curve(batch_index_from, batch_index_to_excl, mid_strength, end_strength, interpolation, False, last_key_frame_position, i, len(keyframe_positions), buffer) - - # COMBINE FIRST AND SECOND HALF - weights = np.concatenate([first_half_weights, second_half_weights]) - frame_numbers = np.concatenate([first_half_frame_numbers, second_half_frame_numbers]) - - weights_list.append(weights) - frame_numbers_list.append(frame_numbers) - - return weights_list, frame_numbers_list - -def plot_weights(weights_list, frame_numbers_list): - plt.figure(figsize=(12, 6)) - for i, 
weights in enumerate(weights_list): - # Divide each frame number by 100 - frame_numbers = [frame_number / 100 for frame_number in frame_numbers_list[i]] - - plt.plot(frame_numbers, weights, label=f'Frame {i + 1}') - - # Plot settings - plt.xlabel('Seconds') # Updated to represent seconds - plt.ylabel('Weight') - plt.legend() - plt.ylim(0, 1.0) - plt.show() - st.set_option('deprecation.showPyplotGlobalUse', False) - st.pyplot() - -def transform_data(strength_of_frames, movements_between_frames, speeds_of_transitions, distances_to_next_frames, type_of_motion_context, strength_of_adherence, individual_prompts, individual_negative_prompts, buffer, motions_during_frames): - # FRAME SETTINGS - def adjust_and_invert_relative_value(middle_value, relative_value): - if relative_value is not None: - adjusted_value = middle_value * relative_value - return round(middle_value - adjusted_value, 2) - return None - - def invert_value(value): - return round(1.0 - value, 2) if value is not None else None - - # Creating output_strength with relative and inverted start and end values - output_strength = [] - for i, strength in enumerate(strength_of_frames): - start_value = None if i == 0 else movements_between_frames[i - 1] - end_value = None if i == len(strength_of_frames) - 1 else movements_between_frames[i] - - # Adjusting and inverting start and end values relative to the middle value - adjusted_start = adjust_and_invert_relative_value(strength, start_value) - adjusted_end = adjust_and_invert_relative_value(strength, end_value) - - output_strength.append((adjusted_start, strength, adjusted_end)) - - # Creating output_speeds with inverted values - output_speeds = [(None, None) for _ in range(len(speeds_of_transitions) + 1)] - for i in range(len(speeds_of_transitions)): - current_tuple = list(output_speeds[i]) - next_tuple = list(output_speeds[i + 1]) - - inverted_speed = invert_value(speeds_of_transitions[i]) - current_tuple[1] = inverted_speed * 2 - next_tuple[0] = inverted_speed * 2 - - output_speeds[i] = tuple(current_tuple) - output_speeds[i + 1] = tuple(next_tuple) - - # Creating cumulative_distances - cumulative_distances = [0] - for distance in distances_to_next_frames: - cumulative_distances.append(cumulative_distances[-1] + distance) - - cumulative_distances = [int(float(value) * 16) for value in cumulative_distances] - - # MOTION CONTEXT SETTINGS - if type_of_motion_context == "Low": - context_length = 16 - context_stride = 1 - context_overlap = 2 - - elif type_of_motion_context == "Standard": - context_length = 16 - context_stride = 2 - context_overlap = 4 - - elif type_of_motion_context == "High": - context_length = 16 - context_stride = 4 - context_overlap = 4 - - # SPARSE CTRL SETTINGS - multipled_base_end_percent = 0.05 * (strength_of_adherence * 10) - multipled_base_adapter_strength = 0.05 * (strength_of_adherence * 20) - - # FRAME PROMPTS FORMATTING - def format_frame_prompts_with_buffer(frame_numbers, individual_prompts, buffer): - adjusted_frame_numbers = [frame + buffer for frame in frame_numbers] - - # Preprocess prompts to remove any '/' or '"' from the values - processed_prompts = [prompt.replace("/", "").replace('"', '') for prompt in individual_prompts] - - # Format the adjusted frame numbers and processed prompts - formatted = ', '.join(f'"{int(frame)}": "{prompt}"' for frame, prompt in zip(adjusted_frame_numbers, processed_prompts)) - return formatted - - # Applying format_frame_prompts_with_buffer - formatted_individual_prompts = 
format_frame_prompts_with_buffer(cumulative_distances, individual_prompts, buffer) - formatted_individual_negative_prompts = format_frame_prompts_with_buffer(cumulative_distances, individual_negative_prompts, buffer) - - # MOTION STRENGTHS FORMATTING - adjusted_frame_numbers = [0] + [frame + buffer for frame in cumulative_distances[1:]] - - # Format the adjusted frame numbers and strengths - motions_during_frames = ', '.join(f'{int(frame)}:({strength})' for frame, strength in zip(adjusted_frame_numbers, motions_during_frames)) - - return output_strength, output_speeds, cumulative_distances, context_length, context_stride, context_overlap, multipled_base_end_percent, multipled_base_adapter_strength, formatted_individual_prompts, formatted_individual_negative_prompts,motions_during_frames - diff --git a/ui_components/widgets/display_element.py b/ui_components/widgets/display_element.py index 5fa7cb21..658d0a65 100644 --- a/ui_components/widgets/display_element.py +++ b/ui_components/widgets/display_element.py @@ -16,12 +16,15 @@ def individual_video_display_element(file: Union[InternalFileObject, str]): def display_motion_lora(motion_lora, lora_file_dict = {}): filename_video_dict = read_from_motion_lora_local_db() - if motion_lora and motion_lora in filename_video_dict: + if motion_lora and motion_lora in filename_video_dict and filename_video_dict[motion_lora]: st.image(filename_video_dict[motion_lora]) - else: + elif motion_lora in lora_file_dict: loras = [ele.split("/")[-1] for ele in lora_file_dict.keys()] - idx = loras.index(motion_lora) - if idx >= 0: - st.image(lora_file_dict[list(lora_file_dict.keys())[idx]]) - else: - st.warning("No preview video available") \ No newline at end of file + try: + idx = loras.index(motion_lora) + if lora_file_dict[list(lora_file_dict.keys())[idx]]: + st.image(lora_file_dict[list(lora_file_dict.keys())[idx]]) + except ValueError: + st.write("") + + diff --git a/ui_components/widgets/image_zoom_widgets.py b/ui_components/widgets/image_zoom_widgets.py index 063965bc..88bcd0a5 100644 --- a/ui_components/widgets/image_zoom_widgets.py +++ b/ui_components/widgets/image_zoom_widgets.py @@ -8,7 +8,7 @@ from utils.data_repo.data_repo import DataRepo -def zoom_inputs(position='in-frame', horizontal=False): +def zoom_inputs(position='in-frame', horizontal=False, shot_uuid=None): if horizontal: col1, col2 = st.columns(2) col3, col4 = st.columns(2) @@ -26,7 +26,7 @@ def zoom_inputs(position='in-frame', horizontal=False): col1.number_input( - "Zoom In/Out", min_value=10, max_value=1000, step=10, key=f"zoom_level_input", value=st.session_state['zoom_level_input_default']) + "Zoom In/Out", min_value=10, max_value=1000, step=1, key=f"zoom_level_input", value=st.session_state['zoom_level_input_default']) col2.number_input( "Rotate", min_value=-360, max_value=360, step=5, key="rotation_angle_input", value=st.session_state['rotation_angle_input_default']) diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 66041e6d..4fda6a36 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -37,12 +37,13 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel timing_list: List[InternalFrameTimingObject] = shot.timing_list with column: - col1, col2, col3 = st.columns([1, 1, 1]) + col1, col2, col3 = st.columns([1.25, 0.75, 1]) with col1: - move_frame_mode = st_memory.toggle("Enter Frame Changer™ mode", value=False, key=f"move_frame_mode_{shot.uuid}") + + move_frame_mode = 
st_memory.toggle("Open Frame Changer™", value=False, key=f"move_frame_mode_{shot.uuid}", help="Enable to move frames around") if st.session_state[f"move_frame_mode_{shot.uuid}"]: - st.warning("You are in 'move frame' mode. You have to press 'Save' below to save the changes.") - if st.button("Save", key=f"save_move_frame_{shot.uuid}", help="Save the changes made in 'move frame' mode", use_container_width=True): + st.warning("You're in frame moving mode. You must press 'Save' to save changes.") + if st.button("Save", key=f"save_move_frame_{shot.uuid}", help="Save the changes made in 'move frame' mode", use_container_width=True, type="primary"): update_shot_frames(shot_uuid, timing_list) st.rerun() if f"shot_data_{shot_uuid}" not in st.session_state: @@ -54,8 +55,10 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel "position": idx } for idx, timing in enumerate(timing_list)] st.session_state[f"shot_data_{shot_uuid}"] = pd.DataFrame(shot_data) - else: - st.info("Use this to move frames") + if st.button("Discard changes", key=f"discard_changes_{shot.uuid}", help="Discard all changes made in 'move frame' mode", use_container_width=True): + st.session_state[f"move_frame_mode_{shot.uuid}"] = False + st.rerun() + else: st.session_state[f"shot_data_{shot_uuid}"] = None @@ -100,9 +103,10 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel for i in range(0, len(st.session_state[f"shot_data_{shot_uuid}"]), items_per_row): with st.container(): - grid = st.columns(items_per_row) + grid = st.columns(items_per_row) for j in range(items_per_row): idx = i + j + if idx < len(st.session_state[f"shot_data_{shot_uuid}"]): # Ensure idx does not exceed the length of shot_df with grid[j % items_per_row]: # Use modulo for column indexingr @@ -143,20 +147,30 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel st.session_state[f'list_to_move_{shot.uuid}'].remove(idx) st.rerun() - header1, header2 = st.columns([1, 1.5]) - with header1: - st_memory.toggle("Open Zoom", key=f"open_zoom_{shot.uuid}_{idx}", value=False) + if 'zoom_to_open' not in st.session_state: + st.session_state['zoom_to_open'] = None + + header1, header2 = st.columns([1.5, 1]) + + with header1: + + if f'open_zoom_{shot.uuid}_{idx}' not in st.session_state: + st.session_state[f'open_zoom_{shot.uuid}_{idx}'] = False + + + if st.button("Open zoom", key=f"open_zoom_{shot.uuid}_{idx}"): + st.session_state['zoom_to_open'] = idx + st.rerun() + - if st.session_state[f"open_zoom_{shot.uuid}_{idx}"]: + if st.session_state['zoom_to_open'] == idx: with header2: - if st.button("Reset",use_container_width=True): + if st.button("Reset",use_container_width=True,key=f"reset_zoom_{shot.uuid}_{idx}"): reset_zoom_element() st.rerun() - # close all other zooms - for i in range(0, len(st.session_state[f"shot_data_{shot_uuid}"])): - if i != idx: - st.session_state[f"open_zoom_{shot.uuid}_{i}"] = False + + input_image = generate_pil_image(st.session_state[f"shot_data_{shot_uuid}"].loc[idx]['image_location']) @@ -168,7 +182,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel st.session_state['flip_vertically'] = False st.session_state['flip_horizontally'] = False - zoom_inputs(horizontal=True) + zoom_inputs(horizontal=True, shot_uuid=f"{shot_uuid}_{idx}") st.caption("Output Image:") @@ -185,7 +199,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel st.image(output_image, use_column_width=True) - if 
st.button("Save", key=f"save_zoom_{idx}", help="Save the changes made in 'move frame' mode",type="primary",use_container_width=True): + if st.button("Save", key=f"save_zoom_{idx}", help="Save the changes made in 'move frame' mode",use_container_width=True,type="primary"): # make file_name into a random uuid using uuid file_name = f"{uuid.uuid4()}.png" @@ -205,14 +219,15 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel location = save_location st.session_state[f"shot_data_{shot_uuid}"].loc[idx, 'image_location'] = location - st.session_state[f'open_zoom_{shot.uuid}_{idx}'] = False + #st.session_state[f'open_zoom_{shot.uuid}_{idx}'] = False + st.session_state['zoom_to_open'] = None st.rerun() st.markdown("***") - bottom1, bottom2 = st.columns([1, 1]) + bottom1, bottom2 = st.columns([1, 2]) with bottom1: - st.warning("You are in 'move frame' mode. You have to press 'Save' below to save the changes.") - if st.button("Save", key=f"save_move_frame_{shot.uuid}_bottom", help="Save the changes made in 'move frame' mode", use_container_width=True): + st.warning("You're in frame moving mode. You must press 'Save' to save changes.") + if st.button("Save", key=f"save_move_frame_{shot.uuid}_bottom", help="Save the changes made in 'move frame' mode", use_container_width=True,type="primary"): update_shot_frames(shot_uuid, timing_list) st.rerun() st.markdown("***") diff --git a/ui_components/widgets/sm_animation_style_element.py b/ui_components/widgets/sm_animation_style_element.py index 8f9dd4aa..1f64db1b 100644 --- a/ui_components/widgets/sm_animation_style_element.py +++ b/ui_components/widgets/sm_animation_style_element.py @@ -139,7 +139,7 @@ def update_prompt(): with f3: st.write("") st.write("") - st.info("Use these sparingly, as they can have a large impact on the video. You can also edit them for individual frames in the advanced settings above.") + st.info("Use these sparingly, as they can have a large impact on the video. 
You can also edit them for individual frames above.") st.markdown("***") st.markdown("##### Overall motion settings") @@ -223,21 +223,22 @@ def select_motion_lora_element(shot_uuid, model_files): lora_data = [] lora_file_dest = "ComfyUI/models/animatediff_motion_lora" lora_file_links = { - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_tony_stark_r64_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_train_r128_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/300_car_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_desert_48_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_jeep_driving_r32_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_man_running_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_rotation_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/750_jeep_driving_r32_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/300_zooming_in_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_cat_walking_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_playing_banjo_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_woman_dancing_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif", - "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_zooming_out_temporal_unet.safetensors" :"https://cdn.pixabay.com/animation/2023/06/17/16/02/16-02-33-34_512.gif" + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/1000_jeep_driving_r32_temporal_unet.safetensors" :"", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_tony_stark_r64_temporal_unet.safetensors" :"", + 
"https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/250_train_r128_temporal_unet.safetensors" :"", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/300_car_temporal_unet.safetensors" :"", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_desert_48_temporal_unet.safetensors" :"", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_car_temporal_unet.safetensors" :"", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_jeep_driving_r32_temporal_unet.safetensors" :"", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_man_running_temporal_unet.safetensors" :"", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/500_rotation_temporal_unet.safetensors" :"", + "https://huggingface.co/Kijai/animatediff_motion_director_loras/resolve/main/750_jeep_driving_r32_temporal_unet.safetensors" :"", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/300_zooming_in_temporal_unet.safetensors" :"", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_cat_walking_temporal_unet.safetensors" :"", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_playing_banjo_temporal_unet.safetensors" :"", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_woman_dancing_temporal_unet.safetensors" :"", + "https://huggingface.co/peteromallet/ad_motion_loras/resolve/main/400_zooming_out_temporal_unet.safetensors" :"", + } # ---------------- ADD LORA ----------------- @@ -258,26 +259,29 @@ def select_motion_lora_element(shot_uuid, model_files): for idx, lora in enumerate(st.session_state[f"lora_data_{shot_uuid}"]): if not lora: continue - h1, h2, h3, h4 = st.columns([1, 1, 1, 0.5]) + h1, h2, h3, h4, h5, h6, h7 = st.columns([1, 0.25, 1,0.25, 1, 0.25,0.5]) with h1: file_idx = files.index(lora["filename"]) motion_lora = st.selectbox("Which LoRA would you like to use?", options=files, key=f"motion_lora_{idx}", index=file_idx) with h2: - strength_of_lora = st.slider("How strong would you like the LoRA to be?", min_value=0.0, max_value=1.0, value=lora["lora_strength"], step=0.01, key=f"strength_of_lora_{idx}") - lora_data.append({"filename": motion_lora, "lora_strength": strength_of_lora, "filepath": lora_file_dest + "/" + motion_lora}) + display_motion_lora(motion_lora, lora_file_links) with h3: - when_to_apply_lora = st.slider("When to apply the LoRA?", min_value=0, max_value=100, value=(0,100), step=1, key=f"when_to_apply_lora_{idx}",disabled=True,help="This feature is not yet available.") + strength_of_lora = st.slider("Strength:", min_value=0.0, max_value=1.0, value=lora["lora_strength"], step=0.01, key=f"strength_of_lora_{idx}") + lora_data.append({"filename": motion_lora, "lora_strength": strength_of_lora, "filepath": lora_file_dest + "/" + motion_lora}) - with h4: + with h5: + when_to_apply_lora = st.slider("When to apply:", min_value=0, max_value=100, value=(0,100), step=1, key=f"when_to_apply_lora_{idx}",disabled=True,help="This feature is not yet available.") + + with h7: st.write("") if st.button("Remove", key=f"remove_lora_{idx}"): st.session_state[f"lora_data_{shot_uuid}"].pop(idx) st.rerun() # displaying preview - display_motion_lora(motion_lora, lora_file_links) + if len(st.session_state[f"lora_data_{shot_uuid}"]) == 0: text = "Add a LoRA" @@ -359,12 +363,12 @@ def select_motion_lora_element(shot_uuid, model_files): # ---------------- 
TRAIN LORA -------------- with tab3: - b1, b2 = st.columns([1, 1]) + b1, b2, b3 = st.columns([1, 1, 0.5]) with b1: lora_name = st.text_input("Name this LoRA", key="lora_name") if model_files and len(model_files): base_sd_model = st.selectbox( - label="Select base sd model for training", + label="Select base:", options=model_files, key="base_sd_model_video", index=0 @@ -402,7 +406,8 @@ def select_motion_lora_element(shot_uuid, model_files): video_height, base_sd_model ) - + with b2: + st.info("This takes around 30 minutes to train.") return lora_data From 95625df590804d7b7e2fa3b6eee441e6b8dc1446 Mon Sep 17 00:00:00 2001 From: peteromallet Date: Sun, 24 Mar 2024 03:04:23 +0100 Subject: [PATCH 40/43] Small UX improvements --- ui_components/widgets/shot_view.py | 86 ++++++++++++++++++++-- utils/ml_processor/comfy_data_transform.py | 58 +-------------- 2 files changed, 80 insertions(+), 64 deletions(-) diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 4fda6a36..357b3c3e 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -119,7 +119,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel else: st.warning("No primary image present.") - btn1, btn2, btn3, btn4, btn5 = st.columns([1, 1, 1, 1, 1.25]) + btn1, btn2, btn3, btn4, btn5 = st.columns([1, 1, 1, 1, 3.5]) with btn1: if st.button("⬅️", key=f"move_frame_back_{idx}", help="Move frame back", use_container_width=True): @@ -157,10 +157,21 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel if f'open_zoom_{shot.uuid}_{idx}' not in st.session_state: st.session_state[f'open_zoom_{shot.uuid}_{idx}'] = False - - if st.button("Open zoom", key=f"open_zoom_{shot.uuid}_{idx}"): - st.session_state['zoom_to_open'] = idx - st.rerun() + if idx != st.session_state['zoom_to_open']: + if st.button("Open zoom", key=f"open_zoom_{shot.uuid}_{idx}_button"): + st.session_state['zoom_level_input'] = 100 + st.session_state['rotation_angle_input'] = 0 + st.session_state['x_shift'] = 0 + st.session_state['y_shift'] = 0 + st.session_state['flip_vertically'] = False + st.session_state['flip_horizontally'] = False + + st.session_state['zoom_to_open'] = idx + st.rerun() + else: + if st.button("Close zoom", key=f"close_zoom_{shot.uuid}_{idx}_button"): + st.session_state['zoom_to_open'] = None + st.rerun() if st.session_state['zoom_to_open'] == idx: @@ -180,9 +191,67 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel st.session_state['x_shift'] = 0 st.session_state['y_shift'] = 0 st.session_state['flip_vertically'] = False - st.session_state['flip_horizontally'] = False + st.session_state['flip_horizontally'] = False + + st.caption("Zoom and Rotate:") + h1, h2, h3, h4 = st.columns([1, 1, 1, 1]) + + with h1: + # zoom in with emoji button that increases zoom level by 10 + if st.button("➕", key=f"zoom_in_{idx}", help="Zoom in by 10%", use_container_width=True): + st.session_state['zoom_level_input'] += 10 + # zoom out with emoji button that decreases zoom level by 10 + if st.button("➖", key=f"zoom_out_{idx}", help="Zoom out by 10%", use_container_width=True): + st.session_state['zoom_level_input'] -= 10 + + with h2: + # shift up with emoji button that decreases y shift by 10 + if st.button("⬆️", key=f"shift_up_{idx}", help="Shift up by 10px", use_container_width=True): + st.session_state['y_shift'] += 10 + + # shift down with emoji button that increases y shift by 10 + if st.button("⬇️", 
key=f"shift_down_{idx}", help="Shift down by 10px", use_container_width=True): + st.session_state['y_shift'] -= 10 + + with h3: + + # shift left with emoji button that decreases x shift by 10 + if st.button("⬅️", key=f"shift_left_{idx}", help="Shift left by 10px", use_container_width=True): + st.session_state['x_shift'] -= 10 + # rotate left with emoji button that decreases rotation angle by 90 + if st.button("↩️", key=f"rotate_left_{idx}", help="Rotate left by 5°", use_container_width=True): + st.session_state['rotation_angle_input'] -= 5 + + + with h4: + + + # shift right with emoji button that increases x shift by 10 + if st.button("➡️", key=f"shift_right_{idx}", help="Shift right by 10px", use_container_width=True): + st.session_state['x_shift'] += 10 + + # rotate right with emoji button that increases rotation angle by 90 + if st.button("↪️", key=f"rotate_right_{idx}", help="Rotate right by 5°", use_container_width=True): + st.session_state['rotation_angle_input'] += 5 + - zoom_inputs(horizontal=True, shot_uuid=f"{shot_uuid}_{idx}") + + + + i1, i2 = st.columns([1, 1]) + with i1: + if st.button("↕️", key=f"flip_vertically_{idx}", help="Flip vertically", use_container_width=True): + + st.session_state['flip_vertically'] = not st.session_state['flip_vertically'] + + with i2: + if st.button("↔️", key=f"flip_horizontally_{idx}", help="Flip horizontally", use_container_width=True): + st.session_state['flip_horizontally'] = not st.session_state['flip_horizontally'] + + + + +# zoom_inputs(horizontal=True, shot_uuid=f"{shot_uuid}_{idx}") st.caption("Output Image:") @@ -230,6 +299,9 @@ def shot_keyframe_element(shot_uuid, items_per_row, column=None, position="Timel if st.button("Save", key=f"save_move_frame_{shot.uuid}_bottom", help="Save the changes made in 'move frame' mode", use_container_width=True,type="primary"): update_shot_frames(shot_uuid, timing_list) st.rerun() + if st.button("Discard changes", key=f"discard_changes_{shot.uuid}_2", help="Discard all changes made in 'move frame' mode", use_container_width=True): + st.session_state[f"move_frame_mode_{shot.uuid}"] = False + st.rerun() st.markdown("***") else: diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index eb465694..a6fb8fca 100644 --- a/utils/ml_processor/comfy_data_transform.py +++ b/utils/ml_processor/comfy_data_transform.py @@ -394,62 +394,6 @@ def update_structure_control_image(json, image_uuid, weight): return json - - def update_structure_control_image(json, image, weight): - # Integrate all updates including new nodes and modifications in a single step - data_repo = DataRepo() - image = data_repo.get_file_from_uuid(image) - image = image.filename - # image = os.path.basename(image) - - json.update({ - "560": { - "inputs": { - "image": image, - "upload": "image" - }, - "class_type": "LoadImage", - "_meta": { - "title": "Load Image" - } - }, - "563": { - "inputs": { - "weight": weight, - "noise": 0.3, - "weight_type": "original", - "start_at": 0, - "end_at": 1, - "short_side_tiles": 2, - "tile_weight": 0.6, - "ipadapter": ["564", 0], - "clip_vision": ["370", 0], - "image": ["560", 0], - "model": ["558", 3] - }, - "class_type": "IPAdapterTilesMasked", - "_meta": { - "title": "IPAdapter Masked Tiles (experimental)" - } - }, - "564": { - "inputs": { - "ipadapter_file": "ip_plus_composition_sd15.safetensors" - }, - "class_type": "IPAdapterModelLoader", - "_meta": { - "title": "Load IPAdapter Model" - } - } - }) - - # Update the "207" node's model pair to point to "563" - if 
"207" in json: - json["207"]["inputs"]["model"] = ["563", 0] - - return json - - def update_json_with_loras(json_data, loras): start_id = 536 new_ids = [] @@ -775,4 +719,4 @@ def get_file_zip_url(file_uuid_list, index_files=False) -> str: filename_list = [f.filename for f in file_list] if not index_files else [] # file names would be indexed like 1.png, 2.png ... zip_path = zip_images([f.location for f in file_list], 'videos/temp/input_images.zip', filename_list) - return ml_client.upload_training_data(zip_path, delete_after_upload=True) + return ml_client.upload_training_data(zip_path, delete_after_upload=True) \ No newline at end of file From 2ded3a436fd24305464376d7caa3d09c645f2d7b Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Sun, 24 Mar 2024 12:14:05 +0000 Subject: [PATCH 41/43] sm workflow fix + state reloading fix + audio removed + minor fixes --- ui_components/components/animate_shot_page.py | 23 +++++--- .../components/project_settings_page.py | 5 +- .../components/video_rendering_page.py | 1 + .../methods/animation_style_methods.py | 2 +- .../widgets/sm_animation_style_element.py | 13 +++-- utils/ml_processor/comfy_data_transform.py | 54 ++++++++++++++----- 6 files changed, 72 insertions(+), 26 deletions(-) diff --git a/ui_components/components/animate_shot_page.py b/ui_components/components/animate_shot_page.py index 92a4ad92..323f1a5d 100644 --- a/ui_components/components/animate_shot_page.py +++ b/ui_components/components/animate_shot_page.py @@ -40,14 +40,25 @@ def video_rendering_page(shot_uuid, selected_variant): file_uuid_list = [] if f"type_of_animation_{shot.uuid}" not in st.session_state: st.session_state[f"type_of_animation_{shot.uuid}"] = 0 - # AnimateShotMethod.BATCH_CREATIVE_INTERPOLATION.value + if st.session_state[f"type_of_animation_{shot.uuid}"] == 0: # AnimateShotMethod.BATCH_CREATIVE_INTERPOLATION.value # loading images from a particular video variant - if selected_variant: - log = data_repo.get_inference_log_from_uuid(selected_variant) - shot_data = json.loads(log.input_params) - file_uuid_list = shot_data.get('origin_data', json.dumps({})).get('settings', {}).get('file_uuid_list', []) - # picking current images if no variant is selected + if selected_variant: + log = data_repo.get_inference_log_from_uuid(selected_variant) + shot_data = json.loads(log.input_params) + file_uuid_list = shot_data.get('origin_data', json.dumps({})).get('settings', {}).get('file_uuid_list', []) + else: + # hackish sol, will fix later + for idx in range(2): + if f'img{idx+1}_uuid_{shot_uuid}' in st.session_state and st.session_state[f'img{idx+1}_uuid_{shot_uuid}']: + file_uuid_list.append(st.session_state[f'img{idx+1}_uuid_{shot_uuid}']) + + if not (f'video_desc_{shot_uuid}' in st.session_state and st.session_state[f'video_desc_{shot_uuid}']): + st.session_state[f'video_desc_{shot_uuid}'] = "" + + # picking current images if no file_uuids are found + # (either no variant was selected or no prev img in session_state was present) + if not (file_uuid_list and len(file_uuid_list)): for timing in shot.timing_list: if timing.primary_image and timing.primary_image.location: file_uuid_list.append(timing.primary_image.uuid) diff --git a/ui_components/components/project_settings_page.py b/ui_components/components/project_settings_page.py index 69b92222..56e58ba1 100644 --- a/ui_components/components/project_settings_page.py +++ b/ui_components/components/project_settings_page.py @@ -42,7 +42,4 @@ def project_settings_page(project_uuid): if st.button("Save"): 
data_repo.update_project_setting(project_uuid, width=width) data_repo.update_project_setting(project_uuid, height=height) - st.experimental_rerun() - - st.write("") - attach_audio_element(project_uuid, True) \ No newline at end of file + st.experimental_rerun() \ No newline at end of file diff --git a/ui_components/components/video_rendering_page.py b/ui_components/components/video_rendering_page.py index 365c01c8..c7c351b8 100644 --- a/ui_components/components/video_rendering_page.py +++ b/ui_components/components/video_rendering_page.py @@ -263,6 +263,7 @@ def two_img_realistic_interpolation_page(shot_uuid, img_list: List[InternalFileO description_of_motion = st.text_area("Describe the motion you want between the frames:", \ key=f"description_of_motion_{shot.uuid}", value=st.session_state[f'video_desc_{shot_uuid}']) st.info("This is very important and will likely require some iteration.") + st.info("NOTE: The model for this animation is 10.5 GB in size, which can take some time to download") variant_count = 1 # Assuming a default value for variant_count, adjust as necessary vid_quality = "full" # Assuming full quality, adjust as necessary based on your requirements diff --git a/ui_components/methods/animation_style_methods.py b/ui_components/methods/animation_style_methods.py index f939f2e1..5bf3b183 100644 --- a/ui_components/methods/animation_style_methods.py +++ b/ui_components/methods/animation_style_methods.py @@ -67,7 +67,7 @@ def load_shot_settings(shot_uuid, log_uuid=None): elif key == f"type_of_generation_index_{shot.uuid}": if not isinstance(st.session_state[key], int): st.session_state[key] = 0 - st.session_state["creative_interpolation_type"] = ["Fast", "Detailed"][st.session_state[key]] + st.session_state["creative_interpolation_type"] = ["Detailed", "Fast"][st.session_state[key]] st.rerun() elif data_type == ShotMetaData.DYNAMICRAFTER_DATA.value: diff --git a/ui_components/widgets/sm_animation_style_element.py b/ui_components/widgets/sm_animation_style_element.py index 1f64db1b..aa1f1066 100644 --- a/ui_components/widgets/sm_animation_style_element.py +++ b/ui_components/widgets/sm_animation_style_element.py @@ -163,12 +163,19 @@ def update_motion_for_all_frames(shot_uuid, timing_list): if f"strength_of_structure_control_image_{shot_uuid}" not in st.session_state: st.session_state[f"strength_of_structure_control_image_{shot_uuid}"] = None - control_motion_with_image = st_memory.toggle("Control motion with an image", help="This will allow you to upload images to control the motion of the video.",key=f"control_motion_with_image_{shot_uuid}") - if control_motion_with_image: + img_loaded_from_settings = f"structure_control_image_uuid_{shot_uuid}" in st.session_state and st.session_state[f"structure_control_image_uuid_{shot_uuid}"] + control_motion_with_image = st.toggle( + "Control motion with an image", + help="This will allow you to upload images to control the motion of the video.", + key=f"control_motion_with_image_{shot_uuid}", + value=img_loaded_from_settings + ) + + if control_motion_with_image or img_loaded_from_settings: project_settings = data_repo.get_project_setting(shot.project.uuid) width, height = project_settings.width, project_settings.height - if f"structure_control_image_uuid_{shot_uuid}" in st.session_state and st.session_state[f"structure_control_image_uuid_{shot_uuid}"]: + if img_loaded_from_settings: uploaded_image = data_repo.get_file_from_uuid(st.session_state[f"structure_control_image_uuid_{shot_uuid}"]) uploaded_image_pil = 
Image.open(uploaded_image.location) uploaded_image_pil = uploaded_image_pil.resize((width, height)) diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index a6fb8fca..6c01f50e 100644 --- a/utils/ml_processor/comfy_data_transform.py +++ b/utils/ml_processor/comfy_data_transform.py @@ -361,22 +361,52 @@ def update_structure_control_image(json, image_uuid, weight): "563": { "inputs": { "weight": weight, - "noise": 0.3, - "weight_type": "original", + "weight_type": "linear", + "combine_embeds": "concat", "start_at": 0, "end_at": 1, - "short_side_tiles": 2, - "tile_weight": 0.6, - "ipadapter": ["564", 0], - "clip_vision": ["370", 0], - "image": ["560", 0], - "model": ["558", 3] - }, - "class_type": "IPAdapterTilesMasked", - "_meta": { - "title": "IPAdapter Masked Tiles (experimental)" + "sharpening": 0, + "model": [ + "558", + 3 + ], + "ipadapter": [ + "564", + 0 + ], + "image": [ + "560", + 0 + ], + "clip_vision": [ + "370", + 0 + ] + }, + "class_type": "IPAdapterTiled", + "_meta": { + "title": "IPAdapter Tiled" } }, + # "563": { + # "inputs": { + # "weight": weight, + # "noise": 0.3, + # "weight_type": "original", + # "start_at": 0, + # "end_at": 1, + # "short_side_tiles": 2, + # "tile_weight": 0.6, + # "ipadapter": ["564", 0], + # "clip_vision": ["370", 0], + # "image": ["560", 0], + # "model": ["558", 3] + # }, + # "class_type": "IPAdapterTilesMasked", + # "_meta": { + # "title": "IPAdapter Masked Tiles (experimental)" + # } + # }, "564": { "inputs": { "ipadapter_file": "ip_plus_composition_sd15.safetensors" From b7f0c31d285a11a1a4d1270738ab44ae11198812 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Sun, 24 Mar 2024 12:39:27 +0000 Subject: [PATCH 42/43] ipadapter composition fixed --- .streamlit/credentials.toml | 2 ++ utils/ml_processor/comfy_data_transform.py | 3 ++- .../ipadapter_composition_workflow_api.json | 9 ++++----- 3 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 .streamlit/credentials.toml diff --git a/.streamlit/credentials.toml b/.streamlit/credentials.toml new file mode 100644 index 00000000..cb8767db --- /dev/null +++ b/.streamlit/credentials.toml @@ -0,0 +1,2 @@ +[general] +email="" \ No newline at end of file diff --git a/utils/ml_processor/comfy_data_transform.py b/utils/ml_processor/comfy_data_transform.py index 6c01f50e..d8cd3a46 100644 --- a/utils/ml_processor/comfy_data_transform.py +++ b/utils/ml_processor/comfy_data_transform.py @@ -27,7 +27,8 @@ ComfyWorkflow.UPSCALER: {"workflow_path": 'comfy_workflows/video_upscaler_api.json', "output_node_id": [243]}, ComfyWorkflow.MOTION_LORA: {"workflow_path": 'comfy_workflows/motion_lora_api.json', "output_node_id": [11, 14, 26, 30, 34]}, # ComfyWorkflow.MOTION_LORA: {"workflow_path": 'comfy_workflows/motion_lora_test_api.json', "output_node_id": [11, 14]}, - ComfyWorkflow.DYNAMICRAFTER: {"workflow_path": 'comfy_workflows/dynamicrafter_api.json', "output_node_id": [2]} + ComfyWorkflow.DYNAMICRAFTER: {"workflow_path": 'comfy_workflows/dynamicrafter_api.json', "output_node_id": [2]}, + ComfyWorkflow.IPADAPTER_COMPOSITION: {"workflow_path": 'comfy_workflows/ipadapter_composition_workflow_api.json', "output_node_id": [27]} } diff --git a/utils/ml_processor/comfy_workflows/ipadapter_composition_workflow_api.json b/utils/ml_processor/comfy_workflows/ipadapter_composition_workflow_api.json index cc5a4cea..c510f328 100644 --- a/utils/ml_processor/comfy_workflows/ipadapter_composition_workflow_api.json +++ 
b/utils/ml_processor/comfy_workflows/ipadapter_composition_workflow_api.json @@ -144,11 +144,10 @@ "28": { "inputs": { "weight": 1, - "noise": 0, - "weight_type": "original", + "weight_type": "linear", + "combine_embeds": "concat", "start_at": 0, "end_at": 1, - "unfold_batch": false, "ipadapter": [ "3", 0 @@ -166,9 +165,9 @@ 0 ] }, - "class_type": "IPAdapterApply", + "class_type": "IPAdapterAdvanced", "_meta": { - "title": "Apply IPAdapter" + "title": "IPAdapter Advanced" } } } \ No newline at end of file From 3360a5162c65ec4675240a9c2b352a9853cdb94f Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Sun, 24 Mar 2024 18:11:21 +0000 Subject: [PATCH 43/43] ipadapter fix + readme updated --- entrypoint.bat | 2 - entrypoint.sh | 1 - linux_setup.sh | 35 -------------- readme.md | 8 ++-- resized_image.jpg | Bin 26657 -> 0 bytes text.json | 1 - ui_components/components/explorer_page.py | 44 +++++++++--------- .../components/video_rendering_page.py | 2 +- .../comfy_workflows/ipadapter_face_api.json | 21 ++++----- .../ipadapter_face_plus_api.json | 36 +++++++------- .../comfy_workflows/ipadapter_plus_api.json | 15 +++--- .../comfy_workflows/video_upscaler_api.json | 9 ++-- windows_setup.bat | 26 ----------- 13 files changed, 65 insertions(+), 135 deletions(-) delete mode 100644 entrypoint.bat delete mode 100644 entrypoint.sh delete mode 100644 linux_setup.sh delete mode 100644 resized_image.jpg delete mode 100644 text.json delete mode 100644 windows_setup.bat diff --git a/entrypoint.bat b/entrypoint.bat deleted file mode 100644 index c6ccf2ae..00000000 --- a/entrypoint.bat +++ /dev/null @@ -1,2 +0,0 @@ -@echo off -streamlit run app.py --runner.fastReruns false --server.port 5500 \ No newline at end of file diff --git a/entrypoint.sh b/entrypoint.sh deleted file mode 100644 index ec0ff19e..00000000 --- a/entrypoint.sh +++ /dev/null @@ -1 +0,0 @@ -streamlit run app.py --runner.fastReruns false --server.port 5500 \ No newline at end of file diff --git a/linux_setup.sh b/linux_setup.sh deleted file mode 100644 index cfc91fd9..00000000 --- a/linux_setup.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -# Store the current directory path -current_dir="$(pwd)" - -# Define the project directory path -project_dir="$current_dir/Dough" - -# Check if the "Dough" directory doesn't exist and we're not already inside it -if [ ! -d "$project_dir" ] && [ "$(basename "$current_dir")" != "Dough" ]; then - # Clone the git repo - git clone --depth 1 -b main https://github.com/banodoco/Dough.git "$project_dir" - cd "$project_dir" - git clone --depth 1 -b feature/package https://github.com/piyushK52/comfy_runner.git - git clone https://github.com/comfyanonymous/ComfyUI.git - - # Create virtual environment - python3 -m venv "dough-env" - - # Install system dependencies - if command -v sudo &> /dev/null; then - sudo apt-get update && sudo apt-get install -y libpq-dev python3.10-dev - else - apt-get update && apt-get install -y libpq-dev python3.10-dev - fi - - # Install Python dependencies - echo $(pwd) - . ./dough-env/bin/activate && pip install -r "requirements.txt" - . ./dough-env/bin/activate && pip install -r "comfy_runner/requirements.txt" - . ./dough-env/bin/activate && pip install -r "ComfyUI/requirements.txt" - - # Copy the environment file - cp "$project_dir/.env.sample" "$project_dir/.env" -fi diff --git a/readme.md b/readme.md index 9a4f056e..d56a732e 100644 --- a/readme.md +++ b/readme.md @@ -97,7 +97,7 @@ Then go to this URL, and it should be running! This commands sets up the app. 
Run this only the first time, after that you can simply start the app using the next command. ```bash -curl -sSL https://raw.githubusercontent.com/banodoco/Dough/green-head/linux_setup.sh | bash +curl -sSL https://raw.githubusercontent.com/banodoco/Dough/green-head/scripts/linux_setup.sh | bash ``` ### Run the app @@ -105,7 +105,7 @@ curl -sSL https://raw.githubusercontent.com/banodoco/Dough/green-head/linux_setu you can run the app using ```bash -source ./dough-env/bin/activate && sh entrypoint.sh +source ./dough-env/bin/activate && sh ./scripts/entrypoint.sh ``` ## Instructions for Windows: @@ -117,14 +117,14 @@ source ./dough-env/bin/activate && sh entrypoint.sh Run the setup script ```bash -iwr -useb "https://raw.githubusercontent.com/banodoco/Dough/green-head/windows_setup.bat" -OutFile "script.bat" +iwr -useb "https://raw.githubusercontent.com/banodoco/Dough/green-head/scripts/windows_setup.bat" -OutFile "script.bat" Start-Process "cmd.exe" -ArgumentList "/c script.bat" ``` ### Run the app ```bash -. .\dough-env\Scripts\activate ; .\entrypoint.bat +. .\dough-env\Scripts\activate ; .\scripts\entrypoint.bat ``` If you're having any issues, please share them in our [Discord](https://discord.com/invite/8Wx9dFu5tP). \ No newline at end of file diff --git a/resized_image.jpg b/resized_image.jpg deleted file mode 100644 index 4d8752545fb411457138c50809a90987da0f4abf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 26657 zcmbT7Wl$W^*XD=d9w1n7w?MGLC4}I?eemE6KIjmfKp0?f2m}chbRal`CAb9$?htfv zcgymB->u!M{j|HctNTme>bk$`u5(X6=Q;mo|NR9#S5;I|1fZb-0BBDK;NJp39)N>| zg^h)YgN==ii;IIt@Pd#4AD@7ll#KWV-Ae{~x|g)HjI4YdjLffCXlXe`xnA)L2n!1{ zaEMEb2}9rn50Ys za#&>AR@lrQF9bsq^Ke++R(Fu=j2*KIeeev!#iO94qJGK7&cVsWEesS96%&_`f2W|R zq^zQZXk-kswz0LdcW`uqczOHy`uRh{BR)k&eU64DC8wmOrGLrD%r7V``d(a8 zT2@nASKrXsglz8Y>h9_7>mL{#pO~DQo|&DSUtL?@*xW*G@9dtOo}FJ@UR~eZ{)Y<< zfd1cDPuKqj`#-oyp17W2V4!1Q|A!0hneWqyPJ)5SB!ERKr;TmpLB=c?iu2-aVqSFz zE{l-PG5H72F+2)Z;Z?Si|DgRB+5a6_*#8&V{{s8pT=M_|bhM|JhfV^J1&lN&l+$ME z%X_E(+A3Br?}vd5h4fmbQ{eV`@cOv4Y%Rk zv9T|ELxW7?K{d!;adL+!YwdcWG}>?zkoTBCMeYstschEf6GUdQt za8>|HH>vBmgs3b5%bO)}09Y0#2(f?v@vXD%8=F_Y_rG$cD5}@+quugO>5@po0{7x6 zRovAM7Xuw_TCImj-=32@34oFobE4F|D+ep&F}+=3OFS=+bonDw3%g?>A>Buaea_ak6(Yh%?tfC(%(8@eWE0v|4VpK)##e~ZRMtL)xQ%un0HvUe>A%4j% zoKzTkT!j|G_w#Pz>B>x9b$lcY+Qj~b$fO#h>|Kk=llX?bT)Fj|Sgo`4PG>8@naqLT z2vGFrG~Wb9r1!W@zP0V9%)^%}>fxKCxk-w}|zS9kQr}06JOj4Hp_Vy$G^2$#WS%dxlvwGBI-AA9AEcA)J?t)8S z?Ir4a?(dKKNl|LrC)H`4&ayu&DG$DH_9d^qCs(1QSuU3ezdE}4VhI@LyqF2;% z>Wm{j%eM>9v5+XxJd2;!?ueS^wW#H*b&0e*$)&{f=YJK6zhT%ssQX?fC=}0|7W;vF@DG{RvrI!CeZIs75`1Ff>+b%j(m|+;9+HmCEhOCHJH0^Ik^_7!DUAMvc1jY zJJ=VE^+UCP&6keV&$@LC&TFkEdOw>d@09`}-t2c#$>2t}6zf~V;_DwbLBbitT+EDp zck9&mdqZ{x>WU8`r~^`hNA~6qX|Y?$&?>=@25|W-@1*T!0`adbL#05MIG@|Uc|2~P zEZ&EZ2b%}u#lLA>SS#0et^R(PeHnbQS4hM$cEQY$o>h*vm<9+2VM4eV7p%wGWC-?PGrp-_SS?)i z-RuzvuZ zxGAK~sebf*k7ZYR2SDW30Zq^LyR__R8`#KB|17&*cjW5PTQIeOoBQqyG`_3cq#xuW zc<4~H5#dE+^dQvZzx0q(RInJBt6iV8EPv$-r-Fw6nQ4qXvv~e}KbYpsz<%8Mf=81@ zs}a5?t7M+5+U$qqurDR&uTCqM|26$V2^&UtGazLMS&NU1v&TrE$B5_twm1#<^myzz zItOlfE*VkbqbpIsHTyL1rdDP40M=$-LBRC!ew+ zWFIi_eJPYV)|~>?&PCuF6`Q4hpbu7X!$2J0T+kpl=dI8;*4<30Mu z%fe`hAub{ScIHwehr<>!Tg38>cxBSKdNL;JJ)!j$&E#2Y+!VD z)l8eS6P|n)URSz|E6x#UHe%=(wuV#I=MOGJ;}ykAy^b}UbFp_*MtmaT=)^lZZfMG7 zd0{8xsT*d+92kF|Nv(R~lkDi*wQ~z(J#cevpea|(NN(ac5oFn%lMANCM2JV*X0-&a zA7sV^1v=L74N!~nxO@=h>X%ucPx|5Mysf329Hao%?6QdTyw2S8mNpk>8fOv5lQTfd zgpUPkZYp*B17KFRR5a9=bRvpoX+s%rY}09p-P<%*&4+@0t`7BW{0^e;2Y{{Wxt zE3#oOa;Ec*trITvbb>>JX5miqc=sjP6QPEtD6vB#Fy)&caK;%PD(x#>c>DK89^At0 
z{77yP$Xo^YH5K=ZmK5A^kT5XV$*u)Tr(?U?Q|@$n&ar2tY4ToO%e54Nv~pPb4|?%0 z75Ljz)invN;bevYKv5CN106H#^siBW55?k70p4ESW=rXzk)dWhL$^2!>C>;heEn|W zCPrbvAe!o<6unGpJF}#)wel^6#LKm@z%|-<55Us+ru{WdMmCQ~g}zWm4Z{P_f3xa; z3Wvbn620-I(p}u6L83(1XXs`l_*eR#)$eH>Hdd<|Dn$e-fCm9o5Ady}E{Mr4tmk}L zsjicxEv2^9mUS_pU*JyX`d64a0Z*lC>)NT+wOOvRe7x+49>_X=eSaFjyp$pno`SmR zLzX8cS@Sa^SiD&Frg$fw^@*q2HQu2TIu5ucM_ziGw|0KhBuvUeRLe>%~bHhMRQ>|(RCQi?J_86E3h;%_S5iT)Xp6Dr7W=DRZI+o0z8sD6e|Yx#3RAA4tcs^W$=#6Y zdRUDXNstVE*!@1WwVI4^P+r}ooure$L>v!O*jE#7OzlZU89MH)q0N+J@IQJ-{)7HC z%E&`Lp{V{y+JCG>CZo{ZQJ;p z!Fs$>KC7o(={H?>Se?k;z;^trsnvWnVXNBtQ$o2Pb<|;$`x^5-f5G>^MXgaPN?J0OqHO29X453P7TDn3ZQf7(2Z2yWZ!71Kf!?FGV6p&y>Fw!T5X6Oe zJXCYLmf)?iNLjitaOXa_t8vSIK2>yIL$1b!kU(!G~y4D zo;iv8p@+;qzMopg_;K*MUlEw~O<^R1Ne3~;epk8w00sB&+Pd!s{4ckN$8q8*;<wZLz)XGTPn?D_E_qqDWwgLL_J~ss~^zl=05C zE$u(EAaCDJGbta!o}Z8Mu9s20k51F)yqoSqg^15zLHO5<-f9zH-A#FN+ohGaHamI& z><1OK3VhD*p^a(ss}V^cRV4JRd#OaS`CYoV(x$w+3J!W!Y#XI<$1)tBVO{n(nVV7ivp zJhziEK*z2-X1!KPH^NGumB~*AH3{2j%%^es)-g1aH#{=?A&N3a;4xi^e~?!trbHu+ z9~dK^E2CE|fYwr4nnCDDUni|OPrXB)dQ^&WP|HPxHDwJ{Q&uB24AB->nzd`gx6-Pm zS-0W;01D1q8pm?zqLVaMMyC%fW}}NumhII_?Tqz3>!AA_8%Xko4NDYj8h|@i{?|<(ovQ{K7fs$*D@eZ-}nU>lk`=mKP?KNv_nvdD-+BnW# zhAW76CGy>CqLezbp0xTR!pMDT%_kK^$VVXYQOQ54t@%tjg6eIcoQlG^y21mLe>RSzb%ZtcNR zRa0$lQbLius`%WQhL& z-C%3m^oDsf%V#93JZi)@e2j`wihGqN*2hr{dNYtd-E$wap#FHNZZv4FVr93ME4Wx- zqY<2cBUurvl^MaQmN4B^RXBN(&E$EPjQjesPTXS^9RaR?_WL?>QCk zi5AS5*^rTrIv=R-T-L4N{TBJ!Nwmd{`k5@>$tUW(ujnf`P2WRBRa(sR$aM&me65r8 zsRhiJGNdsL?&K<}^(5DQru;qdeC_r*r@FVglz!`V;xvT&ApWAYbk7KB9vYr0Y$cZF z>}NL;y5wv+u1-M(TE`o|ql&~~mq zX^27xTvEuT1_x@bDQfiT!9iIZ6O)s>GUqKLg(UpPpJ7_MbRdH#ALq4Hj?QTr?sgv` z7)PJ>k2oKIt*tu3QdNrJIn7nfKl7NfD$>YVlm!_D}s%PcjBc-&w7+V2n3F_@op-4kzA}0 z=B~gw=AcV@w-Rj?n`Ol^U@F85^VBE{908M8BTb9%S;?D45>ZJ-W@u!CP6mo8jBw_g zGx<=wgm9pqD59DZ*yOGquH-X0<+=|+S=SbdbDU8{c1J^sMpfi~LT>HtRFTZBiYTpC z#xHWm_Cj2I%-w23pv0qw{V1ZPwpxl2^hjPZtj8y~u6I(<%qJ-@z#IZ7qJ+gHtaA4d zN{m(6PtvmH01`1p6r@!>$N(5M>>du(8&vRxtGeP_2|mcp{q)A={JK#^X17M~W^~bH z0h5}xp17il(U}dnoIw$C2?MaND)AqSueCX(wu#ycV8wQ+!EdReicD*BU*SHQllYQr z#>Qcf3Jk?MJ(iq5vily@%* zW2YSe7_A*2!?0gl8^)E8$Uap;_fN0A6jv0dB^#!7C`HcA8biX!7n|7$ z&tEK4zu_2e*SKEayXX1QMMt)ua@{C)2JpOW+}*z4_XcV!j}5~NTdn17>OO7fA-}?k zDJrvXaCD%S#S42jmTxSuTyjXQ5vK$4hd){ zJxMfCSvfOh6k2GJ#vK)hp&6+D#coOc%5&%_qNd}q7xr=k^2p$NaB5L)5Ci4TJ$unb YJF!_3Dk^ZQ2hx^`DHJUfQAiK}*=>JuV*mgE diff --git a/text.json b/text.json deleted file mode 100644 index c65dab33..00000000 --- a/text.json +++ /dev/null @@ -1 +0,0 @@ -{"207": {"inputs": {"add_noise": "enable", "noise_seed": 940648779231253, "steps": 20, "cfg": 8, "sampler_name": "euler_ancestral", "scheduler": "normal", "start_at_step": 0, "end_at_step": 20, "return_with_leftover_noise": "disable", "preview_method": "auto", "vae_decode": "true", "model": ["563", 0], "positive": ["505", 0], "negative": ["505", 1], "latent_image": ["464", 0], "optional_vae": ["458", 0]}, "class_type": "KSampler Adv. (Efficient)", "_meta": {"title": "KSampler Adv. 
(Efficient), CN sampler"}}, "281": {"inputs": {"frame_rate": 15, "loop_count": 0, "filename_prefix": "steerable-motion/AD_", "format": "video/h264-mp4", "pix_fmt": "yuv420p", "crf": 20, "save_metadata": true, "pingpong": false, "save_output": true, "images": ["559", 0]}, "class_type": "VHS_VideoCombine", "_meta": {"title": "Video Combine \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62"}}, "342": {"inputs": {"context_length": 16, "context_stride": 2, "context_overlap": 4, "context_schedule": "uniform", "closed_loop": false, "fuse_method": "flat", "use_on_equal_length": false, "start_percent": 0, "guarantee_steps": 1}, "class_type": "ADE_AnimateDiffUniformContextOptions", "_meta": {"title": "Context Options\u25c6Looped Uniform \ud83c\udfad\ud83c\udd50\ud83c\udd53"}}, "354": {"inputs": {"split_index": 3, "images": ["207", 5]}, "class_type": "VHS_SplitImages", "_meta": {"title": "Split Image Batch \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62"}}, "369": {"inputs": {"ipadapter_file": "ip-adapter-plus_sd15.bin"}, "class_type": "IPAdapterModelLoader", "_meta": {"title": "Load IPAdapter Model"}}, "370": {"inputs": {"clip_name": "SD1.5/pytorch_model.bin"}, "class_type": "CLIPVisionLoader", "_meta": {"title": "Load CLIP Vision"}}, "389": {"inputs": {"images": ["401", 0]}, "class_type": "PreviewImage", "_meta": {"title": "Preview Image"}}, "401": {"inputs": {"directory": "./ComfyUI/input/", "image_load_cap": 0, "skip_first_images": 0, "select_every_nth": 1}, "class_type": "VHS_LoadImagesPath", "_meta": {"title": "Load Images (Path) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62"}}, "436": {"inputs": {"images": ["558", 0]}, "class_type": "PreviewImage", "_meta": {"title": "Preview Image"}}, "458": {"inputs": {"vae_name": "vae-ft-mse-840000-ema-pruned.safetensors"}, "class_type": "VAELoader", "_meta": {"title": "Load VAE"}}, "461": {"inputs": {"ckpt_name": "Deliberate_v2.safetensors"}, "class_type": "CheckpointLoaderSimple", "_meta": {"title": "Load Checkpoint"}}, "464": {"inputs": {"width": 512, "height": 512, "batch_size": ["558", 5]}, "class_type": "ADE_EmptyLatentImageLarge", "_meta": {"title": "Empty Latent Image (Big Batch) \ud83c\udfad\ud83c\udd50\ud83c\udd53"}}, "467": {"inputs": {"sparsectrl_name": "v3_sd15_sparsectrl_rgb.ckpt", "use_motion": true, "motion_strength": 1, "motion_scale": 1, "sparse_method": ["558", 4]}, "class_type": "ACN_SparseCtrlLoaderAdvanced", "_meta": {"title": "Load SparseCtrl Model \ud83d\udec2\ud83c\udd50\ud83c\udd52\ud83c\udd5d"}}, "468": {"inputs": {"strength": 0.6, "start_percent": 0, "end_percent": 0.05, "positive": ["558", 1], "negative": ["558", 2], "control_net": ["467", 0], "image": ["469", 0]}, "class_type": "ACN_AdvancedControlNetApply", "_meta": {"title": "Apply Advanced ControlNet \ud83d\udec2\ud83c\udd50\ud83c\udd52\ud83c\udd5d"}}, "469": {"inputs": {"image": ["401", 0], "vae": ["458", 0], "latent_size": ["464", 0]}, "class_type": "ACN_SparseCtrlRGBPreprocessor", "_meta": {"title": "RGB SparseCtrl \ud83d\udec2\ud83c\udd50\ud83c\udd52\ud83c\udd5d"}}, "470": {"inputs": {"lora_name": "v3_sd15_adapter.ckpt", "strength_model": 0.1, "strength_clip": 0.25, "model": ["461", 0], "clip": ["461", 1]}, "class_type": "LoraLoader", "_meta": {"title": "Load LoRA"}}, "505": {"inputs": {"strength": 0.5, "start_percent": 0.6, "end_percent": 0.675, "positive": ["468", 0], "negative": ["468", 1], "control_net": ["467", 0], "image": ["469", 0]}, "class_type": "ACN_AdvancedControlNetApply", "_meta": {"title": "Apply Advanced ControlNet 
\ud83d\udec2\ud83c\udd50\ud83c\udd52\ud83c\udd5d"}}, "508": {"inputs": {"image": ["401", 0]}, "class_type": "GetImageSize+", "_meta": {"title": "\ud83d\udd27 Get Image Size"}}, "541": {"inputs": {"text": "\"4\": \"\", \"12\": \"\"", "max_frames": 8, "current_frame": 0, "print_output": false, "pre_text": "", "app_text": "", "pw_a": 0, "pw_b": 0, "pw_c": 0, "pw_d": 0, "clip": ["470", 1]}, "class_type": "PromptSchedule", "_meta": {"title": "Positive Prompt"}}, "543": {"inputs": {"text": "\"4\": \"\", \"12\": \"\"", "max_frames": 8, "current_frame": 0, "print_output": false, "pre_text": "", "app_text": "", "pw_a": 0, "pw_b": 0, "pw_c": 0, "pw_d": 0, "clip": ["470", 1]}, "class_type": "PromptSchedule", "_meta": {"title": "Negative Prompt"}}, "544": {"inputs": {"float_val": ["548", 0]}, "class_type": "ADE_MultivalDynamic", "_meta": {"title": "Multival Dynamic \ud83c\udfad\ud83c\udd50\ud83c\udd53"}}, "545": {"inputs": {"start_percent": 0, "end_percent": 1, "motion_model": ["546", 0], "scale_multival": ["544", 0]}, "class_type": "ADE_ApplyAnimateDiffModel", "_meta": {"title": "Apply AnimateDiff Model (Adv.) \ud83c\udfad\ud83c\udd50\ud83c\udd53\u2461"}}, "546": {"inputs": {"model_name": "v3_sd15_mm.ckpt"}, "class_type": "ADE_LoadAnimateDiffModel", "_meta": {"title": "Load AnimateDiff Model \ud83c\udfad\ud83c\udd50\ud83c\udd53\u2461"}}, "547": {"inputs": {"beta_schedule": "sqrt_linear (AnimateDiff)", "model": ["558", 3], "m_models": ["545", 0], "context_options": ["342", 0]}, "class_type": "ADE_UseEvolvedSampling", "_meta": {"title": "Use Evolved Sampling \ud83c\udfad\ud83c\udd50\ud83c\udd53\u2461"}}, "548": {"inputs": {"text": "0:(1.3), 12:(1.3)", "print_output": true, "num_latents": ["464", 0]}, "class_type": "BatchValueScheduleLatentInput", "_meta": {"title": "Batch Value Schedule (Latent Input) \ud83d\udcc5\ud83c\udd55\ud83c\udd5d"}}, "558": {"inputs": {"control_net_name": "SD1.5/animatediff/v3_sd15_sparsectrl_rgb.ckpt", "type_of_frame_distribution": "dynamic", "linear_frame_distribution_value": 16, "dynamic_frame_distribution_values": "0, 8", "type_of_key_frame_influence": "dynamic", "linear_key_frame_influence_value": 1.0, "dynamic_key_frame_influence_values": "(None, 0.8), (0.8, None)", "type_of_strength_distribution": "dynamic", "linear_strength_value": "1.0", "dynamic_strength_values": "(None, 0.7, 0.42), (0.42, 0.7, None)", "soft_scaled_cn_weights_multiplier": 0.85, "buffer": 4, "relative_cn_strength": 0.0, "relative_ipadapter_strength": 1.0, "ipadapter_noise": 0.3, "ipadapter_start_at": 0, "ipadapter_end_at": 0.75, "cn_start_at": 0, "cn_end_at": 0.75, "positive": ["541", 0], "negative": ["543", 1], "images": ["401", 0], "model": ["470", 0], "ipadapter": ["369", 0], "clip_vision": ["370", 0]}, "class_type": "BatchCreativeInterpolation", "_meta": {"title": "Batch Creative Interpolation \ud83c\udf9e\ufe0f\ud83c\udd62\ud83c\udd5c"}}, "559": {"inputs": {"ckpt_name": "film_net_fp32.pt", "clear_cache_after_n_frames": 10, "multiplier": 2, "frames": ["354", 2]}, "class_type": "FILM VFI", "_meta": {"title": "FILM VFI"}}, "560": {"inputs": {"image": "videos/temp/a0d6e31f-c958-4d23-92f6-d9f36f85df2d.png", "upload": "image"}, "class_type": "LoadImage", "_meta": {"title": "Load Image"}}, "563": {"inputs": {"weight": 0.61, "noise": 0.3, "weight_type": "original", "start_at": 0, "end_at": 1, "short_side_tiles": 2, "tile_weight": 0.6, "ipadapter": ["564", 0], "clip_vision": ["370", 0], "image": ["560", 0], "model": ["558", 3]}, "class_type": "IPAdapterTilesMasked", "_meta": {"title": "IPAdapter Masked 
Tiles (experimental)"}}, "564": {"inputs": {"ipadapter_file": "ip_plus_composition_sd15.safetensors"}, "class_type": "IPAdapterModelLoader", "_meta": {"title": "Load IPAdapter Model"}}} \ No newline at end of file diff --git a/ui_components/components/explorer_page.py b/ui_components/components/explorer_page.py index 9053c71b..4a593f21 100644 --- a/ui_components/components/explorer_page.py +++ b/ui_components/components/explorer_page.py @@ -229,27 +229,27 @@ def handle_image_input(column, type_of_generation, output_value_name, data_repo= output, log = ml_client.predict_model_output_standardized(ML_MODEL.ipadapter_composition, query_obj, queue_inference=QUEUE_INFERENCE_QUERIES) - elif generation_method == InputImageStyling.CONTROLNET_CANNY.value: - edge_pil_img = get_canny_img(st.session_state["input_image_1"], low_threshold=50, high_threshold=150) # redundant incase of local inference - input_img = edge_pil_img if not GPU_INFERENCE_ENABLED else st.session_state["input_image_1"] - input_image_file = save_new_image(input_img, project_uuid) - query_obj = MLQueryObject( - timing_uuid=None, - model_uuid=None, - image_uuid=input_image_file.uuid, - guidance_scale=5, - seed=-1, - num_inference_steps=30, - strength=strength_of_image/100, - adapter_type=None, - prompt=prompt, - negative_prompt=negative_prompt, - height=project_settings.height, - width=project_settings.width, - data={'condition_scale': 1, "shot_uuid": shot_uuid} - ) - - output, log = ml_client.predict_model_output_standardized(ML_MODEL.ipadapter_composition, query_obj, queue_inference=QUEUE_INFERENCE_QUERIES) + # elif generation_method == InputImageStyling.CONTROLNET_CANNY.value: + # edge_pil_img = get_canny_img(st.session_state["input_image_1"], low_threshold=50, high_threshold=150) # redundant incase of local inference + # input_img = edge_pil_img if not GPU_INFERENCE_ENABLED else st.session_state["input_image_1"] + # input_image_file = save_new_image(input_img, project_uuid) + # query_obj = MLQueryObject( + # timing_uuid=None, + # model_uuid=None, + # image_uuid=input_image_file.uuid, + # guidance_scale=5, + # seed=-1, + # num_inference_steps=30, + # strength=strength_of_image/100, + # adapter_type=None, + # prompt=prompt, + # negative_prompt=negative_prompt, + # height=project_settings.height, + # width=project_settings.width, + # data={'condition_scale': 1, "shot_uuid": shot_uuid} + # ) + + # output, log = ml_client.predict_model_output_standardized(ML_MODEL.sdxl_controlnet, query_obj, queue_inference=QUEUE_INFERENCE_QUERIES) elif generation_method == InputImageStyling.IPADAPTER_FACE.value: # validation @@ -332,7 +332,7 @@ def handle_image_input(column, type_of_generation, output_value_name, data_repo= query_obj = MLQueryObject( timing_uuid=None, model_uuid=None, - guidance_scale=8, + guidance_scale=6, seed=-1, num_inference_steps=25, strength=0.5, diff --git a/ui_components/components/video_rendering_page.py b/ui_components/components/video_rendering_page.py index c7c351b8..f949796c 100644 --- a/ui_components/components/video_rendering_page.py +++ b/ui_components/components/video_rendering_page.py @@ -130,7 +130,7 @@ def sm_video_rendering_page(shot_uuid, img_list: List[InternalFileObject]): key="creative_interpolation_type", horizontal=True, index=st.session_state[f"type_of_generation_index_{shot.uuid}"], - help="Detailed generation will around twice as long but provide more detailed results." + help="Detailed generation will be around twice as long but provide more detailed results." 
) animate_col_1, _, _ = st.columns([3, 1, 1]) diff --git a/utils/ml_processor/comfy_workflows/ipadapter_face_api.json b/utils/ml_processor/comfy_workflows/ipadapter_face_api.json index d53a3ec8..16042078 100644 --- a/utils/ml_processor/comfy_workflows/ipadapter_face_api.json +++ b/utils/ml_processor/comfy_workflows/ipadapter_face_api.json @@ -127,12 +127,11 @@ "inputs": { "weight": 0.75, "noise": 0.3, + "weight_faceidv2": 0.75, "weight_type": "linear", + "combine_embeds": "concat", "start_at": 0, "end_at": 1, - "faceid_v2": true, - "weight_v2": 0.75, - "unfold_batch": false, "ipadapter": [ "21", 0 @@ -154,33 +153,33 @@ 0 ] }, - "class_type": "IPAdapterApplyFaceID", + "class_type": "IPAdapterFaceID", "_meta": { - "title": "Apply IPAdapter FaceID" + "title": "IPAdapter FaceID" } }, "37": { "inputs": { "provider": "CUDA" }, - "class_type": "InsightFaceLoader", + "class_type": "IPAdapterInsightFaceLoader", "_meta": { - "title": "Load InsightFace" + "title": "IPAdapter InsightFace Loader" } }, "40": { "inputs": { - "crop_position": "center", + "interpolation": "LANCZOS", + "crop_position": "top", "sharpening": 0, - "pad_around": true, "image": [ "24", 0 ] }, - "class_type": "PrepImageForInsightFace", + "class_type": "PrepImageForClipVision", "_meta": { - "title": "Prepare Image For InsightFace" + "title": "Prep Image For ClipVision" } }, "41": { diff --git a/utils/ml_processor/comfy_workflows/ipadapter_face_plus_api.json b/utils/ml_processor/comfy_workflows/ipadapter_face_plus_api.json index 906e0e9c..25973098 100644 --- a/utils/ml_processor/comfy_workflows/ipadapter_face_plus_api.json +++ b/utils/ml_processor/comfy_workflows/ipadapter_face_plus_api.json @@ -130,12 +130,11 @@ }, "27": { "inputs": { - "weight": 0.65, - "noise": 0.3, - "weight_type": "original", + "weight": 1, + "weight_type": "linear", + "combine_embeds": "concat", "start_at": 0, - "end_at": 0.396, - "unfold_batch": false, + "end_at": 1, "ipadapter": [ "26", 0 @@ -153,9 +152,9 @@ 0 ] }, - "class_type": "IPAdapterApply", + "class_type": "IPAdapterAdvanced", "_meta": { - "title": "Apply IPAdapter" + "title": "IPAdapter Advanced" } }, "28": { @@ -185,12 +184,11 @@ "inputs": { "weight": 0.75, "noise": 0.3, + "weight_faceidv2": 0.75, "weight_type": "linear", + "combine_embeds": "concat", "start_at": 0, "end_at": 1, - "faceid_v2": true, - "weight_v2": 0.75, - "unfold_batch": false, "ipadapter": [ "21", 0 @@ -212,24 +210,24 @@ 0 ] }, - "class_type": "IPAdapterApplyFaceID", + "class_type": "IPAdapterFaceID", "_meta": { - "title": "Apply IPAdapter FaceID" + "title": "IPAdapter FaceID" } }, "37": { "inputs": { "provider": "CUDA" }, - "class_type": "InsightFaceLoader", + "class_type": "IPAdapterInsightFaceLoader", "_meta": { - "title": "Load InsightFace" + "title": "IPAdapter InsightFace Loader" } }, "39": { "inputs": { "interpolation": "LANCZOS", - "crop_position": "pad", + "crop_position": "top", "sharpening": 0, "image": [ "28", @@ -243,17 +241,17 @@ }, "40": { "inputs": { - "crop_position": "center", + "interpolation": "LANCZOS", + "crop_position": "top", "sharpening": 0, - "pad_around": true, "image": [ "24", 0 ] }, - "class_type": "PrepImageForInsightFace", + "class_type": "PrepImageForClipVision", "_meta": { - "title": "Prepare Image For InsightFace" + "title": "Prep Image For ClipVision" } }, "41": { diff --git a/utils/ml_processor/comfy_workflows/ipadapter_plus_api.json b/utils/ml_processor/comfy_workflows/ipadapter_plus_api.json index 28f35179..d3e39806 100644 --- a/utils/ml_processor/comfy_workflows/ipadapter_plus_api.json +++ 
b/utils/ml_processor/comfy_workflows/ipadapter_plus_api.json @@ -111,12 +111,11 @@ }, "27": { "inputs": { - "weight": 0.65, - "noise": 0.3, - "weight_type": "original", + "weight": 1, + "weight_type": "linear", + "combine_embeds": "concat", "start_at": 0, - "end_at": 0.396, - "unfold_batch": false, + "end_at": 1, "ipadapter": [ "26", 0 @@ -134,9 +133,9 @@ 0 ] }, - "class_type": "IPAdapterApply", + "class_type": "IPAdapterAdvanced", "_meta": { - "title": "Apply IPAdapter" + "title": "IPAdapter Advanced" } }, "28": { @@ -165,7 +164,7 @@ "39": { "inputs": { "interpolation": "LANCZOS", - "crop_position": "pad", + "crop_position": "top", "sharpening": 0, "image": [ "28", diff --git a/utils/ml_processor/comfy_workflows/video_upscaler_api.json b/utils/ml_processor/comfy_workflows/video_upscaler_api.json index a9eb43e5..32101d28 100644 --- a/utils/ml_processor/comfy_workflows/video_upscaler_api.json +++ b/utils/ml_processor/comfy_workflows/video_upscaler_api.json @@ -185,11 +185,10 @@ "301": { "inputs": { "weight": 1, - "noise": 0.3, - "weight_type": "original", + "weight_type": "linear", + "combine_embeds": "concat", "start_at": 0, "end_at": 1, - "unfold_batch": true, "ipadapter": [ "250", 0 @@ -207,9 +206,9 @@ 0 ] }, - "class_type": "IPAdapterApply", + "class_type": "IPAdapterAdvanced", "_meta": { - "title": "Apply IPAdapter" + "title": "IPAdapter Advanced" } }, "302": { diff --git a/windows_setup.bat b/windows_setup.bat deleted file mode 100644 index edc27258..00000000 --- a/windows_setup.bat +++ /dev/null @@ -1,26 +0,0 @@ -@echo off -set "folderName=Dough" -for %%I in ("%~dp0.") do set ParentFolderName=%%~nxI -if not exist "%folderName%\" ( - if not "%folderName%"=="%ParentFolderName%" ( - git clone --depth 1 -b main https://github.com/banodoco/Dough.git - cd Dough - git clone --depth 1 -b feature/package https://github.com/piyushK52/comfy_runner.git - git clone https://github.com/comfyanonymous/ComfyUI.git - python -m venv dough-env - call dough-env\Scripts\activate.bat - python.exe -m pip install --upgrade pip - pip install -r requirements.txt - pip install websocket - pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 - pip install -r comfy_runner\requirements.txt - pip install -r ComfyUI\requirements.txt - powershell -Command "(New-Object Net.WebClient).DownloadFile('https://github.com/Gourieff/Assets/raw/main/Insightface/insightface-0.7.3-cp310-cp310-win_amd64.whl', 'insightface-0.7.3-cp310-cp310-win_amd64.whl')" - pip install insightface-0.7.3-cp310-cp310-win_amd64.whl - del insightface-0.7.3-cp310-cp310-win_amd64.whl - call dough-env\Scripts\deactivate.bat - copy .env.sample .env - cd .. - pause - ) -) \ No newline at end of file