Commit
Merge pull request #138 from banodoco/staging
Staging
piyushK52 committed May 3, 2024
2 parents b300dec + aa55f10 commit 8743345
Showing 40 changed files with 420 additions and 88 deletions.
160 changes: 152 additions & 8 deletions backend/db_repo.py
@@ -917,6 +917,80 @@ def create_timing(self, **kwargs):

return InternalResponse(payload, 'timing created successfully', True)

def bulk_create_timing(self, timing_data_list):
primary_image_id_list = []
shot_id_list = []
model_id_list = []
source_image_id_list = []
mask_id_list = []
canny_image_id_list = []

for d in timing_data_list:
if 'primary_image_id' in d:
primary_image_id_list.append(d['primary_image_id'])
if 'shot_id' in d:
shot_id_list.append(d['shot_id'])
if 'model_id' in d:
model_id_list.append(d['model_id'])
if 'source_image_id' in d:
source_image_id_list.append(d['source_image_id'])
if 'mask_id' in d:
mask_id_list.append(d['mask_id'])
if 'canny_image_id' in d:
canny_image_id_list.append(d['canny_image_id'])

primary_image_id_list = list(set(primary_image_id_list))
shot_id_list = list(set(shot_id_list))
model_id_list = list(set(model_id_list))
source_image_id_list = list(set(source_image_id_list))
mask_id_list = list(set(mask_id_list))
canny_image_id_list = list(set(canny_image_id_list))

file_list = InternalFileObject.objects.filter(uuid__in=primary_image_id_list + source_image_id_list + mask_id_list + canny_image_id_list, is_disabled=False).all()
model_list = AIModel.objects.filter(uuid__in=model_id_list, is_disabled=False).all()
shot_list = Shot.objects.filter(uuid__in=shot_id_list, is_disabled=False).all()

file_uuid_id_map = {str(file.uuid): file.id for file in file_list}
model_uuid_id_map = {str(model.uuid): model.id for model in model_list}
shot_uuid_id_map = {str(shot.uuid): shot.id for shot in shot_list}

# print("----- file_uuid_map: ", file_uuid_id_map)
res_timing_list = []
for data in timing_data_list:
kwargs = data
if 'primary_image_id' in kwargs and kwargs['primary_image_id'] in file_uuid_id_map:
kwargs['primary_image_id'] = file_uuid_id_map[kwargs['primary_image_id']]

if 'source_image_id' in kwargs and kwargs['source_image_id'] in file_uuid_id_map:
kwargs['source_image_id'] = file_uuid_id_map[kwargs['source_image_id']]

if 'mask_id' in kwargs and kwargs['mask_id'] in file_uuid_id_map:
kwargs['mask_id'] = file_uuid_id_map[kwargs['mask_id']]

if 'canny_image_id' in kwargs and kwargs['canny_image_id'] in file_uuid_id_map:
kwargs['canny_image_id'] = file_uuid_id_map[kwargs['canny_image_id']]

if 'shot_id' in kwargs and kwargs['shot_id'] in shot_uuid_id_map:
kwargs['shot_id'] = shot_uuid_id_map[kwargs['shot_id']]

if 'model_id' in kwargs and kwargs['model_id'] in model_uuid_id_map:
kwargs['model_id'] = model_uuid_id_map[kwargs['model_id']]

# print("---- data: ", kwargs)
timing = Timing(**kwargs)
res_timing_list.append(timing)

with transaction.atomic():
for timing in res_timing_list:
timing.save()

payload = {
'data': [TimingDto(timing).data for timing in res_timing_list]
}
return InternalResponse(payload, 'timing list created successfully', True)

def remove_existing_timing(self, project_uuid):
if project_uuid:
project: Project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first()
@@ -944,6 +1018,84 @@ def add_interpolated_clip(self, uuid, **kwargs):

return InternalResponse({}, 'success', True)

def update_bulk_timing(self, timing_uuid_list, data_list):
timing_list = Timing.objects.filter(uuid__in=timing_uuid_list, is_disabled=False).all()
if not (timing_list and len(timing_list)) and len(timing_uuid_list):
return InternalResponse({}, 'no timing objs found', False)

primary_image_id_list = []
shot_id_list = []
model_id_list = []
source_image_id_list = []
mask_id_list = []
canny_image_id_list = []

for d in data_list:
if 'primary_image_id' in d:
primary_image_id_list.append(d['primary_image_id'])
if 'shot_id' in d:
shot_id_list.append(d['shot_id'])
if 'model_id' in d:
model_id_list.append(d['model_id'])
if 'source_image_id' in d:
source_image_id_list.append(d['source_image_id'])
if 'mask_id' in d:
mask_id_list.append(d['mask_id'])
if 'canny_image_id' in d:
canny_image_id_list.append(d['canny_image_id'])

primary_image_id_list = list(set(primary_image_id_list))
shot_id_list = list(set(shot_id_list))
model_id_list = list(set(model_id_list))
source_image_id_list = list(set(source_image_id_list))
mask_id_list = list(set(mask_id_list))
canny_image_id_list = list(set(canny_image_id_list))

file_list = InternalFileObject.objects.filter(uuid__in=primary_image_id_list + source_image_id_list + mask_id_list + canny_image_id_list, is_disabled=False).all()
model_list = AIModel.objects.filter(uuid__in=model_id_list, is_disabled=False).all()
shot_list = Shot.objects.filter(uuid__in=shot_id_list, is_disabled=False).all()

file_uuid_id_map = {str(file.uuid): file.id for file in file_list}
model_uuid_id_map = {str(model.uuid): model.id for model in model_list}
shot_uuid_id_map = {str(shot.uuid): shot.id for shot in shot_list}

res_timing_list = []
for timing, update_data in zip(timing_list, data_list):
kwargs = update_data
if 'primary_image_id' in kwargs and kwargs['primary_image_id'] in file_uuid_id_map:
kwargs['primary_image_id'] = file_uuid_id_map[kwargs['primary_image_id']]

if 'source_image_id' in kwargs and kwargs['source_image_id'] in file_uuid_id_map:
kwargs['source_image_id'] = file_uuid_id_map[kwargs['source_image_id']]

if 'mask_id' in kwargs and kwargs['mask_id'] in file_uuid_id_map:
kwargs['mask_id'] = file_uuid_id_map[kwargs['mask_id']]

if 'canny_image_id' in kwargs and kwargs['canny_image_id'] in file_uuid_id_map:
kwargs['canny_image_id'] = file_uuid_id_map[kwargs['canny_image_id']]

if 'shot_id' in kwargs and kwargs['shot_id'] in shot_uuid_id_map:
kwargs['shot_id'] = shot_uuid_id_map[kwargs['shot_id']]

if 'model_id' in kwargs and kwargs['model_id'] in model_uuid_id_map:
kwargs['model_id'] = model_uuid_id_map[kwargs['model_id']]

for attr, value in kwargs.items():
setattr(timing, attr, value)

res_timing_list.append(timing)

with transaction.atomic():
for timing in res_timing_list:
timing.save()

payload = {
'data': [TimingDto(timing).data for timing in res_timing_list]
}
return InternalResponse(payload, 'timing list updated successfully', True)

def update_specific_timing(self, uuid, **kwargs):
timing = Timing.objects.filter(uuid=uuid, is_disabled=False).first()
if not timing:
@@ -999,15 +1151,7 @@ def update_specific_timing(self, uuid, **kwargs):
return InternalResponse({}, 'invalid canny image uuid', False)

kwargs['canny_image_id'] = canny_image.id


if 'primay_image_id' in kwargs:
if kwargs['primay_image_id'] != None:
primay_image: InternalFileObject = InternalFileObject.objects.filter(uuid=kwargs['primay_image_id'], is_disabled=False).first()
if not primay_image:
return InternalResponse({}, 'invalid primary image uuid', False)

kwargs['primay_image_id'] = primay_image.id

for attr, value in kwargs.items():
setattr(timing, attr, value)
77 changes: 62 additions & 15 deletions readme.md
@@ -1,4 +1,4 @@
# Welcome to Dough v. 0.8.3 (beta)
# Welcome to Dough v. 0.9.0 (beta)

**⬇️ Scroll down for Setup Instructions - Currently available on Linux & Windows, hosted version coming soon.**

@@ -10,28 +10,27 @@ Below is a brief overview and some examples of outputs:

### With Dough, you can make guidance frames using Stable Diffusion XL, IP-Adapter, Fooocus Inpainting, and more:

<img src="sample_assets/sample_images/generation_example.png" width="800">
<img src="https://github.com/banodoco/Dough/assets/34690994/698d63f5-765c-4cf2-94f4-7943d241a6ea" width="800">

### You can then assemble these frames into shots that you can granularly edit:

<img src="sample_assets/sample_images/shot_example.png" width="800">
<img src="https://github.com/banodoco/Dough/assets/34690994/1080ed90-b829-47cd-b946-de49a7a03b2a" width="800">

### And then animate these shots by defining parameters for each frame and selecting guidance videos via Motion LoRAs:

<img src="sample_assets/sample_images/motion.png" width="800">
<img src="https://github.com/banodoco/Dough/assets/34690994/95ec3ec3-5143-40e9-88ba-941ce7e2dec9" width="800">

### As an example, here's a video that's guided with just images on high strength:


<img src="sample_assets/sample_images/just_images.gif" width="800">
<img src="https://github.com/banodoco/Dough/assets/34690994/cc88ca21-870d-4b96-b9cc-39698fc5fd2f" width="800">

### While here's a more complex one, with low strength images driving it alongside a guidance video:

<img src="sample_assets/sample_images/cat_walking.gif" width="800">
<img src="https://github.com/banodoco/Dough/assets/34690994/5c2edc07-8aa3-402f-b119-345db26df8b9" width="800">

### And here's a more complex example combining high strength guidance with a guidance video strongly influencing the motion:

<img src="sample_assets/sample_images/complex.gif" width="800">
<img src="https://github.com/banodoco/Dough/assets/34690994/e5d70cc3-03e2-450d-8bc7-b6d1a920af4a" width="800">



@@ -53,28 +52,28 @@ Below is a brief overview and some examples of outputs:
3) During setup, open the relevant ports for Dough as shown below:


<img src="sample_assets/sample_images/runpod_1.png" width="600">
<img src="https://github.com/banodoco/Dough/assets/34690994/102bc6fe-0962-493f-b11a-9dfa22501bdd" width="600">

<img src="sample_assets/sample_images/runpod_2.png" width="600">
<img src="https://github.com/banodoco/Dough/assets/34690994/1b9ff4d7-960e-496c-83ae-306c0dfa623d" width="600">


4) When you’ve launched the pod, click into Jupyter Notebook:

<img src="sample_assets/sample_images/runpod_3.png" width="600">
<img src="https://github.com/banodoco/Dough/assets/34690994/9a0b6b54-ae53-4571-8131-165c4bacc909" width="600">

<img src="sample_assets/sample_images/runpod_4.png" width="600">
<img src="https://github.com/banodoco/Dough/assets/34690994/86b31523-7457-43b2-ad68-99e62689c32f" width="600">


5) Follow the “Setup for Linux” instructions below and come back here when you’ve gone through them.


6) Once you’ve done that, grab the IP address for your instance:

<img src="sample_assets/sample_images/runpod_5.png" width="600">
<img src="https://github.com/banodoco/Dough/assets/34690994/35aed283-fa47-494e-924e-0263b84be2b2" width="600">

<img src="sample_assets/sample_images/runpod_6.png" width="600">
<img src="https://github.com/banodoco/Dough/assets/34690994/2bdb9363-9138-49bd-a2b9-69961e744f7a" width="600">

<img src="sample_assets/sample_images/runpod_7.png" width="600">
<img src="https://github.com/banodoco/Dough/assets/34690994/a2a83ee6-149e-44aa-b00a-d36e42320bb4" width="600">

Then put these into this form with a : between them, like this:
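
For example (hypothetical values, shown only to illustrate the format): if your pod's public IP were 69.48.159.6 and the mapped port were 5500, you would enter:

```
69.48.159.6:5500
```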

@@ -155,3 +154,51 @@ cd Dough
---

If you're having any issues, please share them in our [Discord](https://discord.com/invite/8Wx9dFu5tP).

# Troubleshooting

<details>
<summary><b>Common problems (click to expand)</b></summary>

<details>
<summary><b>Issue during installation</b></summary>

- Make sure you are using Python 3.10
- If you are on Windows, make sure the permissions of the Dough folder are not restricted (try granting full access to everyone)
- Double-check that you are not inside any system-protected folders like system32
- Install the app in admin mode: open PowerShell as administrator, run "Set-ExecutionPolicy RemoteSigned", and then follow the installation instructions in the readme
- If all of the above fail, run the following commands one by one and report which one throws the error
```bash
call dough-env\Scripts\activate.bat
python.exe -m pip install --upgrade pip
pip install -r requirements.txt
pip install websocket
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
pip install -r comfy_runner\requirements.txt
pip install -r ComfyUI\requirements.txt
```
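
If the error points at the environment itself, a quick sanity check (a minimal sketch, assuming the dough-env virtual environment is activated) is to confirm which interpreter and pip are being used:

```bash
# should report Python 3.10.x and a pip that lives inside dough-env
python --version
pip --version
```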
</details>
<details>
<summary><b>Unable to locate credentials</b></summary>
Make a copy of ".env.sample" and rename it to ".env"
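
For example, from the root of the Dough folder (a minimal sketch; on the Windows command prompt use `copy` instead of `cp`):

```bash
cp .env.sample .env
```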
</details>
<details>
<summary><b>Issue during runtime</b></summary>

- If a particular node inside ComfyUI is throwing an error, delete that node and restart the app
- Make sure you are using python3.10 and the virtual environment is activated
- Try doing "git pull origin main" to get the latest code
</details>
<details>
<summary><b>Generations are in progress for a long time</b></summary>

- Check the terminal to see whether any progress is being made (generations can be very slow, especially upscaling)
- Cancel the generations directly from the sidebar if they are stuck
- If you don't see any logs in the terminal, make sure no other program is running on port 12345, as Dough uses that port (see the commands below)
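
A quick way to check the port (a minimal sketch, assuming standard system tools):

```bash
# Linux / macOS: list any process currently bound to port 12345
lsof -i :12345

# Windows: show connections on port 12345 along with the owning process ID
netstat -ano | findstr 12345
```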
</details>
<details>
<summary><b>Some other error?</b></summary>
Drop in our [Discord](https://discord.com/invite/8Wx9dFu5tP).
</details>
</details>
Binary file removed sample_assets/example_generations/guy-1.png
Binary file not shown.
Binary file removed sample_assets/example_generations/guy-2.png
Binary file not shown.
Binary file removed sample_assets/example_generations/lady-1.png
Binary file not shown.
Binary file removed sample_assets/example_generations/world-1.png
Binary file not shown.
Binary file removed sample_assets/example_generations/world-2.png
Binary file not shown.
Binary file removed sample_assets/example_generations/world-3.png
Binary file not shown.
Binary file removed sample_assets/example_generations/world-4.png
Binary file not shown.
Binary file removed sample_assets/sample_images/cat_walking.gif
Binary file not shown.
Binary file removed sample_assets/sample_images/complex.gif
Binary file not shown.
Binary file removed sample_assets/sample_images/generation_example.png
Binary file not shown.
Binary file removed sample_assets/sample_images/just_images.gif
Binary file not shown.
Binary file removed sample_assets/sample_images/main.png
Binary file not shown.
Binary file removed sample_assets/sample_images/main_example.gif
Binary file not shown.
Binary file removed sample_assets/sample_images/motion.png
Binary file not shown.
Binary file removed sample_assets/sample_images/runpod_1.png
Binary file not shown.
Binary file removed sample_assets/sample_images/runpod_2.png
Binary file not shown.
Binary file removed sample_assets/sample_images/runpod_3.png
Binary file not shown.
Binary file removed sample_assets/sample_images/runpod_4.png
Binary file not shown.
Binary file removed sample_assets/sample_images/runpod_5.png
Binary file not shown.
Binary file removed sample_assets/sample_images/runpod_6.png
Binary file not shown.
Binary file removed sample_assets/sample_images/runpod_7.png
Binary file not shown.
Binary file removed sample_assets/sample_images/shot_example.png
Binary file not shown.
Binary file removed sample_assets/sample_images/tweak_settings.gif
Binary file not shown.
Binary file removed sample_assets/sample_videos/sample.mp4
Binary file not shown.
2 changes: 1 addition & 1 deletion scripts/app_version.txt
@@ -1 +1 @@
0.8.3
0.9.0
31 changes: 30 additions & 1 deletion ui_components/components/animate_shot_page.py
@@ -1,7 +1,7 @@
import json
import time
import streamlit as st
from shared.constants import InferenceParamType, InferenceStatus, InferenceType, InternalFileType
from shared.constants import AnimationStyleType, InferenceParamType, InferenceStatus, InferenceType, InternalFileType
from ui_components.components.video_rendering_page import sm_video_rendering_page, two_img_realistic_interpolation_page
from ui_components.models import InternalShotObject
from ui_components.widgets.frame_selector import frame_selector_widget
Expand Down Expand Up @@ -49,6 +49,7 @@ def video_rendering_page(shot_uuid, selected_variant):
log = data_repo.get_inference_log_from_uuid(selected_variant)
shot_data = json.loads(log.input_params)
file_uuid_list = shot_data.get('origin_data', json.dumps({})).get('settings', {}).get('file_uuid_list', [])
st.session_state[f"{shot_uuid}_selected_variant_log_uuid"] = None

else:
# hackish sol, will fix later
@@ -65,6 +66,34 @@ def video_rendering_page(shot_uuid, selected_variant):
for timing in shot.timing_list:
if timing.primary_image and timing.primary_image.location:
file_uuid_list.append(timing.primary_image.uuid)
else:
# updating the shot timing images
shot_timing_list = shot.timing_list
img_mismatch = False # flag to check if shot images need to be updated
if len(file_uuid_list) == len(shot_timing_list):
for file_uuid, timing in zip(file_uuid_list, shot_timing_list):
if timing.primary_image and timing.primary_image.uuid != file_uuid:
img_mismatch = True
break
else:
img_mismatch = True

if img_mismatch or len(file_uuid_list) != len(shot_timing_list):
# deleting all the current timings
data_repo.update_bulk_timing([timing.uuid for timing in shot_timing_list], [{'is_disabled': True}] * len(shot_timing_list))
# adding new timings
new_timing_data = []
for idx, file_uuid in enumerate(file_uuid_list):
new_timing_data.append(
{
'aux_frame_index': idx,
'shot_id': shot_uuid,
'primary_image_id': file_uuid,
'is_disabled': False
}
)

data_repo.bulk_create_timing(new_timing_data)

img_list = data_repo.get_all_file_list(uuid__in=file_uuid_list, file_type=InternalFileType.IMAGE.value)[0]

6 changes: 4 additions & 2 deletions ui_components/components/explorer_page.py
@@ -491,7 +491,7 @@ def gallery_image_view(project_uuid, shortlist=False, view=["main"], shot=None,
else:
with h1:
project_setting = data_repo.get_project_setting(project_uuid)
page_number = k1.radio("Select page", options=range(1, project_setting.total_shortlist_gallery_pages), horizontal=True, key="shortlist_gallery")
page_number = k1.radio("Select page", options=range(1, project_setting.total_shortlist_gallery_pages + 1), horizontal=True, key="shortlist_gallery")
open_detailed_view_for_all = False

else:
@@ -620,7 +620,9 @@ def gallery_image_view(project_uuid, shortlist=False, view=["main"], shot=None,
log = gallery_image_list[i + j].inference_log # data_repo.get_inference_log_from_uuid(gallery_image_list[i + j].inference_log.uuid)
if log:
input_params = json.loads(log.input_params)
prompt = input_params.get('prompt', 'No prompt found')
prompt = input_params.get('prompt', None)
if not prompt:
prompt = input_params.get("query_dict", {}).get("prompt", "Prompt not found")
model = json.loads(log.output_details)['model_name'].split('/')[-1]
if 'view_inference_details' in view:
with st.expander("Prompt Details", expanded=open_detailed_view_for_all):