Refactor diffusers tasks #1947

Merged 23 commits on Jul 16, 2024

4 changes: 2 additions & 2 deletions optimum/exporters/onnx/__init__.py
@@ -31,7 +31,7 @@
"utils": [
"get_decoder_models_for_export",
"get_encoder_decoder_models_for_export",
"get_stable_diffusion_models_for_export",
"get_diffusion_models_for_export",
"MODEL_TYPES_REQUIRING_POSITION_IDS",
],
"__main__": ["main_export"],
@@ -50,7 +50,7 @@
from .utils import (
get_decoder_models_for_export,
get_encoder_decoder_models_for_export,
-    get_stable_diffusion_models_for_export,
+    get_diffusion_models_for_export,
MODEL_TYPES_REQUIRING_POSITION_IDS,
)
from .__main__ import main_export
23 changes: 16 additions & 7 deletions optimum/exporters/onnx/__main__.py
@@ -221,13 +221,24 @@ def main_export(
" and passing it is not required anymore."
)

+    if task in ["stable-diffusion", "stable-diffusion-xl"]:
+        logger.warning(
+            f"The task `{task}` is deprecated and will be removed in a future release of Optimum. "
+            "Please use one of the following tasks instead: `text-to-image`, `image-to-image`, `inpainting`."
+        )

Comment on lines +224 to +229 (Member, PR author):
We warn, and the task then gets replaced with `text-to-image` by default. This matches the previous behavior for stable-diffusion, but not for stable-diffusion-xl, since in that case the model loader used to be an image-to-image pipeline.
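For illustration, a minimal sketch of the warn-then-remap behavior described in this comment; this is not Optimum's actual code, and the mapping table below is an assumption (the comment only states that the deprecated names default to `text-to-image`):

```python
import logging

logger = logging.getLogger(__name__)

# Assumed synonym table, for illustration only; in Optimum the remapping is
# handled by TasksManager.map_from_synonym.
_TASK_SYNONYMS = {
    "stable-diffusion": "text-to-image",
    "stable-diffusion-xl": "text-to-image",
}


def resolve_task(task: str) -> str:
    """Warn on the deprecated diffusion task names, then map them to the new default."""
    if task in ("stable-diffusion", "stable-diffusion-xl"):
        logger.warning(
            f"The task `{task}` is deprecated and will be removed in a future release of Optimum. "
            "Please use one of the following tasks instead: `text-to-image`, `image-to-image`, `inpainting`."
        )
    return _TASK_SYNONYMS.get(task, task)
```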

Collaborator:
`_optional_components` was introduced for `StableDiffusionXLPipeline` in diffusers v0.22.0 (https://github.com/huggingface/diffusers/blob/v0.22.0/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py#L144), but not for `StableDiffusionXLImg2ImgPipeline`. I think we might need to upgrade `DIFFUSERS_MINIMUM_VERSION` as well, to avoid potential issues when loading an SDXL model with missing optional components.
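For reference, a sketch of the kind of minimum-version guard this suggests; the constant value and the check are illustrative assumptions, not necessarily how Optimum enforces `DIFFUSERS_MINIMUM_VERSION`:

```python
import importlib.metadata

from packaging import version

# Assumed floor: _optional_components landed in diffusers v0.22.0 for StableDiffusionXLPipeline.
DIFFUSERS_MINIMUM_VERSION = version.parse("0.22.0")


def ensure_minimum_diffusers_version() -> None:
    """Raise early if the installed diffusers is older than the assumed minimum."""
    installed = version.parse(importlib.metadata.version("diffusers"))
    if installed < DIFFUSERS_MINIMUM_VERSION:
        raise ImportError(
            f"Found diffusers {installed}, but diffusers >= {DIFFUSERS_MINIMUM_VERSION} is required "
            "to load SDXL pipelines whose optional components are missing. Please upgrade diffusers."
        )
```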

original_task = task
task = TasksManager.map_from_synonym(task)

-    framework = TasksManager.determine_framework(model_name_or_path, subfolder=subfolder, framework=framework)
-    library_name = TasksManager.infer_library_from_model(
-        model_name_or_path, subfolder=subfolder, library_name=library_name
-    )
+    if framework is None:
+        framework = TasksManager.determine_framework(
+            model_name_or_path, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token
+        )
+
+    if library_name is None:
+        library_name = TasksManager.infer_library_from_model(
+            model_name_or_path, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token
+        )

torch_dtype = None
if framework == "pt":
@@ -321,9 +332,7 @@ def main_export(
)
model.config.pad_token_id = pad_token_id

if "stable-diffusion" in task:
model_type = "stable-diffusion"
elif hasattr(model.config, "export_model_type"):
if hasattr(model.config, "export_model_type"):
model_type = model.config.export_model_type.replace("_", "-")
else:
model_type = model.config.model_type.replace("_", "-")
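For context, a hedged sketch of invoking an export with the new task name; the checkpoint is only an example, and passing `task="stable-diffusion"` would still work but now triggers the deprecation warning added above:

```python
from optimum.exporters.onnx import main_export

# Example checkpoint; any diffusers text-to-image model works here.
main_export(
    "hf-internal-testing/tiny-stable-diffusion-torch",
    output="tiny_sd_onnx/",
    task="text-to-image",
)
```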
17 changes: 9 additions & 8 deletions optimum/exporters/onnx/convert.py
@@ -60,7 +60,7 @@
from transformers.modeling_utils import PreTrainedModel

if is_diffusers_available():
-    from diffusers import ModelMixin
+    from diffusers import DiffusionPipeline, ModelMixin

if is_tf_available():
from transformers.modeling_tf_utils import TFPreTrainedModel
@@ -264,7 +264,7 @@ def _run_validation(
atol = config.ATOL_FOR_VALIDATION

if "diffusers" in str(reference_model.__class__) and not is_diffusers_available():
-        raise ImportError("The pip package `diffusers` is required to validate stable diffusion ONNX models.")
+        raise ImportError("The pip package `diffusers` is required to validate diffusion ONNX models.")

framework = "pt" if is_torch_available() and isinstance(reference_model, nn.Module) else "tf"

@@ -388,7 +388,7 @@ def _run_validation(
logger.info(f"\t-[✓] ONNX model output names match reference model ({onnx_output_names})")

if "diffusers" in str(reference_model.__class__) and not is_diffusers_available():
-        raise ImportError("The pip package `diffusers` is required to validate stable diffusion ONNX models.")
+        raise ImportError("The pip package `diffusers` is required to validate diffusion ONNX models.")

# Check the shape and values match
shape_failures = []
@@ -854,7 +854,7 @@ def export(
opset = config.DEFAULT_ONNX_OPSET

if "diffusers" in str(model.__class__) and not is_diffusers_available():
-        raise ImportError("The pip package `diffusers` is required to export stable diffusion models to ONNX.")
+        raise ImportError("The pip package `diffusers` is required to export diffusion models to ONNX.")

if not config.is_transformers_support_available:
import transformers
@@ -912,7 +912,7 @@ def export(


def onnx_export_from_model(
model: Union["PreTrainedModel", "TFPreTrainedModel"],
model: Union["PreTrainedModel", "TFPreTrainedModel", "DiffusionPipeline"],
output: Union[str, Path],
opset: Optional[int] = None,
optimize: Optional[str] = None,
@@ -999,15 +999,16 @@ def onnx_export_from_model(
>>> onnx_export_from_model(model, output="gpt2_onnx/")
```
"""
-    library_name = TasksManager._infer_library_from_model(model)

-    TasksManager.standardize_model_attributes(model, library_name)
+    TasksManager.standardize_model_attributes(model)

if hasattr(model.config, "export_model_type"):
model_type = model.config.export_model_type.replace("_", "-")
else:
model_type = model.config.model_type.replace("_", "-")

+    library_name = TasksManager.infer_library_from_model(model)

custom_architecture = library_name == "transformers" and model_type not in TasksManager._SUPPORTED_MODEL_TYPE

if task is not None:
@@ -1191,7 +1192,7 @@ def onnx_export_from_model(
optimizer.optimize(save_dir=output, optimization_config=optimization_config, file_suffix="")

# Optionally post process the obtained ONNX file(s), for example to merge the decoder / decoder with past if any
-    # TODO: treating stable diffusion separately is quite ugly
+    # TODO: treating diffusion separately is quite ugly
if not no_post_process and library_name != "diffusers":
try:
logger.info("Post-processing the exported models...")
16 changes: 8 additions & 8 deletions optimum/exporters/onnx/utils.py
@@ -34,6 +34,9 @@
from ..utils import (
get_decoder_models_for_export as _get_decoder_models_for_export,
)
+    from ..utils import (
+        get_diffusion_models_for_export as _get_diffusion_models_for_export,
+    )
from ..utils import (
get_encoder_decoder_models_for_export as _get_encoder_decoder_models_for_export,
)
@@ -43,9 +46,6 @@
from ..utils import (
get_speecht5_models_for_export as _get_speecht5_models_for_export,
)
-    from ..utils import (
-        get_stable_diffusion_models_for_export as _get_stable_diffusion_models_for_export,
-    )


logger = logging.get_logger()
@@ -68,7 +68,7 @@
from transformers.modeling_tf_utils import TFPreTrainedModel

if is_diffusers_available():
-    from diffusers import ModelMixin, StableDiffusionPipeline
+    from diffusers import DiffusionPipeline, ModelMixin


MODEL_TYPES_REQUIRING_POSITION_IDS = {
@@ -219,13 +219,13 @@ def _get_submodels_and_onnx_configs(
DEPRECATION_WARNING_GET_MODEL_FOR_EXPORT = "The usage of `optimum.exporters.onnx.utils.get_{model_type}_models_for_export` is deprecated and will be removed in a future release, please use `optimum.exporters.utils.get_{model_type}_models_for_export` instead."


-def get_stable_diffusion_models_for_export(
-    pipeline: "StableDiffusionPipeline",
+def get_diffusion_models_for_export(
+    pipeline: "DiffusionPipeline",
int_dtype: str = "int64",
float_dtype: str = "fp32",
) -> Dict[str, Tuple[Union["PreTrainedModel", "ModelMixin"], "ExportConfig"]]:
-    logger.warning(DEPRECATION_WARNING_GET_MODEL_FOR_EXPORT.format(model_type="stable_diffusion"))
-    return _get_stable_diffusion_models_for_export(pipeline, int_dtype, float_dtype, exporter="onnx")
+    logger.warning(DEPRECATION_WARNING_GET_MODEL_FOR_EXPORT.format(model_type="diffusion"))
+    return _get_diffusion_models_for_export(pipeline, int_dtype, float_dtype, exporter="onnx")


def get_sam_models_for_export(model: Union["PreTrainedModel", "TFPreTrainedModel"], config: "ExportConfig"):
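Finally, a short usage sketch of the renamed helper from its deprecated ONNX-side location, which still works but now logs the deprecation warning pointing to `optimum.exporters.utils`; the checkpoint is again only an example:

```python
from diffusers import DiffusionPipeline

from optimum.exporters.onnx.utils import get_diffusion_models_for_export

# Example checkpoint only; any diffusers pipeline supported by the exporter works.
pipeline = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")

# Returns a dict mapping each exportable submodel name to a (model, ONNX config) pair,
# and logs the deprecation warning defined above.
models_and_onnx_configs = get_diffusion_models_for_export(pipeline)
print(list(models_and_onnx_configs.keys()))
```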