Add mmflow to mmdeploy #1606

Open. Wants to merge 5 commits into base: master.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -5,7 +5,7 @@ repos:
       - id: flake8
         args: ["--exclude=*/client/inference_pb2.py,*/client/inference_pb2_grpc.py"]
   - repo: https://github.com/PyCQA/isort
-    rev: 5.10.1
+    rev: 5.11.5
     hooks:
       - id: isort
   - repo: https://github.com/pre-commit/mirrors-yapf
3 changes: 3 additions & 0 deletions configs/mmflow/_base_/opticalflow_static.py
@@ -0,0 +1,3 @@
_base_ = ['../../_base_/onnx_config.py']

codebase_config = dict(type='mmflow', task='OpticalFlow')
5 changes: 5 additions & 0 deletions configs/mmflow/raft/opticalflow_onnxruntime_static.py
@@ -0,0 +1,5 @@
_base_ = [
RunningLeon (Collaborator), Feb 8, 2023:
The structure of a model deploy config is configs/{codebase}/{deploy_config_filename}; there should be no algorithm name in the directory path. Please change it.

pedroHuang123 (Author):
I am sorry, what do you mean?

RunningLeon (Collaborator):
Rename the file from configs/mmflow/raft/opticalflow_onnxruntime_static.py to configs/mmflow/opticalflow_onnxruntime_static.py.

pedroHuang123 (Author):
OK, I have renamed these configs.
    '../_base_/opticalflow_static.py', '../../_base_/backends/onnxruntime.py'
]

onnx_config = dict(verbose=True)
15 changes: 15 additions & 0 deletions configs/mmflow/raft/opticalflow_tensorrt_static-440x1024.py
@@ -0,0 +1,15 @@
_base_ = [
    '../_base_/opticalflow_static.py', '../../_base_/backends/tensorrt.py'
]

onnx_config = dict(input_shape=[440, 1024])
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    min_shape=[1, 3, 440, 1024],
                    opt_shape=[1, 3, 440, 1024],
                    max_shape=[1, 3, 440, 1024])))
    ])
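Since min_shape, opt_shape and max_shape are identical here, the TensorRT profile is effectively static. For reference, a dynamic-shape variant would loosen the bounds and declare dynamic axes in the ONNX config; the following is only a sketch in the style of other mmdeploy backend configs, with illustrative shape bounds that are not part of this PR:

# Hypothetical dynamic-shape variant (not in this PR); shape bounds are
# illustrative only.
_base_ = [
    '../_base_/opticalflow_static.py', '../../_base_/backends/tensorrt.py'
]

onnx_config = dict(
    input_shape=None,
    dynamic_axes=dict(input={0: 'batch', 2: 'height', 3: 'width'}))

backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    min_shape=[1, 3, 224, 512],
                    opt_shape=[1, 3, 440, 1024],
                    max_shape=[1, 3, 512, 1280])))
    ])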
2 changes: 2 additions & 0 deletions mmdeploy/codebase/mmflow/__init__.py
@@ -0,0 +1,2 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .deploy import * # noqa: F401,F403
5 changes: 5 additions & 0 deletions mmdeploy/codebase/mmflow/deploy/__init__.py
@@ -0,0 +1,5 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .flow import Flow
from .mmflow import MMFlow # noqa: F401,F403

__all__ = ['MMFlow', 'Flow']
276 changes: 276 additions & 0 deletions mmdeploy/codebase/mmflow/deploy/flow.py
@@ -0,0 +1,276 @@
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union

import mmcv
import numpy as np
import torch
from mmcv.parallel import collate, scatter
from torch.utils.data import Dataset

from mmdeploy.codebase.base import BaseTask
from mmdeploy.utils import Task
from .mmflow import MMFLOW_TASK


def process_model_config(model_cfg: mmcv.Config,
                         imgs: Union[Sequence[str], Sequence[np.ndarray]],
                         input_shape: Optional[Sequence[int]] = None):
    """Process the model config.

    Args:
        model_cfg (mmcv.Config): The model config.
        imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s); accepted
            data types are List[str] and List[np.ndarray].
        input_shape (list[int]): A list of two integers in (width, height)
            format specifying the input shape. Default: None.

    Returns:
        mmcv.Config: The model config after processing.
    """
    if model_cfg.data.test.type == 'ConcatDataset':
        cfg = copy.deepcopy(model_cfg.data.test.datasets[0])
    else:
        cfg = copy.deepcopy(model_cfg.data.test)

    # There is no need to load annotations at deployment time.
    if dict(type='LoadAnnotations') in cfg.pipeline:
        cfg.pipeline.remove(dict(type='LoadAnnotations'))
    if dict(type='LoadAnnotations', sparse=True) in cfg.pipeline:
        cfg.pipeline.remove(dict(type='LoadAnnotations', sparse=True))

    # Drop ground-truth flow keys from the collected meta keys.
    if 'flow_gt' in cfg.pipeline[-1]['meta_keys']:
        cfg.pipeline[-1]['meta_keys'].remove('flow_gt')
    if 'flow_fw_gt' in cfg.pipeline[-1]['meta_keys']:
        cfg.pipeline[-1]['meta_keys'].remove('flow_fw_gt')
    if 'flow_bw_gt' in cfg.pipeline[-1]['meta_keys']:
        cfg.pipeline[-1]['meta_keys'].remove('flow_bw_gt')

    return cfg


@MMFLOW_TASK.register_module(Task.OPTICAL_FLOW.value)
class Flow(BaseTask):
    """Task class of the optical flow task.

    Args:
        model_cfg (mmcv.Config): Model config file.
        deploy_cfg (mmcv.Config): Deployment config file.
        device (str): A string specifying the device type.
    """

    def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
                 device: str):
        super().__init__(model_cfg, deploy_cfg, device)

    def init_backend_model(self,
                           model_files: Sequence[str] = None,
                           **kwargs) -> torch.nn.Module:
        """Initialize backend model.

        Args:
            model_files (Sequence[str]): Input model files. Default is None.

        Returns:
            nn.Module: An initialized backend model.
        """
        from .mmflow_model import build_mmflow_model
        model = build_mmflow_model(
            model_files,
            self.model_cfg,
            self.deploy_cfg,
            device=self.device,
            **kwargs)
        return model

    def init_pytorch_model(self,
                           model_checkpoint: Optional[str] = None,
                           **kwargs) -> torch.nn.Module:
        """Initialize torch model.

        Args:
            model_checkpoint (str): The checkpoint file of the torch model,
                defaults to `None`.

        Returns:
            nn.Module: An initialized torch model generated by other
                OpenMMLab codebases.
        """
        from mmflow.apis import init_model
        model = init_model(self.model_cfg, model_checkpoint, self.device)
        return model.eval()

    def create_input(self,
                     imgs: Union[str, np.ndarray],
                     input_shape: Optional[Sequence[int]] = None,
                     pipeline_updater: Optional[Callable] = None,
                     **kwargs) -> Tuple[Dict, torch.Tensor]:
        """Create input for the mmflow processor.

        Args:
            imgs (str | np.ndarray): Input image(s). Two consecutive frames
                are required for optical flow.
            input_shape (Sequence[int] | None): Input shape of the image in
                (width, height) format, defaults to `None`.
            pipeline_updater (function | None): A function to get a new
                pipeline.

        Returns:
            tuple: (data, img), meta information for the input image and the
                input tensor.
        """
        from mmflow.datasets.pipelines import Compose
        valid = None
        if isinstance(imgs, (list, tuple)):
            if not isinstance(imgs[0], (np.ndarray, str)):
                raise AssertionError('imgs must be strings or numpy arrays')
        elif isinstance(imgs, (np.ndarray, str)):
            imgs = [imgs]
        else:
            raise AssertionError('imgs must be strings or numpy arrays')

        cfg = process_model_config(self.model_cfg, imgs, input_shape)
        test_pipeline = Compose(cfg.pipeline)
        valid_masks = []
        data_list = []
        if isinstance(imgs[0], np.ndarray) and isinstance(imgs[1], np.ndarray):
            # Directly add the image pair and valid mask.
            data = dict(img1=imgs[0], img2=imgs[1])
RunningLeon (Collaborator):
Does the model require two images as input for inference? What if imgs has only one image or np.ndarray?

pedroHuang123 (Author), Feb 8, 2023:
Yes, two images are required for inference.
        else:
            # Add the file information into the dict.
            data = dict(
                img_info=dict(filename1=imgs[0], filename2=imgs[1]),
RunningLeon (Collaborator):
The torch2onnx script fails here:

python tools/torch2onnx.py \
    configs/mmflow/raft/opticalflow_onnxruntime_static.py \
    ../mmflow/configs/raft/raft_8x2_100k_mixed_368x768.py \
    https://download.openmmlab.com/mmflow/raft/raft_8x2_100k_mixed_368x768.pth \
    ../mmflow/demo/frame_0001.png --work-dir ./workdir/raft --device cpu

Traceback (most recent call last):
  File "tools/torch2onnx.py", line 85, in <module>
    main()
  File "tools/torch2onnx.py", line 47, in main
    torch2onnx(
  File "/root/workspace/mmdeploy/mmdeploy/apis/core/pipeline_manager.py", line 356, in _wrap
    return self.call_function(func_name_, *args, **kwargs)
  File "/root/workspace/mmdeploy/mmdeploy/apis/core/pipeline_manager.py", line 326, in call_function
    return self.call_function_local(func_name, *args, **kwargs)
  File "/root/workspace/mmdeploy/mmdeploy/apis/core/pipeline_manager.py", line 275, in call_function_local
    return pipe_caller(*args, **kwargs)
  File "/root/workspace/mmdeploy/mmdeploy/apis/core/pipeline_manager.py", line 107, in __call__
    ret = func(*args, **kwargs)
  File "/root/workspace/mmdeploy/mmdeploy/apis/pytorch2onnx.py", line 65, in torch2onnx
    data, model_inputs = task_processor.create_input(img, input_shape)
  File "/root/workspace/mmdeploy/mmdeploy/codebase/mmflow/deploy/flow.py", line 140, in create_input
    img_info=dict(filename1=imgs[0], filename2=imgs[1]),
IndexError: list index out of range

pedroHuang123 (Author):
I have modified torch2onnx.py to take two input frames; you can run:

python tools/torch2onnx.py \
    configs/mmflow/opticalflow_onnxruntime_static.py \
    ../mmflow/configs/raft/raft_8x2_100k_mixed_368x768.py \
    https://download.openmmlab.com/mmflow/raft/raft_8x2_100k_mixed_368x768.pth \
    D:\KdWork\OpenmmLab\mmflow\demo\frame_0001.png \
    D:\KdWork\OpenmmLab\mmflow\demo\frame_0002.png \
    --work-dir ./workdir/raft --device cuda

                img1_prefix=None,
                img2_prefix=None,
                valid=valid)
        data['img_fields'] = ['img1', 'img2']
        # Run the test pipeline on the sample dict.
        data = test_pipeline(data)
        data_list.append(data)
        valid_masks.append(valid)
        data = collate(data_list, samples_per_gpu=len(data_list))
        # Just get the actual data from the DataContainer.
        data['img_metas'] = data['img_metas'].data[0]
        data['imgs'] = data['imgs'].data[0]
        if self.device != 'cpu':
            data = scatter(data, [self.device])[0]
        return data, data['imgs']
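Given the two-frame requirement discussed in the review above, a minimal usage sketch for create_input (config and image paths are placeholders; build_task_processor and load_config are mmdeploy's usual entry points):

# Hypothetical usage sketch; paths are placeholders.
from mmdeploy.apis.utils import build_task_processor
from mmdeploy.utils import load_config

deploy_cfg, model_cfg = load_config(
    'configs/mmflow/opticalflow_onnxruntime_static.py',
    '../mmflow/configs/raft/raft_8x2_100k_mixed_368x768.py')
task_processor = build_task_processor(model_cfg, deploy_cfg, device='cpu')
data, model_inputs = task_processor.create_input(
    ['demo/frame_0001.png', 'demo/frame_0002.png'])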

    # TODO
    def visualize(self,
                  model: torch.nn.Module,
                  image: Union[str, np.ndarray],
                  result: Union[list, np.ndarray],
                  output_file: str,
                  window_name: str = '',
                  show_result: bool = False,
                  **kwargs) -> np.ndarray:
        """Visualize the result of a model.

        Args:
            model (nn.Module): Input model.
            image (str | np.ndarray): Input image to draw predictions on.
            result (list | np.ndarray): A list of results.
            output_file (str): Output file to save the drawn image.
            window_name (str): The name of the visualization window.
                Defaults to an empty string.
            show_result (bool): Whether to show the result in a window,
                defaults to `False`.
        """
        pass

    # TODO
    @staticmethod
    def run_inference(model: torch.nn.Module,
                      model_inputs: Dict[str, torch.Tensor]) -> list:
        """Run inference once for an optical flow model of mmflow.

        Args:
            model (nn.Module): Input model.
            model_inputs (dict): A dict containing the model input tensor and
                meta info.

        Returns:
            list: The predictions of model inference.
        """
        pass
RunningLeon (Collaborator):
This function should be implemented to do inference for both PyTorch models and backend models.

pedroHuang123 (Author):
OK.

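A minimal sketch of what the reviewer asks for, modeled on other mmdeploy codebases; the test_mode keyword follows mmflow's test-time forward convention, which is an assumption here rather than something this PR establishes:

    # Hypothetical implementation sketch, not part of this PR. Both the
    # PyTorch model and the wrapped backend model are assumed to accept
    # the collated dict produced by create_input.
    @staticmethod
    def run_inference(model: torch.nn.Module,
                      model_inputs: Dict[str, torch.Tensor]) -> list:
        with torch.no_grad():
            result = model(test_mode=True, **model_inputs)
        return [result]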

    @staticmethod
    def get_partition_cfg(partition_type: str, **kwargs) -> Dict:
        """Get a certain partition config for mmflow.

        Args:
            partition_type (str): A string specifying the partition type.

        Returns:
            dict: A dictionary of partition config.
        """
        raise NotImplementedError

    # TODO
    @staticmethod
    def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor:
        """Get the input tensor from input data.

        Args:
            input_data (dict): Input data containing meta info and the image
                tensor.

        Returns:
            torch.Tensor: An image in `Tensor`.
        """
        pass
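A one-line sketch for this TODO, assuming the collated dict from create_input keys the batched tensor under 'imgs' as in the code above:

    # Hypothetical implementation sketch: pull the batched image tensor
    # out of the dict built by create_input; assumes the 'imgs' key.
    @staticmethod
    def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor:
        return input_data['imgs']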

    # TODO
    @staticmethod
    def evaluate_outputs(model_cfg,
                         outputs: list,
                         dataset: Dataset,
                         metrics: Optional[str] = None,
                         out: Optional[str] = None,
                         metric_options: Optional[dict] = None,
                         format_only: bool = False,
                         log_file: Optional[str] = None,
                         json_file: Optional[str] = None,
                         **kwargs) -> None:
        """Evaluation function implemented in mmflow.

        Args:
            model_cfg (mmcv.Config): The model config.
            outputs (list): A list of results of model inference.
            dataset (Dataset): Input dataset to run the test on.
            metrics (str): Evaluation metrics, which depend on the codebase
                and the dataset, e.g. "EPE" and "Fl-all" in mmflow.
            out (str): Output result file in pickle format, defaults to
                `None`.
            metric_options (dict): Custom options for evaluation, passed as
                kwargs to the dataset.evaluate() function. Defaults to
                `None`.
            format_only (bool): Format the output results without performing
                evaluation. Useful when you want to format the results to a
                specific format and submit them to the test server. Defaults
                to `False`.
            log_file (str | None): The file to write the evaluation results
                to. Defaults to `None`, in which case the results are only
                printed to stdout.
            json_file (str | None): The file to write the evaluation results
                to in JSON format. Defaults to `None`.
        """
        pass

    def get_preprocess(self) -> Dict:
        """Get the preprocess information for the SDK.

        Return:
            dict: Composed of the preprocess information.
        """
        raise NotImplementedError

    def get_postprocess(self) -> Dict:
        """Get the postprocess information for the SDK.

        Return:
            dict: Nothing for optical flow.
        """
        raise NotImplementedError

    def get_model_name(self) -> str:
        """Get the model name.

        Return:
            str: The name of the model.
        """
        assert 'type' in self.model_cfg.model, 'type not in model config'
        name = self.model_cfg.model.type.lower()
        return name