-
Notifications
You must be signed in to change notification settings - Fork 636
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Add mmflow to mmdeploy #1606
base: master
Are you sure you want to change the base?
Add mmflow to mmdeploy #1606
Changes from 2 commits
776f762
a3e7eb7
8d8af68
d45819c
834a56c
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,3 @@ | ||
# Base deployment config for the mmflow optical-flow task.
_base_ = ['../../_base_/onnx_config.py']

codebase_config = dict(type='mmflow', task='OpticalFlow')
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,5 @@ | ||
# ONNX Runtime static-shape deployment config for optical flow.
_base_ = [
    '../_base_/opticalflow_static.py', '../../_base_/backends/onnxruntime.py'
]

onnx_config = dict(verbose=True)
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,15 @@ | ||
# TensorRT static-shape deployment config for optical flow.
# Input resolution is fixed at 440x1024 (HxW in the shape tuples below).
_base_ = [
    '../_base_/opticalflow_static.py', '../../_base_/backends/tensorrt.py'
]

onnx_config = dict(input_shape=[440, 1024])

backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    min_shape=[1, 3, 440, 1024],
                    opt_shape=[1, 3, 440, 1024],
                    max_shape=[1, 3, 440, 1024])))
    ])
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,2 @@ | ||
# Copyright (c) OpenMMLab. All rights reserved.
from .deploy import *  # noqa: F401,F403
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,5 @@ | ||
# Copyright (c) OpenMMLab. All rights reserved.
from .flow import Flow
from .mmflow import MMFlow  # noqa: F401,F403

__all__ = ['MMFlow', 'Flow']
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,276 @@ | ||
# Copyright (c) OpenMMLab. All rights reserved. | ||
import copy | ||
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union | ||
|
||
import mmcv | ||
import numpy as np | ||
import torch | ||
from mmcv.parallel import collate, scatter | ||
from torch.utils.data import Dataset | ||
|
||
from mmdeploy.codebase.base import BaseTask | ||
from mmdeploy.utils import Task | ||
from .mmflow import MMFLOW_TASK | ||
|
||
|
||
def process_model_config(model_cfg: mmcv.Config,
                         imgs: Union[Sequence[str], Sequence[np.ndarray]],
                         input_shape: Optional[Sequence[int]] = None):
    """Process the model config for deployment.

    Strips annotation-loading steps and ground-truth meta keys from the
    test pipeline, since neither is available (or needed) at inference time.

    Args:
        model_cfg (mmcv.Config): The model config.
        imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted
            data type are List[str], List[np.ndarray].
        input_shape (list[int]): A list of two integer in (width, height)
            format specifying input shape. Default: None.

    Returns:
        mmcv.Config: the model config after processing.
    """
    test_cfg = model_cfg.data.test
    if test_cfg.type == 'ConcatDataset':
        # ConcatDataset wraps several datasets; take the first one's pipeline.
        cfg = copy.deepcopy(test_cfg.datasets[0])
    else:
        cfg = copy.deepcopy(test_cfg)

    # There is no need to load annotations at deployment time.
    for ann_step in (dict(type='LoadAnnotations'),
                     dict(type='LoadAnnotations', sparse=True)):
        if ann_step in cfg.pipeline:
            cfg.pipeline.remove(ann_step)

    # Drop ground-truth flow meta keys from the final (Collect-style) step.
    meta_keys = cfg.pipeline[-1]['meta_keys']
    for gt_key in ('flow_gt', 'flow_fw_gt', 'flow_bw_gt'):
        if gt_key in meta_keys:
            meta_keys.remove(gt_key)

    return cfg
|
||
|
||
@MMFLOW_TASK.register_module(Task.OPTICAL_FLOW.value)
class Flow(BaseTask):
    """BaseTask class of the optical flow task.

    Args:
        model_cfg (mmcv.Config): Model config file.
        deploy_cfg (mmcv.Config): Deployment config file.
        device (str): A string specifying device type.
    """

    def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
                 device: str):
        super().__init__(model_cfg, deploy_cfg, device)

    def init_backend_model(self,
                           model_files: Optional[Sequence[str]] = None,
                           **kwargs) -> torch.nn.Module:
        """Initialize backend model.

        Args:
            model_files (Sequence[str]): Input model files. Default is None.

        Returns:
            nn.Module: An initialized backend model.
        """
        from .mmflow_model import build_mmflow_model
        model = build_mmflow_model(
            model_files,
            self.model_cfg,
            self.deploy_cfg,
            device=self.device,
            **kwargs)
        return model

    def init_pytorch_model(self,
                           model_checkpoint: Optional[str] = None,
                           **kwargs) -> torch.nn.Module:
        """Initialize torch model.

        Args:
            model_checkpoint (str): The checkpoint file of torch model,
                defaults to `None`.

        Returns:
            nn.Module: An initialized torch model generated by other OpenMMLab
                codebases.
        """
        from mmflow.apis import init_model
        model = init_model(self.model_cfg, model_checkpoint, self.device)
        return model.eval()

    def create_input(self,
                     imgs: Union[str, np.ndarray],
                     input_shape: Optional[Sequence[int]] = None,
                     pipeline_updater: Optional[Callable] = None,
                     **kwargs) -> Tuple[Dict, torch.Tensor]:
        """Create input for mmflow processor.

        Optical flow estimation requires exactly two images (the frame pair);
        both must be file paths or both numpy arrays.

        Args:
            imgs (str | np.ndarray): Input image pair.
            input_shape (Sequence[int] | None): Input shape of image in
                (width, height) format, defaults to `None`.
            pipeline_updater (function | None): A function to get a new
                pipeline.

        Returns:
            tuple: (data, img), meta information for the input image and input.

        Raises:
            AssertionError: If `imgs` is not a pair of strings or numpy
                arrays.
        """
        from mmflow.datasets.pipelines import Compose
        valid = None
        if isinstance(imgs, (list, tuple)):
            if not isinstance(imgs[0], (np.ndarray, str)):
                raise AssertionError('imgs must be strings or numpy arrays')
        elif isinstance(imgs, (np.ndarray, str)):
            imgs = [imgs]
        else:
            raise AssertionError('imgs must be strings or numpy arrays')
        # Two input images are required for flow inference (confirmed in
        # review); fail with a clear message instead of a bare IndexError.
        if len(imgs) != 2:
            raise AssertionError(
                'optical flow inference requires exactly two images, '
                f'got {len(imgs)}')

        cfg = process_model_config(self.model_cfg, imgs, input_shape)
        test_pipeline = Compose(cfg.pipeline)
        valid_masks = []
        data_list = []
        if isinstance(imgs[0], np.ndarray) and isinstance(imgs[1], np.ndarray):
            # Arrays are already decoded; feed them to the pipeline directly.
            data = dict(img1=imgs[0], img2=imgs[1])
        else:
            # File paths: let the pipeline's loading step read the images.
            data = dict(
                img_info=dict(filename1=imgs[0], filename2=imgs[1]),
                img1_prefix=None,
                img2_prefix=None,
                valid=valid)
        data['img_fields'] = ['img1', 'img2']
        # BUG FIX: the pipeline must transform the sample dict, not the
        # (still empty) batch list.
        data = test_pipeline(data)
        data_list.append(data)
        valid_masks.append(valid)
        data = collate(data_list, samples_per_gpu=len(data_list))
        # Just get the actual data from DataContainer.
        data['img_metas'] = data['img_metas'].data[0]
        data['imgs'] = data['imgs'].data[0]
        if self.device != 'cpu':
            data = scatter(data, [self.device])[0]
        return data, data['imgs']

    # TODO: implement visualization of flow maps.
    def visualize(self,
                  model: torch.nn.Module,
                  image: Union[str, np.ndarray],
                  result: Union[list, np.ndarray],
                  output_file: str,
                  window_name: str = '',
                  show_result: bool = False,
                  **kwargs) -> np.ndarray:
        """Visualize result of a model.

        Args:
            model (nn.Module): Input model.
            image (str | np.ndarray): Input image to draw predictions on.
            result (list | np.ndarray): A list of result.
            output_file (str): Output file to save drawn image.
            window_name (str): The name of visualization window. Defaults to
                an empty string.
            show_result (bool): Whether to show result in windows, defaults
                to `False`.
        """
        pass

    # TODO: should run inference for both pytorch and backend models
    # (see review discussion).
    @staticmethod
    def run_inference(model: torch.nn.Module,
                      model_inputs: Dict[str, torch.Tensor]) -> list:
        """Run inference once for an optical flow model of mmflow.

        Args:
            model (nn.Module): Input model.
            model_inputs (dict): A dict containing model inputs tensor and
                meta info.

        Returns:
            list: The predictions of model inference.
        """
        pass

    @staticmethod
    def get_partition_cfg(partition_type: str, **kwargs) -> Dict:
        """Get a certain partition config for mmflow.

        Args:
            partition_type (str): A string specifying partition type.

        Returns:
            dict: A dictionary of partition config.
        """
        raise NotImplementedError

    # TODO: extract the image tensor once the input dict layout is settled.
    @staticmethod
    def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor:
        """Get input tensor from input data.

        Args:
            input_data (dict): Input data containing meta info
                and image tensor.
        Returns:
            torch.Tensor: An image in `Tensor`.
        """
        pass

    # TODO: hook into mmflow's dataset.evaluate().
    @staticmethod
    def evaluate_outputs(model_cfg,
                         outputs: list,
                         dataset: Dataset,
                         metrics: Optional[str] = None,
                         out: Optional[str] = None,
                         metric_options: Optional[dict] = None,
                         format_only: bool = False,
                         log_file: Optional[str] = None,
                         json_file: Optional[str] = None,
                         **kwargs) -> None:
        """Evaluation function implemented in mmflow.

        Args:
            model_cfg (mmcv.Config): The model config.
            outputs (list): A list of result of model inference.
            dataset (Dataset): Input dataset to run test.
            metrics (str): Evaluation metrics, which depends on
                the codebase and the dataset, e.g., "EPE", "Fl-all" in mmflow.
            out (str): Output result file in pickle format, defaults to `None`.
            metric_options (dict): Custom options for evaluation, will be
                kwargs for dataset.evaluate() function. Defaults to `None`.
            format_only (bool): Format the output results without perform
                evaluation. It is useful when you want to format the result
                to a specific format and submit it to the test server. Defaults
                to `False`.
            log_file (str | None): The file to write the evaluation results.
                Defaults to `None` and the results will only print on stdout.
        """
        pass

    def get_preprocess(self) -> Dict:
        """Get the preprocess information for SDK.

        Return:
            dict: Composed of the preprocess information.
        """
        raise NotImplementedError

    def get_postprocess(self) -> Dict:
        """Get the postprocess information for SDK.

        Return:
            dict: Nothing for optical flow.
        """
        raise NotImplementedError

    def get_model_name(self) -> str:
        """Get the model name.

        Return:
            str: the name of the model.
        """
        assert 'type' in self.model_cfg.model, 'type not in model config'
        name = self.model_cfg.model.type.lower()
        return name
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The structure of a deploy config path is
`configs/{codebase}/{deploy_config_filename}`
— no algorithm name belongs in the directory path; please change it.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Renamed file from
`configs/mmflow/raft/opticalflow_onnxruntime_static.py`
to `configs/mmflow/opticalflow_onnxruntime_static.py`
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.