Add Research Efficiency Part and two efficiency tools to paddle community #294

Open · wants to merge 4 commits into base: master
2 changes: 2 additions & 0 deletions CC/README.md
@@ -0,0 +1,2 @@
# Cognitive Computing

4 changes: 4 additions & 0 deletions RE/README.md
@@ -0,0 +1,4 @@
# Research Efficiency



21 changes: 21 additions & 0 deletions RE/paddleext/CHANGELOG.md
@@ -0,0 +1,21 @@
Changelog
===
All notable changes to this project are recorded here; the format is based on [Keep a Changelog].

This project's versioning follows [Semantic Versioning] and [PEP-440].

## [v1.0] - 2022-07-04
---
### Added
- Support testing of some classification modules for paddlemetrics
### Changed





[v1.0]: https://console.cloud.baidu-int.com/devops/icode/repos/baidu/ccl/torch2paddle/commits/7476c4f8477d6161f8d5aaaf78f47d6bee990d42

[Keep a Changelog]: https://keepachangelog.com/zh-CN/1.0.0/
[Semantic Versioning]: https://semver.org/lang/zh-CN/
[PEP-440]: https://www.python.org/dev/peps/pep-0440/
103 changes: 103 additions & 0 deletions RE/paddleext/README.md
@@ -0,0 +1,103 @@
# Paddle Extension

Paddle extensions, including implementations of the PyTorch APIs on top of Paddle.

## Install

* Clone the repo
* Add the path of the `paddleext` folder to `PYTHONPATH` (or do it from Python, as in the sketch below)
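
For reference, a minimal sketch of the same setup done at runtime instead of via `PYTHONPATH` (the path below is a placeholder for your local clone):

```python
import sys

# Placeholder path; point it at the RE/paddleext folder of your local clone.
PADDLEEXT_ROOT = "/path/to/community/RE/paddleext"
if PADDLEEXT_ROOT not in sys.path:
    sys.path.insert(0, PADDLEEXT_ROOT)

from paddleext import torchapi  # should now import without error
```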

## Documentation

### Seamlessly switch the backend between Paddle and PyTorch

* Add the following code to the root `__init__.py` of your project
  (assuming your project name is `PROJECT`):

```python

import importlib
import sys
import os

BACKEND = os.environ.get('BACKEND', 'paddle')

if BACKEND == "paddle":

from paddleext import torchapi
sys.modules["PROJECT.backend"] = torchapi

try:
import paddlemetrics
sys.modules["PROJECT.metrics"] = paddlemetrics
    except Exception:
        pass  # paddlemetrics is optional; ignore if it is not installed

elif BACKEND == "torch":
try:
import torch
import types

class VirtualModule(types.ModuleType):
def __init__(self, module_name, sub_modules):

super().__init__(module_name)
try:
import sys
sys.modules[module_name] = self
self._module_name = module_name
self._sub_modules = sub_modules
for sub_name, module in sub_modules.items():
if sub_name is None:
sys.modules[f"{module_name}"] = module
else:
sys.modules[f"{module_name}.{sub_name}"] = module
except ImportError as err:
raise err # please signal error in some useful way :-)

def __repr__(self):
return "Virtual module for " + self._module_name

def __getattr__(self, attrname):

if attrname in self._sub_modules.keys():
import sys
return self._sub_modules[attrname]
else:
                    raise AttributeError(
                        f"module {self._module_name!r} has no attribute {attrname!r}"
                    )


import pkgutil

sub_modules = {None: torch}
for module_info in pkgutil.iter_modules(torch.__path__):
if not module_info.name.startswith("_"):
try:
module = importlib.import_module("torch." + module_info.name)
sub_modules[module_info.name] = module
                except Exception:
                    # Some torch submodules fail to import when optional deps are missing; skip them.
                    pass

VirtualModule("PROJECT.backend", sub_modules)


except Exception as e:
raise e

try:
import torchmetrics

sys.modules["PROJECT.metrics"] = torchmetrics
except Exception as e:
pass

```
* Set the environment variable `BACKEND` to `paddle` or `torch` to select the backend.
* Import the backend module in your code:

```python
import PROJECT.backend as B
from PROJECT.backend import nn
import PROJECT.metrics as M
```
* Replace every `torch.` or `paddle.` prefix with `B.` in your code, as in the sketch below.
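
For example, a minimal backend-agnostic sketch (assuming the project is named `PROJECT` as above, and that the wrapped `nn` module exposes `Linear`, which both backends provide):

```python
import PROJECT.backend as B
from PROJECT.backend import nn

# The same code runs whether BACKEND=paddle or BACKEND=torch.
model = nn.Linear(4, 2)
x = B.ones((3, 4), dtype=B.float32)
y = model(x)
print(y.shape)  # [3, 2] under paddle, torch.Size([3, 2]) under torch
```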
Empty file added RE/paddleext/__init__.py
Empty file.
3 changes: 3 additions & 0 deletions RE/paddleext/paddleext/__init__.py
@@ -0,0 +1,3 @@


from . import torchapi
74 changes: 74 additions & 0 deletions RE/paddleext/paddleext/torchapi/__init__.py
@@ -0,0 +1,74 @@
import sys
import types
import inspect
from functools import partial

from .core import *
from .tensor_ import *
from .functional import *
from . import sampler
from . import data
from . import nn
from . import distributed
from . import cuda
from . import optim

#from . import paddle_func

this_module = sys.modules[__name__]


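# Route attribute lookups on a module: functions defined outside paddleext.torchapi
# are wrapped with paddle_delegate_func, sub-modules are wrapped in ModuleDelegate,
# and classes and other objects are returned unchanged.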
def get_module_attribute(module, *args, **kwargs):
# Perform custom logic here

obj = object.__getattribute__(module, *args, **kwargs)

print("input module:", module)
print("result object", obj)
if isinstance(obj, types.FunctionType):
if not obj.__module__.startswith("paddleext.torchapi."):
return partial(paddle_delegate_func, obj)
else:
return obj
elif isinstance(obj, types.ModuleType):
print("result module: " + obj.__name__)
return ModuleDelegate(obj)
elif inspect.isclass(obj):
print("result class: " + obj.__name__)
return obj
else:
return obj

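# Wrapper that intercepts every attribute access on a module: paddle functions are
# adapted through paddle_delegate_func, nested modules are wrapped recursively, and
# classes coming from paddle are replaced by delegate subclasses that translate
# torch-style constructor kwargs (see make_delegate_class in core.py).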
class ModuleDelegate(object):
def __init__(self, module):
self.module = module

def __getattribute__(self, *args, **kwargs):

module = object.__getattribute__(self, "module")
result = object.__getattribute__(module, *args, **kwargs)
if isinstance(result, types.ModuleType):
return ModuleDelegate(result)
elif isinstance(result, types.FunctionType):
if not result.__module__.startswith("paddleext.torchapi."):
return partial(paddle_delegate_func, result)
else:
return result
elif inspect.isclass(result):
if result.__module__.startswith("paddle."):
return make_delegate_class(result)
else:
return result
else:
return result


# def __getattr__(self, *args, **kwargs):
# return get_module_attribute(self.module, *args, **kwargs),

# def __delattr__(self, *args, **kwargs):
# return object.__delattr__(self.module, *args, **kwargs)
#
# def __dir__(self):
# return dir(self.module)



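# Replace this module in sys.modules with a delegate so that every attribute access
# on paddleext.torchapi goes through the adaptation logic above.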
sys.modules[__name__] = ModuleDelegate(sys.modules[__name__])
115 changes: 115 additions & 0 deletions RE/paddleext/paddleext/torchapi/core.py
@@ -0,0 +1,115 @@
"""
paddle core
"""
import sys
import types
from functools import partial
from types import MethodType
from typing import Any

import paddle
import random
import numpy as np

Module = paddle.nn.Layer
ModuleBase = paddle.nn.Layer
ModuleDict = paddle.nn.LayerDict
ModuleList = paddle.nn.LayerList
device = str

dtype = paddle.dtype

def load_state_dict(module: Module, state_dict, *args, **kwargs):
module.set_state_dict(state_dict, *args, **kwargs)


Module.load_state_dict = load_state_dict

from paddle import *

def deterministic(seed=0):
    """Seed the Python, paddle and NumPy RNGs so that runs are reproducible."""
    random.seed(seed)
    paddle.seed(seed)
    np.random.seed(seed)


from paddle import bool, int32, int64, int8, float32, float64, float16

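# Aliases matching torch's dtype names; ``int`` and ``float`` deliberately shadow the
# builtins within this module so that ``B.int`` / ``B.float`` behave like ``torch.int``
# / ``torch.float``.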
long = paddle.int64
int = paddle.int32
float = paddle.float32
double = paddle.float64


def platform():
    """
    Returns:
        The name of the active backend; always ``"paddle"`` for this module.
    """
    return "paddle"



from paddle import no_grad, autograd

class set_detect_anomaly(object):
r"""Context-manager that sets the anomaly detection for the autograd engine on or off.
``set_detect_anomaly`` will enable or disable the autograd anomaly detection
based on its argument :attr:`mode`.
It can be used as a context-manager or as a function.
See ``detect_anomaly`` above for details of the anomaly detection behaviour.
Args:
mode (bool): Flag whether to enable anomaly detection (``True``),
or disable (``False``).
"""

def __init__(self, mode: bool) -> None:
pass

def __enter__(self) -> None:
pass

def __exit__(self, *args: Any) -> None:
pass


setattr(autograd, "set_detect_anomaly", set_detect_anomaly)


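# Adapt a torch-style call before delegating to a paddle function: torch's ``dim``
# keyword becomes paddle's ``axis``, and the ``device`` keyword is dropped.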
def paddle_delegate_func(func, *args, **kwargs):
if "dim" in kwargs:
kwargs["axis"] = kwargs["dim"]
del kwargs["dim"]

if "device" in kwargs:
del kwargs["device"]

return func(*args, **kwargs)

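# Create a subclass of a paddle class whose constructor translates torch-style
# keyword arguments into paddle's names: ``bias``/``weight`` become ``bias_attr``/
# ``weight_attr`` for Linear layers, and ``eps`` becomes ``epsilon`` for LayerNorm.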
def make_delegate_class(class_):

class DelegateClass(class_):
def __init__(self, *args, **kwargs):

if class_.__name__.endswith("Linear"):
if "bias" in kwargs:
kwargs["bias_attr"] = kwargs["bias"]
del kwargs["bias"]
if "weight" in kwargs:
kwargs["weight_attr"] = kwargs["weight"]
del kwargs["weight"]
if class_.__name__.endswith("LayerNorm"):
if "eps" in kwargs:
kwargs["epsilon"] = kwargs["eps"]
del kwargs["eps"]
super().__init__(*args, **kwargs)
# self.__class__ = class_

return DelegateClass


27 changes: 27 additions & 0 deletions RE/paddleext/paddleext/torchapi/cuda.py
@@ -0,0 +1,27 @@

import paddle

_initialized = True

def is_available():

return paddle.device.cuda.device_count() > 0

def manual_seed_all(seed):
paddle.seed(seed)


def manual_seed(seed):
paddle.seed(seed)


def set_device(device):
return paddle.set_device(device)


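# No-op stand-in so that torch.cuda.empty_cache() calls keep working unchanged.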
def empty_cache():
return


def device_count():

return paddle.device.cuda.device_count()
5 changes: 5 additions & 0 deletions RE/paddleext/paddleext/torchapi/data.py
@@ -0,0 +1,5 @@
"""
data for paddle
"""

from paddle.io import DataLoader, Dataset
17 changes: 17 additions & 0 deletions RE/paddleext/paddleext/torchapi/distributed.py
@@ -0,0 +1,17 @@

import paddle


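# Minimal stubs so that torch.distributed-style calls do not fail; real distributed
# initialization is not implemented here.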
def is_available():
return True

DISTRIBUTED = False

def is_initialized():
return DISTRIBUTED


def init_process_group(*args, **kwargs):

pass
