Merge branch 'main' into sklearn-decisiontree
HaiderSultanArc authored Sep 4, 2023
2 parents a8a4ef7 + 9e0d736 commit 30625dd
Showing 387 changed files with 62,929 additions and 56,318 deletions.
2 changes: 1 addition & 1 deletion .devcontainer/build_multiversion/devcontainer.json
@@ -9,7 +9,7 @@
"dockerfile": "../../docker/DockerfileMultiversion",
"context": "../..",
"args": {
"fw": ["numpy/1.24.2 tensorflow/2.11.0 tensorflow/2.12.0 jax/0.4.10 jax/0.4.8"]
"fw": ["numpy/1.24.2 tensorflow/2.11.0 tensorflow/2.12.0 jax/0.4.10 jax/0.4.8"]

}
},
22 changes: 22 additions & 0 deletions .github/workflows/synchronize-db.yml
@@ -0,0 +1,22 @@
name: Synchronize DB
on:
workflow_dispatch:
permissions:
actions: read
jobs:
synchronize-db:
runs-on: ubuntu-latest
steps:
- name: Checkout Ivy 🛎
uses: actions/checkout@v3
with:
path: ivy
persist-credentials: false
submodules: "recursive"
fetch-depth: 1

- name: Synchronize DB
run: |
pip install pymongo
cd ivy
python run_tests_CLI/synchronize_db.py ${{ secrets.MONGODB_PASSWORD }}
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -26,6 +26,6 @@ repos:
# Exclude everything in frontends except __init__.py, and func_wrapper.py
exclude: 'ivy/functional/(frontends|backends)/(?!.*/func_wrapper\.py$).*(?!__init__\.py$)'
- repo: https://github.com/unifyai/lint-hook
rev: b9a103a9f7991fec0ed636a2bcd4497691761e78
rev: 2ea80bc854c7f74b09620151028579083ff92ec2
hooks:
- id: ivy-lint
11 changes: 7 additions & 4 deletions docker/DockerfileMultiversion
@@ -1,9 +1,6 @@
FROM debian:buster
WORKDIR /ivy




ARG fw
ARG pycon=3.8.10
# Install miniconda
@@ -29,6 +26,7 @@ RUN apt-get update && \
apt-get install -y rsync && \
apt-get install -y libusb-1.0-0 && \
apt-get install -y libglib2.0-0 && \
pip3 install pip-autoremove && \
pip3 install --upgrade pip && \
pip3 install setuptools==58.5.3

@@ -42,10 +40,15 @@ RUN git clone --progress --recurse-submodules https://github.com/unifyai/ivy --d

COPY /docker/multiversion_framework_directory.py .
COPY /docker/requirement_mappings_multiversion.json .
COPY /docker/multiversion_testing_requirements.txt .

# requirement mappings directs which dependency to be installed and where
SHELL ["/bin/bash", "-c"]
RUN python3 multiversion_framework_directory.py $fw
RUN python3 multiversion_framework_directory.py $fw && \
pip install -r multiversion_testing_requirements.txt && \
pip-autoremove torch -y && \
pip-autoremove tensorflow -y && \
pip-autoremove jax -y


ENV PATH=/opt/miniconda/envs/multienv/bin:$PATH
10 changes: 5 additions & 5 deletions docker/multiversion_framework_directory.py
@@ -46,9 +46,7 @@ def install_deps(pkgs, path_to_json, base="/opt/fw/"):
# check to see if this pkg has specific version dependencies
with open(path_to_json, "r") as file:
json_data = json.load(file)
print(json_data.keys())
for keys in json_data[fw]:
print(keys, "here")
# check if key is dict
if isinstance(keys, dict):
# this is a dep with just one key
@@ -70,7 +68,8 @@ def install_deps(pkgs, path_to_json, base="/opt/fw/"):
)
else:
subprocess.run(
f"pip3 install {keys} --target"
"pip3 install "
f" {keys} {f'-f https://data.pyg.org/whl/torch-{ver}%2Bcpu.html' if keys=='torch-scatter' else ''} --target"
f" {path} --default-timeout=100 --no-cache-dir",
shell=True,
)
@@ -79,8 +78,9 @@ def install_deps(pkgs, path_to_json, base="/opt/fw/"):
if __name__ == "__main__":
arg_lis = sys.argv

json_path = ( # path to the json file storing version specific deps
"requirement_mappings_multiversion.json"
json_path = os.path.join( # path to the json file storing version specific deps
os.path.dirname(os.path.realpath(sys.argv[0])),
"requirement_mappings_multiversion.json",
)

directory_generator(arg_lis[1:])
17 changes: 13 additions & 4 deletions docker/multiversion_testing_requirements.txt
@@ -6,10 +6,7 @@ pymongo==4.3.3
redis==4.3.4
matplotlib==3.5.2
opencv-python==4.6.0.66 # mod_name=cv2
tensorflow-probability==0.17.0 # mod_name=tensorflow_probability
functorch==0.1.1
scipy==1.8.1
dm-haiku==0.0.6 # mod_name=haiku
pydriller
tqdm
coverage
@@ -20,4 +17,16 @@ colorama
packaging
nvidia-ml-py<=11.495.46 # mod_name=pynvml
paddle-bfloat
jsonpickle
jsonpickle
ml_dtypes
diskcache
google-auth # mod_name=google.auth
requests
pyvis
dill
scikit-learn # mod_name=sklearn
pandas
pyspark
autoflake # for backend generation
snakeviz # for profiling
cryptography
2 changes: 1 addition & 1 deletion docker/requirement_mappings_multiversion.json
@@ -1,7 +1,7 @@

{
"tensorflow": [
"tensorflow-probability"
{"tensorflow-probability":{"2.12.0":"0.20.0","2.11.0":"0.19.0"}}
],
"jax": ["dm-haiku", "flax",{"jaxlib": {"0.4.10": "0.4.10","0.4.8": "0.4.7"}}],
"numpy": ["numpy"],
4 changes: 2 additions & 2 deletions docs/index.rst
@@ -20,7 +20,6 @@
:caption: Users

overview/background.rst
overview/design.rst
overview/related_work.rst
overview/extensions.rst

@@ -30,8 +29,9 @@
:maxdepth: -1
:caption: Contributors

overview/deep_dive.rst
overview/design.rst
overview/contributing.rst
overview/deep_dive.rst


.. toctree::
9 changes: 5 additions & 4 deletions docs/overview/contributing/the_basics.rst
@@ -67,7 +67,7 @@ c. Comment on the ToDo list issue with a reference to your new issue like so:
At some point after your comment is made, your issue will automatically be added to the ToDo list and the comment will be deleted.
No need to wait for this to happen before progressing to the next stage. Don’t comment anything else on these ToDo issues, which should be kept clean with comments only as described above.

d. Start working on the task, and create a PR as soon as you have a full or partial solution, and then directly reference the issue in the pull request by adding the following content to the description of the PR:
d. Start working on the task, and open a PR as soon as you have a full or partial solution. When you open the PR, make sure to follow the `conventional commits format <https://www.conventionalcommits.org/en/v1.0.0/>`_, and then directly reference the issue in the pull request by adding the following content to the description of the PR:

:code:`Close #Issue_number`
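A PR title such as :code:`feat: add decision tree classifier to the sklearn frontend` would, for instance, follow the conventional commits format (an illustrative title only, not a required one).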

@@ -532,7 +532,7 @@ with PyCharm
1. Click the gutter at the executable line of code where you want to set the breakpoint or place the caret at the line and press :code:`Ctrl+F8`

.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/adding_breakpoint.png?raw=true
:aligh: center
:align: center

2. Enter into the debug mode:
1. Click on Run icon and Select **Debug test** or press :code:`Shift+F9`.
@@ -577,10 +577,11 @@ with PyCharm
1. Select the breakpoint-fragment of code, press :code:`Alt+shift+E` Start debugging!

.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/console_coding.png?raw=true
:aligh: center
:align: center


5. Using **try-except**:
1. PyChram is great at pointing the lines of code which are causing tests to fail.
1. PyCharm is great at pointing the lines of code which are causing tests to fail.
Navigating to that line, you can add Try-Except block with breakpoints to get in depth understanding of the errors.

.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/try_except.png?raw=true
6 changes: 4 additions & 2 deletions docs/overview/deep_dive/containers.rst
@@ -204,7 +204,7 @@ The *nestable* behaviour is added to any function which is decorated with the `h
This wrapper causes the function to be applied at each leaf of any containers passed in the input.
More information on this can be found in the `Function Wrapping <https://github.com/unifyai/ivy/blob/b725ed10bca15f6f10a0e5154af10231ca842da2/docs/partial_source/deep_dive/function_wrapping.rst>`_ section of the Deep Dive.

Additionally, any nestable function which returns multiple arrays, will return the same number of containers for it's container counterpart.
Additionally, any nestable function which returns multiple arrays, will return the same number of containers for its container counterpart.
This property makes the function symmetric with regard to the input-output behavior, irrespective of whether :class:`ivy.Array` or :class:`ivy.Container` instances are being used.
Any argument in the input can be replaced with a container without changing the number of inputs, and the presence or absence of ivy.Container instances in the input should not change the number of return values of the function.
In other words, if containers are detected in the input, then we should return a separate container for each array that the function would otherwise return.
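As a minimal sketch of this behaviour (assuming the numpy backend is installed; the function and values are illustrative only), applying a nestable function such as :func:`ivy.abs` to a container applies it at every leaf and returns a container with the same structure:

.. code-block:: python

    import ivy

    ivy.set_backend("numpy")

    x = ivy.Container(a=ivy.array([-1.0, 2.0]), b=ivy.array([3.0, -4.0]))

    # ivy.abs is nestable, so it is applied at each leaf of the container
    y = ivy.abs(x)  # Container with leaves a: [1., 2.] and b: [3., 4.]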
@@ -246,8 +246,10 @@ The functions :func:`ivy.clip`, :func:`ivy.log`, :func:`ivy.sum` and :func:`ivy.

Therefore, our approach is to **not** wrap any compositional functions which are already *implicitly nestable* as a result of the *nestable* functions called internally.

**Explicitly Nestable Compositional Functions**

There may be some compositional functions which are not implicitly nestable for some reason, and in such cases adding the explicit `handle_nestable <https://github.com/unifyai/ivy/blob/5f58c087906a797b5cb5603714d5e5a532fc4cd4/ivy/func_wrapper.py#L407>`_ wrapping may be necessary.
One such example is the :func:`ivy.linear` function which is not implicitly nestable despite being compositional. This is because of the use of special functions like :func:`__len__` which is not nestable and can't be made nestable.
One such example is the :func:`ivy.linear` function which is not implicitly nestable despite being compositional. This is because of the use of special functions like :func:`__len__` and :func:`__list__` which, among other functions, are not nestable and can't be made nestable.
But we should try to avoid this, in order to make the flow of computation as intuitive to the user as possible.
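As a rough sketch of the explicitly wrapped case (again assuming the numpy backend; the shapes are illustrative), :func:`ivy.linear` can still be called with a container at each leaf:

.. code-block:: python

    import ivy

    ivy.set_backend("numpy")

    x = ivy.Container(a=ivy.ones((2, 3)), b=ivy.ones((2, 3)))
    w = ivy.ones((4, 3))

    # the explicit handle_nestable wrapping maps ivy.linear over each leaf of x,
    # broadcasting the plain array argument w against every leaf
    out = ivy.linear(x, w)  # out.a and out.b both have shape (2, 4)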

When compiling the code, the computation graph is **identical** in either case, and there will be no implications on performance whatsoever.
30 changes: 19 additions & 11 deletions docs/overview/deep_dive/devices.rst
@@ -158,7 +158,7 @@ doesn't care about this, it moves all the tensors to the same device before perf
In Ivy, users can control the device on which the operation is to be executed using the `ivy.set_soft_device_mode`_ flag. There are two cases for this:
either the soft device mode is set to :code:`True` or :code:`False`.

1. When :code:`ivy.set_soft_device_mode(True)`:
**When ivy.set_soft_device_mode(True)**:

a. All the input arrays are moved to :code:`ivy.default_device()` while performing an operation. If the array is already present
in the default device, no device shifting is done.
@@ -174,7 +174,7 @@ are moved to :code:`ivy.default_device()` while performing :code:`ivy.add` opera
y = ivy.array([34], device="gpu:0")
ivy.add(x, y)
2. When :code:`ivy.set_soft_device_mode(False)`:
The priority of device shifting in this mode is the following:

#. The ``device`` argument.
#. The device the arrays are on.
#. :code:`default_device`


**When ivy.set_soft_device_mode(False)**:

a. If any of the input arrays are on a different device, a device exception is raised.
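A minimal sketch of this case (assuming a torch backend and an available GPU):

.. code-block:: python

    import ivy

    ivy.set_backend("torch")
    ivy.set_soft_device_mode(False)

    x = ivy.array([23], device="cpu")
    y = ivy.array([34], device="gpu:0")

    # with soft device mode disabled, mixing devices raises a device exception
    ivy.add(x, y)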

@@ -226,18 +233,16 @@ The code to handle all these cases are present inside `@handle_device_shifting`_
all the functions that accept at least one array as input (except mixed and compositional functions) in the `ivy.functional.ivy`_ submodule. The decorator calls
the :code:`ivy.handle_soft_device_variable` function under the hood to handle device shifting for each backend.
**Soft Device Handling Function**
The priority of device shifting is following in this mode:
There is a backend-specific implementation of the :code:`ivy.handle_soft_device_variable` function for numpy and tensorflow. The reason is that for numpy there
is no need for device shifting, as it only supports the 'cpu' device, whereas tensorflow automatically moves the inputs to 'gpu' if one is available and there is no way to turn this
off globally.
#. The ``device`` argument.
#. :code:`default_device`
The `numpy soft device handling function`_ just returns the inputs of the operation as they are, without making any changes,
whereas the `tensorflow soft device handling function`_ moves the input arrays to :code:`ivy.default_device()` using
the `tf.device`_ context manager.
**Soft Device Handling Function**
This is a function which plays a crucial role in the :code:`handle_device_shifting` decorator. The purpose of this function is to ensure that the function :code:`fn` passed to it is executed on the device passed in the :code:`device_shifting_dev` argument. If it is passed as :code:`None`, then the function will be executed on the default device.
For the rest of the frameworks, the `ivy implementation`_ of soft device handling function is used, which loops through
the inputs of the function and moves the arrays to :code:`ivy.default_device()`, if not already on that device.
Most of the backend implementations are very similar: first they move all the arrays to the desired device using :code:`ivy.nested_map`, and then they execute the function inside the device handling context manager from that native framework. The purpose of executing the function inside the context manager is to handle the functions that do not accept any arrays; in that case, the only way to let the native framework know on which device we want the function to be executed is through the context manager. This approach is used in most backend implementations, with the exceptions being tensorflow, where we don't have to move all the tensors to the desired device because just using its context manager is enough (it moves all the tensors itself internally), and numpy, since it only accepts `cpu` as device.
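The general idea can be sketched outside of Ivy with plain torch (an illustrative approximation of the approach, not the actual backend code):

.. code-block:: python

    import torch

    def run_on_device(fn, device, *args, **kwargs):
        # first move every tensor argument to the desired device
        args = [a.to(device) if isinstance(a, torch.Tensor) else a for a in args]
        kwargs = {k: v.to(device) if isinstance(v, torch.Tensor) else v
                  for k, v in kwargs.items()}
        # the native device scope covers functions that accept no tensors at all
        with torch.device(device):
            return fn(*args, **kwargs)

    x = torch.ones(3)
    run_on_device(torch.add, "cpu", x, x)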
**Forcing Operations on User Specified Device**
@@ -258,6 +263,9 @@ context manager. So from now on, all the operations will be executed on 'cpu' de
On exiting the context manager(`__exit__`_ method), the default device and soft device mode is reset to the previous state using `ivy.unset_default_device()`_ and
`ivy.unset_soft_device_mode()`_ respectively, to move back to the previous state.
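A minimal sketch of this flow (assuming a torch backend):

.. code-block:: python

    import ivy

    ivy.set_backend("torch")

    with ivy.DefaultDevice("cpu"):
        # inside the block the default device is "cpu" and soft device mode
        # is enabled, so the result is placed on the cpu
        z = ivy.add(ivy.array([1.0]), ivy.array([2.0]))
        print(ivy.dev(z))  # cpu

    # on exit, the previous default device and soft device mode are restored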
There are some functions (mostly creation functions) which accept a :code:`device` argument. This is for specifying the device on which the function is executed and the device of the returned array. :code:`handle_device_shifting` deals with this argument by first checking if it exists and then setting :code:`device_shifting_dev` to it, which is then passed to the :code:`handle_soft_device_variable` function depending on the :code:`soft_device` mode.
**Round Up**
This should have hopefully given you a good feel for devices, and how these are handled in Ivy.
8 changes: 6 additions & 2 deletions docs/overview/deep_dive/ivy_frontends_tests.rst
@@ -629,7 +629,11 @@ for example, :code:`ndarray.__add__` would expect an array as input, despite the
- :code:`init_tree` A full path to the initialization function.
- :code:`method_name` The name of the method to test.
:func:`helpers.test_frontend_method` is used to test frontend instance methods. It is used in the same way as :func:`helpers.test_frontend_function`.
:func:`helpers.test_frontend_method` is used to test frontend instance methods. It is used in the same way as :func:`helpers.test_frontend_function`. A few important arguments for this function are the following:
- :code:`init_input_dtypes` Input dtypes of the arguments with which the array is initialized.
- :code:`init_all_as_kwargs_np` The data to be passed when initializing; this will be a dictionary in which the numpy array containing the data is passed in the :code:`data` key.
- :code:`method_input_dtypes` The input dtypes of the arguments which are to be passed to the instance method after the initialization of the array.
- :code:`method_all_as_kwargs_np` All the arguments which are to be passed to the instance method.
Frontend Instance Method Test Examples
@@ -822,4 +826,4 @@ If you have any questions, please feel free to reach out on `discord`_ in the `i
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/iS7QFsQa9bI" class="video">
</iframe>
</iframe>
4 changes: 2 additions & 2 deletions docs/overview/design/building_blocks.rst
@@ -218,10 +218,10 @@ The contents of this function are as follows:
# if no global backend exists, we try to infer the backend from the arguments
f = _determine_backend_from_args(list(args) + list(kwargs.values()))
if f is not None:
if verbosity.level > 0:
verbosity.cprint("Using backend from type: {}".format(f))
implicit_backend = f.current_backend_str()
return f
if verbosity.level > 0:
verbosity.cprint("Using backend from type: {}".format(f))
return importlib.import_module(_backend_dict[implicit_backend])
If a global backend framework has been previously set using for example :code:`ivy.set_backend('tensorflow')`, then this globally set backend is returned.
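A short sketch of the two paths described above (assuming torch and numpy are installed):

.. code-block:: python

    import ivy
    import torch

    # no global backend set: the backend is inferred from the array arguments
    x = torch.tensor([1.0, 2.0])
    print(ivy.current_backend(x).current_backend_str())  # torch

    # once a global backend is set, it takes priority over inference
    ivy.set_backend("numpy")
    print(ivy.current_backend().current_backend_str())  # numpy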
2 changes: 2 additions & 0 deletions ivy/__init__.py
@@ -935,6 +935,7 @@ def __deepcopy__(self, memo):
"warning_level_stack": warning_level_stack,
"queue_timeout_stack": general.queue_timeout_stack,
"array_mode_stack": general.array_mode_stack,
"inplace_mode_stack": general.inplace_mode_stack,
"soft_device_mode_stack": device.soft_device_mode_stack,
"shape_array_mode_stack": general.shape_array_mode_stack,
"show_func_wrapper_trace_mode_stack": (
@@ -1415,6 +1416,7 @@ def cast_data_types(val=True):
"nan_policy",
"array_mode",
"nestable_mode",
"inplace_mode",
"exception_trace_mode",
"show_func_wrapper_trace_mode",
"min_denominator",
3 changes: 3 additions & 0 deletions ivy/compiler/compiler.py
@@ -71,6 +71,7 @@ def compile(
Examples
--------
>>> import ivy, time
>>> ivy.set_backend("torch")
>>> x = ivy.array([1.])
@@ -85,8 +86,10 @@
... k = ivy.ceil(c)
... return i, j, k
>>> graph = ivy.compile(fn, args=(x,))
Notice how the time taken to execute the compiled function is lower than
the original function. A typical run:
>>> start = time.time()
>>> fn(x)
>>> print(time.time() - start)