From b5b8839c49400c82849bd76789577391b57288fb Mon Sep 17 00:00:00 2001 From: Jackie Chen Date: Fri, 9 Sep 2022 13:14:52 -0400 Subject: [PATCH 1/5] Fix incorrect examples on split --- ivy/functional/ivy/manipulation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ivy/functional/ivy/manipulation.py b/ivy/functional/ivy/manipulation.py index 1521fafd2e69c..d4127b216cf0a 100644 --- a/ivy/functional/ivy/manipulation.py +++ b/ivy/functional/ivy/manipulation.py @@ -1061,12 +1061,12 @@ def split( [ivy.array([1]),ivy.array([2]),ivy.array([3])] >>> x = ivy.array([[3, 2, 1], [4, 5, 6]]) - >>> y = ivy.split(x, 2, 1, False) + >>> y = ivy.split(x, num_or_size_splits=2, axis=1, with_remainder=True) >>> print(y) [ivy.array([[3,2],[4,5]]),ivy.array([[1],[6]])] >>> x = ivy.array([4, 6, 5, 3]) - >>> y = ivy.split(x, [1, 2], 0, True) + >>> y = x.split(num_or_size_splits=[1, 3], axis=0, with_remainder=False) >>> print(y) ivy.array([[4], [6, 5, 3]]) From cee7abb815a72edff6b99f2f5aeb88f6462df796 Mon Sep 17 00:00:00 2001 From: Jackie Chen Date: Sat, 10 Sep 2022 16:07:01 -0400 Subject: [PATCH 2/5] Ensure _get_functions_from_string gets a function --- ivy/functional/ivy/data_type.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ivy/functional/ivy/data_type.py b/ivy/functional/ivy/data_type.py index 99c63a61a5389..9b7644ee69e08 100644 --- a/ivy/functional/ivy/data_type.py +++ b/ivy/functional/ivy/data_type.py @@ -72,9 +72,9 @@ def _get_functions_from_string(func_names, module): ret = set() # We only care about the functions in the ivy or the same module for func_name in func_names: - if hasattr(ivy, func_name): + if hasattr(ivy, func_name) and callable(getattr(ivy, func_name)): ret.add(getattr(ivy, func_name)) - elif hasattr(module, func_name): + elif hasattr(module, func_name) and callable(getattr(module, func_name)): ret.add(getattr(module, func_name)) return ret From 07a0f24e048f6429bb3d865cee2734fff248cfe6 Mon Sep 17 00:00:00 2001 From: 
Jackie Chen Date: Sun, 23 Oct 2022 12:35:37 -0400 Subject: [PATCH 3/5] Add new example for different backends compiler --- .../partial_source/design/building_blocks.rst | 48 +++++++++++++++++-- 1 file changed, 44 insertions(+), 4 deletions(-) diff --git a/docs/partial_source/design/building_blocks.rst b/docs/partial_source/design/building_blocks.rst index cdf0b365e3276..5204e21aedfe3 100644 --- a/docs/partial_source/design/building_blocks.rst +++ b/docs/partial_source/design/building_blocks.rst @@ -109,12 +109,12 @@ Calling the different backend files explicitly would work okay, but it would mea axis axis or axes along which products must be computed. By default, the product must be computed over the entire array. If a tuple of integers, products must be - computed over multiple axes. Default: ``None``. + computed over multiple axes. Default: None. keepdims bool, if True, the reduced axes (dimensions) must be included in the result as singleton dimensions, and, accordingly, the result must be compatible with the input array (see Broadcasting). Otherwise, if False, the reduced axes - (dimensions) must not be included in the result. Default: ``False``. + (dimensions) must not be included in the result. Default: False. dtype data type of the returned array. If None, if the default data type corresponding to the data type โ€œkindโ€ (integer or @@ -129,7 +129,7 @@ Calling the different backend files explicitly would work okay, but it would mea integer data type (e.g., if the default integer data type is int32, the returned array must have a uint32 data type). If the data type (either specified or resolved) differs from the data type of x, the input array should be cast to the - specified data type before computing the product. Default: ``None``. + specified data type before computing the product. Default: None. out optional output array, for writing the result to. 
@@ -433,8 +433,48 @@ The graph compiler does not compile to C++, CUDA or any other lower level langua Therefore, the backend code can always be run with maximal efficiency by compiling into an efficient low-level backend-specific computation graph. +This compilation is not restricted to just PyTorch. For example, let's take another example, but compile to Tensorflow, NumPy and JAX: + ++------------------------------------+ +|.. code-block:: python | +| | +| def ivy_func(x, y): | +| w = ivy.diag(x) | +| z = ivy.matmul(w, y) | +| return z | +| | +| # input | +| x = ivy.array([[1., 2., 3.]]) | +| y = ivy.array([[2., 3., 4.]]) | +| # create graph | +| graph = ivy.compile_graph( | +| ivy_func, x, y) | +| | +| # call graph | +| ret = graph(x, y) | ++------------------------------------+ + +Converting this code to a graph, we get a slightly different graph for each backend: + +.. image:: https://github.com/unifyai/unifyai.github.io/blob/master/img/externally_linked/design/compiled_graph_tf.png?raw=true + :align: center + :width: 75% + +.. image:: https://github.com/unifyai/unifyai.github.io/blob/master/img/externally_linked/design/compiled_graph_numpy.png?raw=true + :align: center + :width: 75% + +.. image:: https://github.com/unifyai/unifyai.github.io/blob/master/img/externally_linked/design/compiled_graph_jax.png?raw=true + :align: center + :width: 75% + +The example above further emphasizes that the graph compiler creates a computation graph consisting of backend functions, not Ivy functions. +Specifically, the same Ivy code compiles to different graphs depending on the selected backend. However, when compiling native framework code, we are only able to compile a graph for that same framework. +For example, we cannot take torch code and compile this into tensorflow code. However, we can transpile torch code into tensorflow code (see :ref:Ivy as a Transpiler for more details). 
+ + **Round Up** Hopefully this has painted a clear picture of the fundamental building blocks underpinning the Ivy framework, being the backend functional APIs, Ivy functional API, backend handler and graph compiler ๐Ÿ™‚ -Please reach out on `discord `_ if you have any questions! +Please reach out on `discord `_ if you have any questions! From 9b125aaf8888f087c10b397857ecadd646465123 Mon Sep 17 00:00:00 2001 From: Jackie Chen Date: Sun, 23 Oct 2022 12:42:17 -0400 Subject: [PATCH 4/5] Make writing more clear and have examples flow better --- .../partial_source/design/building_blocks.rst | 61 ++++++++++--------- 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/docs/partial_source/design/building_blocks.rst b/docs/partial_source/design/building_blocks.rst index 5204e21aedfe3..97564a435e327 100644 --- a/docs/partial_source/design/building_blocks.rst +++ b/docs/partial_source/design/building_blocks.rst @@ -344,7 +344,9 @@ The compiler takes in any Ivy function, backend function, or composition, and re :align: center :width: 75% -As an example, the following 3 pieces of code all compile to the exact same computation graph as shown: +Let's look at a few examples, and observe the compiled graph of the Ivy code against the native backend code. +First, let's set our desired backend as PyTorch. When we compile the three functions below, despite the fact that each +has a different mix of Ivy and PyTorch code, they all compile to the same graph: +----------------------------------------+-----------------------------------------+-----------------------------------------+ |.. code-block:: python |.. code-block:: python |.. code-block:: python | @@ -405,35 +407,7 @@ For all existing ML frameworks, the functional API is the backbone which underpi :align: center :width: 75% -The graph compiler does not compile to C++, CUDA or any other lower level language. 
It simply traces the backend functional methods in the graph, stores this graph, and then efficiently traverses this graph at execution time, all in Python. Compiling to lower level languages (C++, CUDA, TorchScript etc.) is supported for most backend frameworks via :func:`ivy.compile`, which wraps backend-specific compilation code, for example: - -.. code-block:: python - - # ivy/functional/backends/tensorflow/compilation.py - compile = lambda fn, dynamic=True, example_inputs=None,\ - static_argnums=None, static_argnames=None:\ - tf.function(fn) - -.. code-block:: python - - # ivy/functional/backends/torch/compilation.py - def compile(fn, dynamic=True, example_inputs=None, - static_argnums=None, static_argnames=None): - if dynamic: - return torch.jit.script(fn) - return torch.jit.trace(fn, example_inputs) - -.. code-block:: python - - # ivy/functional/backends/jax/compilation.py - compile = lambda fn, dynamic=True, example_inputs=None,\ - static_argnums=None, static_argnames=None:\ - jax.jit(fn, static_argnums=static_argnums, - static_argnames=static_argnames) - -Therefore, the backend code can always be run with maximal efficiency by compiling into an efficient low-level backend-specific computation graph. - -This compilation is not restricted to just PyTorch. For example, let's take another example, but compile to Tensorflow, NumPy and JAX: +This compilation is not restricted to just PyTorch. Let's take another example, but compile to Tensorflow, NumPy and JAX: +------------------------------------+ |.. code-block:: python | @@ -472,6 +446,33 @@ The example above further emphasizes that the graph compiler creates a computati Specifically, the same Ivy code compiles to different graphs depending on the selected backend. However, when compiling native framework code, we are only able to compile a graph for that same framework. For example, we cannot take torch code and compile this into tensorflow code. 
However, we can transpile torch code into tensorflow code (see :ref:Ivy as a Transpiler for more details). +The graph compiler does not compile to C++, CUDA or any other lower level language. It simply traces the backend functional methods in the graph, stores this graph, and then efficiently traverses this graph at execution time, all in Python. Compiling to lower level languages (C++, CUDA, TorchScript etc.) is supported for most backend frameworks via :func:`ivy.compile`, which wraps backend-specific compilation code, for example: + +.. code-block:: python + + # ivy/functional/backends/tensorflow/compilation.py + compile = lambda fn, dynamic=True, example_inputs=None,\ + static_argnums=None, static_argnames=None:\ + tf.function(fn) + +.. code-block:: python + + # ivy/functional/backends/torch/compilation.py + def compile(fn, dynamic=True, example_inputs=None, + static_argnums=None, static_argnames=None): + if dynamic: + return torch.jit.script(fn) + return torch.jit.trace(fn, example_inputs) + +.. code-block:: python + + # ivy/functional/backends/jax/compilation.py + compile = lambda fn, dynamic=True, example_inputs=None,\ + static_argnums=None, static_argnames=None:\ + jax.jit(fn, static_argnums=static_argnums, + static_argnames=static_argnames) + +Therefore, the backend code can always be run with maximal efficiency by compiling into an efficient low-level backend-specific computation graph. 
**Round Up** From daddef4ba556dd80160d6eb28ee1bacaa7eb03e2 Mon Sep 17 00:00:00 2001 From: Jackie Chen Date: Sun, 23 Oct 2022 12:52:45 -0400 Subject: [PATCH 5/5] Revert accidental changes --- docs/partial_source/design/building_blocks.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/partial_source/design/building_blocks.rst b/docs/partial_source/design/building_blocks.rst index 97564a435e327..26e03b76a5b15 100644 --- a/docs/partial_source/design/building_blocks.rst +++ b/docs/partial_source/design/building_blocks.rst @@ -109,12 +109,12 @@ Calling the different backend files explicitly would work okay, but it would mea axis axis or axes along which products must be computed. By default, the product must be computed over the entire array. If a tuple of integers, products must be - computed over multiple axes. Default: None. + computed over multiple axes. Default: ``None``. keepdims bool, if True, the reduced axes (dimensions) must be included in the result as singleton dimensions, and, accordingly, the result must be compatible with the input array (see Broadcasting). Otherwise, if False, the reduced axes - (dimensions) must not be included in the result. Default: False. + (dimensions) must not be included in the result. Default: ``False``. dtype data type of the returned array. If None, if the default data type corresponding to the data type โ€œkindโ€ (integer or @@ -129,7 +129,7 @@ Calling the different backend files explicitly would work okay, but it would mea integer data type (e.g., if the default integer data type is int32, the returned array must have a uint32 data type). If the data type (either specified or resolved) differs from the data type of x, the input array should be cast to the - specified data type before computing the product. Default: None. + specified data type before computing the product. Default: ``None``. out optional output array, for writing the result to. 
@@ -478,4 +478,4 @@ Therefore, the backend code can always be run with maximal efficiency by compili Hopefully this has painted a clear picture of the fundamental building blocks underpinning the Ivy framework, being the backend functional APIs, Ivy functional API, backend handler and graph compiler ๐Ÿ™‚ -Please reach out on `discord `_ if you have any questions! +Please reach out on `discord `_ if you have any questions!