Update: compile updated to trace in ivy repo
HaiderSultanArc committed Aug 31, 2023
1 parent 13d99f9 commit 5d749d7
Showing 16 changed files with 188 additions and 184 deletions.
20 changes: 10 additions & 10 deletions ivy/compiler/compiler.py
@@ -14,7 +14,7 @@ class LazyGraph:
pass


-def compile(
+def trace(
*objs: Callable,
stateful: Optional[List] = None,
arg_stateful_idxs: Optional[List] = None,
@@ -32,16 +32,16 @@ def compile(
kwargs: Optional[dict] = None,
) -> Union[Graph, LazyGraph]:
if python_version[1] == "8":
-from ._compiler_38 import compile as _compile
+from ._compiler_38 import trace as _trace
else:
-from ._compiler import compile as _compile
+from ._compiler import trace as _trace
"""
-Take `fn` and compiles it into a more efficient composition of backend operations.
+Take `fn` and decomposes it into a more efficient composition of backend operations.
Parameters
----------
objs
-callable(s) to compile and create a graph of
+callable(s) to trace and create a graph of
stateful
list of instances to be considered stateful during the graph compilation
arg_stateful_idxs
@@ -59,15 +59,15 @@ def compile(
static_argnames
for jax's jit compilation
graph_caching
-whether to cache the compiled graph
+whether to cache the traced graph
args
positional arguments for `obj`
kwargs
keyword arguments for `obj`
Returns
-------
-the compiled `Graph` object.
+the traced `Graph` object.
Examples
--------
@@ -84,8 +84,8 @@ def compile(
... j = ivy.floor(b)
... k = ivy.ceil(c)
... return i, j, k
->>> graph = ivy.compile(fn, args=(x,))
-Notice how the time taken to execute the compiled function is lower than
+>>> graph = ivy.trace(fn, args=(x,))
+Notice how the time taken to execute the traced function is lower than
the original function. A typical run:
>>> start = time.time()
>>> fn(x)
@@ -96,7 +96,7 @@ def compile(
>>> print(time.time() - start)
0.0001785755157470703
"""
-return _compile(
+return _trace(
*objs,
stateful=stateful,
arg_stateful_idxs=arg_stateful_idxs,
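Taken together, the rename is purely nominal: the entry point's call signature is unchanged, only `ivy.compile` becomes `ivy.trace`. A minimal sketch of the updated usage, assuming a NumPy backend is installed (`fn` below is illustrative, not the docstring's elided function):

    import time
    import ivy

    ivy.set_backend("numpy")
    x = ivy.array([1.0, 2.0, 3.0])

    def fn(x):
        y = ivy.mean(x)
        z = ivy.cos(y)
        return ivy.sum(z * x)

    graph = ivy.trace(fn, args=(x,))  # formerly: ivy.compile(fn, args=(x,))

    start = time.time()
    fn(x)  # eager call through the backend
    print(time.time() - start)

    start = time.time()
    graph(x)  # replays the traced graph of backend ops
    print(time.time() - start)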
2 changes: 1 addition & 1 deletion ivy/functional/backends/tensorflow/control_flow_ops.py
@@ -18,7 +18,7 @@ def if_else(cond, body_fn, orelse_fn, vars):
cond = bool(cond(*vars))
# return tf.cond(cond, lambda: body_fn(*vars), lambda: orelse_fn(*vars))

-# use pythonic placeholder until the graph compiler supports callable arguments
+# use pythonic placeholder until the tracer supports callable arguments

if cond:
return body_fn(*vars)
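Since the placeholder evaluates `cond(*vars)` eagerly and dispatches in Python, `cond` must itself be a callable over `vars`, like the two branch functions. A hypothetical call, for illustration only:

    # All three callables receive the unpacked `vars` tuple.
    result = if_else(
        lambda x: x > 0,  # cond: reduced to a Python bool via bool(cond(*vars))
        lambda x: x + 1,  # body_fn: chosen when cond is truthy
        lambda x: x - 1,  # orelse_fn: chosen otherwise
        (3,),             # vars
    )
    # result == 4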
2 changes: 1 addition & 1 deletion ivy/functional/ivy/general.py
@@ -1662,7 +1662,7 @@ def arg_names(receiver):
>>> x = ivy.arg_names(ivy.optimizers.Adam)
>>> print(x)
['lr', 'beta1', 'beta2', 'epsilon', 'inplace',
-'stop_gradients', 'compile_on_next_step', 'device']
+'stop_gradients', 'trace_on_next_step', 'device']
"""
return list(inspect.signature(receiver).parameters.keys())

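The doctest above changes only because `Adam`'s keyword was renamed; `arg_names` itself is plain signature introspection. A self-contained sketch with a hypothetical receiver:

    import inspect

    def demo(lr, beta1, trace_on_next_step=False):  # hypothetical function
        pass

    # The same introspection arg_names performs internally:
    print(list(inspect.signature(demo).parameters.keys()))
    # ['lr', 'beta1', 'trace_on_next_step']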
60 changes: 30 additions & 30 deletions ivy/stateful/module.py
@@ -51,12 +51,12 @@ def __init__(
v=None,
buffers=None,
build_mode="on_init",
-compile_on_next_step=False,
+trace_on_next_step=False,
store_vars=True,
stateful=None,
arg_stateful_idxs=None,
kwarg_stateful_idxs=None,
-fallback_to_non_compiled=False,
+fallback_to_non_traced=False,
with_partial_v=False,
devices=None,
dtype=None,
@@ -79,8 +79,8 @@ def __init__(
How the Module is built, either on initialization (now),
explicitly by the user by calling build(), or the first
time the __call__ method is run. Default is on initialization.
-compile_on_next_step
-Whether to compile the network on the next forward pass.
+trace_on_next_step
+Whether to trace the network in a graph on the next forward pass.
Default is ``False``.
store_vars
Whether or not to store the variables created. Default is ``True``.
@@ -94,9 +94,9 @@ def __init__(
kwarg_stateful_idxs
The nested keyword argument indices of stateful items to track as part of
the forward pass. Used when graph compiling, default is ``None``.
-fallback_to_non_compiled
-Whether to fall back to non-compiled forward call in the case that an error
-is raised during the compiled forward pass. Default is ``True``.
+fallback_to_non_traced
+Whether to fall back to non-traced forward call in the case that an error
+is raised during the traced forward pass. Default is ``True``.
with_partial_v
Whether to allow partial specification of variables. Default is ``False``.
training
@@ -121,13 +121,13 @@ def __init__(
self._stateful = stateful
self._arg_stateful_idxs = arg_stateful_idxs
self._kwarg_stateful_idxs = kwarg_stateful_idxs
-self._fallback_to_non_compiled = fallback_to_non_compiled
+self._fallback_to_non_traced = fallback_to_non_traced
self._with_partial_v = with_partial_v
self._store_vars = store_vars
self._built = False
-self._compiled = False
-self._compiled_fn = None
-self._compile_on_next_step = compile_on_next_step
+self._traced = False
+self._traced_fn = None
+self._trace_on_next_step = trace_on_next_step
self._v_in = v if isinstance(v, Container) or v is None else Container(v)
self.v = v
self.top_v = None
@@ -147,7 +147,7 @@ def __init__(
self._kwargs = kwargs
self._module_graph = None
self._target = None
-self._lazy_compiled = False
+self._lazy_traced = False
self._dynamic_backend = dynamic_backend
self.training = training
if build_mode != "on_init":
@@ -627,17 +627,17 @@ def __call__(
-------
ret
"""
-if self._lazy_compiled:
-# we are compiling since we want to transpile module,
+if self._lazy_traced:
+# we are creating graph since we want to transpile module,
# so set the appropriate backend
if self._target:
ivy.set_backend(self._target)
-self.compile(args=args, kwargs=kwargs)
+self.trace(args=args, kwargs=kwargs)
if self._target:
ivy.previous_backend()

if self._module_graph:
-# we need `v` in kwargs, since this is a compiled call
+# we need `v` in kwargs, since this is a traced call
v = v if v else self.v
return self._module_graph(*args, v=v, **kwargs)

@@ -857,7 +857,7 @@ def show_graph(
fname: Optional[str] = None,
):
if not ivy.exists(self._module_graph):
raise ValueError("You must compile the module to display the graph.")
raise ValueError("You must trace the module to display the graph.")

return self._module_graph.show(
save_to_disk=save_to_disk,
@@ -897,28 +897,28 @@ def __delattr__(self, name):
else:
super().__delattr__(name)

-def compile(
+def trace(
self,
args: Optional[Tuple] = None,
kwargs: Optional[Dict] = None,
-**compile_kwargs,
+**trace_kwargs,
):
"""
-Compile the `ivy.Module`'s `_unified_ivy_graph` or `_call` method to the target
+Trace the `ivy.Module`'s `_unified_ivy_graph` or `_call` method to the target
backend.
Parameters
----------
args:
-arguments used to compile. Defaults to None.
+arguments used to trace. Defaults to None.
kwargs:
-keyword arguments used to compile. Defaults to None.
-compile_kwargs:
-keyword arguments passed to the compile function.
+keyword arguments used to trace. Defaults to None.
+trace_kwargs:
+keyword arguments passed to the trace function.
"""
-# no arguments given to compile, so delay the compilation
+# no arguments given to trace, so delay the compilation
if not (args or kwargs):
-self._lazy_compiled = True
+self._lazy_traced = True
return

# we do not need convert the args to source
@@ -929,13 +929,13 @@ def compile(
kwargs = copy.copy(kwargs)
kwargs["v"] = self.v

-fn_to_compile = ivy.default(self._module_graph, self._call)
+fn_to_trace = ivy.default(self._module_graph, self._call)

-self._module_graph = ivy.compile(
-fn_to_compile, **compile_kwargs, args=args, kwargs=kwargs
+self._module_graph = ivy.trace(
+fn_to_trace, **trace_kwargs, args=args, kwargs=kwargs
)

-self._lazy_compiled = False
+self._lazy_traced = False

def save(self, filename):
"""
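The renamed method preserves both tracing paths: `trace()` with no arguments only sets `_lazy_traced`, deferring the work until the first `__call__`, while passing `args`/`kwargs` traces immediately. A sketch assuming a minimal user-defined module (`MyModule` is hypothetical) and a NumPy backend:

    import ivy

    ivy.set_backend("numpy")

    class MyModule(ivy.Module):
        def _forward(self, x):
            return ivy.relu(x)

    model = MyModule()
    x = ivy.array([-1.0, 2.0])

    model.trace()           # no args/kwargs: sets _lazy_traced and returns
    model(x)                # first call triggers the deferred trace
    model.trace(args=(x,))  # alternatively, trace eagerly with concrete args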
56 changes: 28 additions & 28 deletions ivy/stateful/optimizers.py
@@ -20,8 +20,8 @@ def __init__(
inplace: bool = True,
stop_gradients: bool = True,
init_on_first_step: bool = False,
-compile_on_next_step: bool = False,
-fallback_to_non_compiled: bool = False,
+trace_on_next_step: bool = False,
+fallback_to_non_traced: bool = False,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
):
"""
@@ -42,11 +42,11 @@ def __init__(
init_on_first_step
Whether the optimizer is initialized on the first step.
Default is ``False``.
-compile_on_next_step
-Whether to compile the optimizer on the next step. Default is ``False``.
-fallback_to_non_compiled
-Whether to fall back to non-compiled forward call in the case that an error
-is raised during the compiled forward pass. Default is ``True``.
+trace_on_next_step
+Whether to trace the optimizer on the next step. Default is ``False``.
+fallback_to_non_traced
+Whether to fall back to non-traced forward call in the case that an error
+is raised during the traced forward pass. Default is ``True``.
device
Device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. (Default value = None)
@@ -56,12 +56,12 @@ def __init__(
self._stop_gradients = stop_gradients
self._init_on_first_step = init_on_first_step
self._initialized = not init_on_first_step
-self._compile_on_next_step = compile_on_next_step
-self._fallback_to_non_compiled = fallback_to_non_compiled
+self._trace_on_next_step = trace_on_next_step
+self._fallback_to_non_traced = fallback_to_non_traced
self._dev = ivy.default(device, ivy.default_device())
self._count = ivy.array([0], device=self._dev)
-self._compiled_step_fn = None
-self._compiled = False
+self._traced_step_fn = None
+self._traced = False

# Private #
# --------#
@@ -167,7 +167,7 @@ def __init__(
lr: float = 1e-4,
inplace: bool = True,
stop_gradients: bool = True,
-compile_on_next_step: bool = False,
+trace_on_next_step: bool = False,
):
"""
Construct a Stochastic-Gradient-Descent (SGD) optimizer.
@@ -184,11 +184,11 @@ def __init__(
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
-compile_on_next_step
-Whether to compile the optimizer on the next step. Default is ``False``.
+trace_on_next_step
+Whether to trace the optimizer on the next step. Default is ``False``.
"""
Optimizer.__init__(
-self, lr, inplace, stop_gradients, compile_on_next_step=compile_on_next_step
+self, lr, inplace, stop_gradients, trace_on_next_step=trace_on_next_step
)

# Custom Step
@@ -240,7 +240,7 @@ def __init__(
decay_lambda: float = 0,
inplace: bool = True,
stop_gradients: bool = True,
-compile_on_next_step: bool = False,
+trace_on_next_step: bool = False,
):
"""
Construct a Layer-wise Adaptive Rate Scaling (LARS) optimizer.
@@ -259,12 +259,12 @@ def __init__(
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
-compile_on_next_step
-Whether to compile the optimizer on the next step. Default is ``False``.
+trace_on_next_step
+Whether to trace the optimizer on the next step. Default is ``False``.
"""
self._decay_lambda = decay_lambda
Optimizer.__init__(
-self, lr, inplace, stop_gradients, compile_on_next_step=compile_on_next_step
+self, lr, inplace, stop_gradients, trace_on_next_step=trace_on_next_step
)

# Custom Step
@@ -319,7 +319,7 @@ def __init__(
epsilon: float = 1e-07,
inplace: bool = True,
stop_gradients: bool = True,
-compile_on_next_step: bool = False,
+trace_on_next_step: bool = False,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
):
"""
@@ -344,8 +344,8 @@ def __init__(
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
-compile_on_next_step
-Whether to compile the optimizer on the next step. Default is ``False``.
+trace_on_next_step
+Whether to trace the optimizer on the next step. Default is ``False``.
device
Device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. (Default value = None)
@@ -356,10 +356,10 @@ def __init__(
self._mw = None
self._vw = None
self._first_pass = True
-self._should_compile = False
+self._should_trace = False

Optimizer.__init__(
-self, lr, inplace, stop_gradients, True, compile_on_next_step, device=device
+self, lr, inplace, stop_gradients, True, trace_on_next_step, device=device
)

# Custom Step
@@ -428,7 +428,7 @@ def __init__(
decay_lambda: float = 0,
inplace: bool = True,
stop_gradients: bool = True,
-compile_on_next_step: bool = False,
+trace_on_next_step: bool = False,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
):
"""
@@ -458,14 +458,14 @@ def __init__(
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
-compile_on_next_step
-Whether to compile the optimizer on the next step. Default is ``False``.
+trace_on_next_step
+Whether to trace the optimizer on the next step. Default is ``False``.
device
Device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. (Default value = None)
"""
Optimizer.__init__(
-self, lr, inplace, stop_gradients, True, compile_on_next_step, device=device
+self, lr, inplace, stop_gradients, True, trace_on_next_step, device=device
)
self._beta1 = beta1
self._beta2 = beta2
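On the optimizer side the rename is again mechanical: each constructor simply forwards the renamed flag to `Optimizer.__init__`. A hedged construction sketch (values illustrative; assumes `ivy.SGD` and `Optimizer.step(v, grads)` behave as in the surrounding file):

    import ivy

    ivy.set_backend("numpy")

    opt = ivy.SGD(lr=1e-3, trace_on_next_step=True)  # formerly compile_on_next_step
    v = ivy.Container(w=ivy.array([1.0, 2.0]))
    grads = ivy.Container(w=ivy.array([0.1, 0.2]))
    v = opt.step(v, grads)  # the next step's update fn can now be traced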
(Diffs for the remaining 11 changed files not shown.)
