Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft: Tobi Dance #22

Draft
wants to merge 23 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from 7 commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
20df0c1
added logging.info to inform about not compiling
tobidelbruck Nov 26, 2022
9e7d4fa
to fix tensorflow JIT compile case insensitivity problem, renamed cart …
tobidelbruck Nov 27, 2022
8c60986
generate pycharm link in log output.
tobidelbruck Nov 27, 2022
4c0bd92
added nan value to all libs
tobidelbruck Nov 28, 2022
1681fdd
rename config_cost_function.yml to config_cost_functions.yml for cons…
tobidelbruck Nov 28, 2022
4776ed4
finally the dynamically modifiable control cost parameters are workin…
tobidelbruck Dec 11, 2022
9e72b37
now spin and balance both work! and so does changing the policy and …
tobidelbruck Dec 12, 2022
3c1895f
Pull from master branch
frehe Dec 14, 2022
0c6ea2a
Rename tensorflow compilation flags
frehe Dec 15, 2022
8d5eddd
add equal and pow methods
tobidelbruck Dec 18, 2022
1ddc244
Merge remote-tracking branch 'origin/Tobi_Dance' into Tobi_Dance
tobidelbruck Dec 18, 2022
d343f31
renamed s to state for clarity in many of the classes.
Dec 24, 2022
91a611e
Merge remote-tracking branch 'origin/main' into Tobi_Dance
tobidelbruck Jan 31, 2023
a6fda56
Merge remote-tracking branch 'origin/master' into Tobi_Dance
tobidelbruck Jan 31, 2023
7f7659d
moved get_logger to own file in SI_Toolkit
tobidelbruck Feb 6, 2023
4d34dde
update path to config_cost_functions.yml
tobidelbruck Feb 7, 2023
76d455d
Merge remote-tracking branch 'origin/Tobi_Dance' into Tobi_Dance
tobidelbruck Feb 7, 2023
f13330c
move get_logger.py to Control_Toolkit so that it can be used by physi…
tobidelbruck Feb 8, 2023
5155fe9
cartpole_dancer.py starts to work. Music starts and stops, some steps…
tobidelbruck Feb 10, 2023
f63ab96
added primitive ability to record the predictor_ODE_tf.py predictions…
tobidelbruck Feb 13, 2023
ab87fec
added 'cartwheel' step to cartpole_trajectory_generator.py.
tobidelbruck Feb 16, 2023
3dd5102
fixed some logic and reduced some loggers to debug level
tobidelbruck Feb 19, 2023
89f4ead
major changes to cartpole_dancer_cost and cartpole_trajectory_generat…
tobidelbruck Feb 28, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 13 additions & 3 deletions src/SI_Toolkit/Functions/TF/Compile.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,16 +4,21 @@
import tensorflow as tf
import torch

from Control_Toolkit.others.globals_and_utils import get_logger
log=get_logger(__name__)

from SI_Toolkit.computation_library import ComputationLibrary



try:
from SI_Toolkit_ASF import GLOBALLY_DISABLE_COMPILATION, USE_JIT_COMPILATION
except ImportError:
logging.warn("No compilation option set in SI_Toolkit_ASF. Setting GLOBALLY_DISABLE_COMPILATION to True.")
log.warn("No compilation option set in SI_Toolkit_ASF/__init.py__. Setting GLOBALLY_DISABLE_COMPILATION to True.")
GLOBALLY_DISABLE_COMPILATION = True

def tf_function_jit(func):
return tf.function(func=func, jit_compile=True)
return tf.function(func=func, jit_compile=True,)


def tf_function_experimental(func):
Expand All @@ -25,6 +30,7 @@ def identity(func):


if GLOBALLY_DISABLE_COMPILATION:
log.info('TensorFlow compilation is disabled by GLOBALLY_DISABLE_COMPILATION=True')
CompileTF = identity
else:
if platform.machine() == 'arm64' and platform.system() == 'Darwin': # For M1 Apple processor
Expand All @@ -33,9 +39,12 @@ def identity(func):
CompileTF = tf.function
else:
CompileTF = tf_function_jit
log.info(f'using {CompileTF} compilation')
# CompileTF = tf_function_experimental # Should be same as tf_function_jit, not appropriate for newer version of TF

def CompileAdaptive(fun):
""" TODO add docstring to explain what it does and where it is used
"""
instance = fun.__self__
assert hasattr(instance, "lib"), "Instance with this method has no computation library defined"
computation_library: "type[ComputationLibrary]" = instance.lib
Expand All @@ -44,7 +53,8 @@ def CompileAdaptive(fun):
if GLOBALLY_DISABLE_COMPILATION:
return identity(fun)
elif lib_name == 'TF':
log.info(f'compiling tensorflow {fun}')
return CompileTF(fun)
else:
print('Jit compilation for Pytorch not yet implemented.')
log.warning(f'JIT compilation for {lib_name} not yet implemented.')
return identity(fun)
14 changes: 10 additions & 4 deletions src/SI_Toolkit/Predictors/predictor_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ def __init__(self):

def configure(self, batch_size: int, horizon: int, dt: float, computation_library: "Optional[type[ComputationLibrary]]"=None, predictor_specification=None, compile_standalone=False):


self.update_predictor_config_from_specification(predictor_specification)

compile_standalone = {'disable_individual_compilation': not compile_standalone}
Expand All @@ -53,11 +54,16 @@ def configure(self, batch_size: int, horizon: int, dt: float, computation_librar
self.predictor = predictor_ODE_tf(horizon=self.horizon, dt=dt, batch_size=self.batch_size, **self.predictor_config, **compile_standalone)

else:
raise NotImplementedError('Type of the predictor not recognised.')
raise NotImplementedError(f'Type of the predictor {self.predictor_type} is not recognised.')

# computation_library defaults to None. In that case, do not check for conformity.
if computation_library is not None and computation_library not in self.predictor.supported_computation_libraries:
raise ValueError(f"Predictor {self.predictor.__class__.__name__} does not support {computation_library.__name__}")
# in other cases, check after we configure it to make sure it supports itself
if not computation_library is None and computation_library not in self.predictor.supported_computation_libraries:
raise ValueError(
f"Predictor {self.predictor.__class__.__name__} does not support {computation_library.__name__}")
Comment on lines +83 to +85
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
if not computation_library is None and computation_library not in self.predictor.supported_computation_libraries:
raise ValueError(
f"Predictor {self.predictor.__class__.__name__} does not support {computation_library.__name__}")
if computation_library is not None and computation_library not in self.predictor.supported_computation_libraries:
raise ValueError(
f"Predictor {self.predictor.__class__.__name__} does not support {computation_library.__name__}"
)


self.predictor.lib=computation_library # set the library type on the predictor object so we can use it to assign attributes later


def configure_with_compilation(self, batch_size, horizon, dt, predictor_specification=None):
"""
Expand Down
17 changes: 14 additions & 3 deletions src/SI_Toolkit/computation_library.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ class ComputationLibrary:
gather: Callable[[TensorType, TensorType, int], TensorType] = None
gather_last: Callable[[TensorType, TensorType], TensorType] = None
arange: Callable[[Optional[NumericType], NumericType, Optional[NumericType]], TensorType] = None
zeros: Callable[["tuple[int]"], TensorType] = None
zeros: Callable[["tuple[int,...]"], TensorType] = None
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Very nice! I had wrong assumptions about how to type subscript tuples. Learned something again.

zeros_like: Callable[[TensorType], TensorType] = None
ones: Callable[["tuple[int]"], TensorType] = None
sign: Callable[[TensorType], TensorType] = None
Expand Down Expand Up @@ -88,6 +88,9 @@ class ComputationLibrary:
dot: Callable[[TensorType, TensorType], TensorType] = None
stop_gradient: Callable[[TensorType], TensorType] = None
assign: Callable[[Union[TensorType, tf.Variable], TensorType], Union[TensorType, tf.Variable]] = None
nan:TensorType=None
isnan:Callable[[TensorType],bool]=None
string = None


class NumpyLibrary(ComputationLibrary):
Expand Down Expand Up @@ -158,14 +161,16 @@ class NumpyLibrary(ComputationLibrary):
dot = np.dot
stop_gradient = lambda x: x
assign = LibraryHelperFunctions.set_to_value

nan = np.nan
isnan=np.isnan
string=str

class TensorFlowLibrary(ComputationLibrary):
lib = 'TF'
reshape = tf.reshape
permute = tf.transpose
newaxis = tf.newaxis
shape = lambda x: x.get_shape() # .as_list()
shape = tf.shape # tobi does not understand reason for this previous definition: # lambda x: x.get_shape() # .as_list()
Copy link
Member Author

@frehe frehe Dec 14, 2022

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No difference: If x is a tf.Tensor, then x.shape and x.get_shape() are identical https://www.tensorflow.org/api_docs/python/tf/Tensor#get_shape

Using tf.shape(x) seems to be the best choice here, which is a third option.
From https://www.tensorflow.org/api_docs/python/tf/shape:
tf.shape and Tensor.shape should be identical in eager mode. Within tf.function or within a compat.v1 context, not all dimensions may be known until execution time. Hence, when defining custom layers and models for graph mode, prefer the dynamic tf.shape(x) over the static x.shape.

to_numpy = lambda x: x.numpy()
to_variable = lambda x, dtype: tf.Variable(x, dtype=dtype)
to_tensor = lambda x, dtype: tf.convert_to_tensor(x, dtype=dtype)
Expand Down Expand Up @@ -228,6 +233,9 @@ class TensorFlowLibrary(ComputationLibrary):
dot = lambda a, b: tf.tensordot(a, b, 1)
stop_gradient = tf.stop_gradient
assign = LibraryHelperFunctions.set_to_variable
nan=tf.constant(np.nan)
isnan=tf.math.is_nan
string=tf.string


class PyTorchLibrary(ComputationLibrary):
Expand Down Expand Up @@ -307,3 +315,6 @@ def gather_last_pytorch(a, index_vector):
dot = torch.dot
stop_gradient = tf.stop_gradient # FIXME: How to imlement this in torch?
assign = LibraryHelperFunctions.set_to_value
nan=torch.nan
isnan=torch.isnan
string=lambda x: torch.ByteTensor(bytes(x,'utf8'))