From 20df0c14924e22258fa8a5be6db86c811e544cae Mon Sep 17 00:00:00 2001 From: tobidelbruck Date: Sat, 26 Nov 2022 07:21:10 +0100 Subject: [PATCH 01/19] added logging.info to inform about not compiling --- src/SI_Toolkit/Functions/TF/Compile.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/SI_Toolkit/Functions/TF/Compile.py b/src/SI_Toolkit/Functions/TF/Compile.py index 31997947..76a98e86 100644 --- a/src/SI_Toolkit/Functions/TF/Compile.py +++ b/src/SI_Toolkit/Functions/TF/Compile.py @@ -25,6 +25,7 @@ def identity(func): if GLOBALLY_DISABLE_COMPILATION: + logging.info('TensorFlow compilation is disabled by GLOBALLY_DISABLE_COMPILATION=True') CompileTF = identity else: if platform.machine() == 'arm64' and platform.system() == 'Darwin': # For M1 Apple processor From 9e7d4fa057205278dfd11b9a3bf221a4df91069c Mon Sep 17 00:00:00 2001 From: tobidelbruck Date: Sun, 27 Nov 2022 09:26:16 +0100 Subject: [PATCH 02/19] to fix tensorflow JIT compile case insentivity problem, renamed cart mass M to m_cart and cart pole mass m to m_pole added comments about meaning of "stage" in MPPI renamed TargetPositionGenerator to TargetTrajectoryGenerator, since we target a complete trajectory, not just cart position. renamed my_logger() to get_logger() --- src/SI_Toolkit/Functions/TF/Compile.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/SI_Toolkit/Functions/TF/Compile.py b/src/SI_Toolkit/Functions/TF/Compile.py index 76a98e86..4989f53e 100644 --- a/src/SI_Toolkit/Functions/TF/Compile.py +++ b/src/SI_Toolkit/Functions/TF/Compile.py @@ -6,14 +6,17 @@ from SI_Toolkit.computation_library import ComputationLibrary +from others.globals_and_utils import get_logger + +log=get_logger(__name__) try: from SI_Toolkit_ASF import GLOBALLY_DISABLE_COMPILATION, USE_JIT_COMPILATION except ImportError: - logging.warn("No compilation option set in SI_Toolkit_ASF. Setting GLOBALLY_DISABLE_COMPILATION to True.") + log.warn("No compilation option set in SI_Toolkit_ASF/__init.py__. Setting GLOBALLY_DISABLE_COMPILATION to True.") GLOBALLY_DISABLE_COMPILATION = True def tf_function_jit(func): - return tf.function(func=func, jit_compile=True) + return tf.function(func=func, jit_compile=True,) def tf_function_experimental(func): @@ -25,7 +28,7 @@ def identity(func): if GLOBALLY_DISABLE_COMPILATION: - logging.info('TensorFlow compilation is disabled by GLOBALLY_DISABLE_COMPILATION=True') + log.info('TensorFlow compilation is disabled by GLOBALLY_DISABLE_COMPILATION=True') CompileTF = identity else: if platform.machine() == 'arm64' and platform.system() == 'Darwin': # For M1 Apple processor From 8c60986443f48565032cd3600951fd1f550285c3 Mon Sep 17 00:00:00 2001 From: tobidelbruck Date: Sun, 27 Nov 2022 09:39:04 +0100 Subject: [PATCH 03/19] generate pycharm link in log output. 
say which compiler we are using --- src/SI_Toolkit/Functions/TF/Compile.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/SI_Toolkit/Functions/TF/Compile.py b/src/SI_Toolkit/Functions/TF/Compile.py index 4989f53e..7a8bb01d 100644 --- a/src/SI_Toolkit/Functions/TF/Compile.py +++ b/src/SI_Toolkit/Functions/TF/Compile.py @@ -37,6 +37,7 @@ def identity(func): CompileTF = tf.function else: CompileTF = tf_function_jit + log.info(f'using {CompileTF} compilation') # CompileTF = tf_function_experimental # Should be same as tf_function_jit, not appropriate for newer version of TF def CompileAdaptive(fun): @@ -50,5 +51,5 @@ def CompileAdaptive(fun): elif lib_name == 'TF': return CompileTF(fun) else: - print('Jit compilation for Pytorch not yet implemented.') + log.warning(f'JIT compilation for {lib_name} not yet implemented.') return identity(fun) From 4c0bd92116ebbe55a11914df631f3549476aa15c Mon Sep 17 00:00:00 2001 From: tobidelbruck Date: Mon, 28 Nov 2022 10:46:00 +0100 Subject: [PATCH 04/19] added nan value to all libs --- src/SI_Toolkit/computation_library.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/SI_Toolkit/computation_library.py b/src/SI_Toolkit/computation_library.py index cd71714e..2391e9a5 100644 --- a/src/SI_Toolkit/computation_library.py +++ b/src/SI_Toolkit/computation_library.py @@ -88,6 +88,7 @@ class ComputationLibrary: dot: Callable[[TensorType, TensorType], TensorType] = None stop_gradient: Callable[[TensorType], TensorType] = None assign: Callable[[Union[TensorType, tf.Variable], TensorType], Union[TensorType, tf.Variable]] = None + nan:TensorType=None class NumpyLibrary(ComputationLibrary): @@ -158,7 +159,7 @@ class NumpyLibrary(ComputationLibrary): dot = np.dot stop_gradient = lambda x: x assign = LibraryHelperFunctions.set_to_value - + nan = np.nan class TensorFlowLibrary(ComputationLibrary): lib = 'TF' @@ -228,6 +229,7 @@ class TensorFlowLibrary(ComputationLibrary): dot = lambda a, b: tf.tensordot(a, b, 1) stop_gradient = tf.stop_gradient assign = LibraryHelperFunctions.set_to_variable + nan=np.nan class PyTorchLibrary(ComputationLibrary): @@ -307,3 +309,4 @@ def gather_last_pytorch(a, index_vector): dot = torch.dot stop_gradient = tf.stop_gradient # FIXME: How to imlement this in torch? assign = LibraryHelperFunctions.set_to_value + nan=torch.nan From 1681fdde4063d53afa4d4de399f719d427b89203 Mon Sep 17 00:00:00 2001 From: tobidelbruck Date: Mon, 28 Nov 2022 11:27:00 +0100 Subject: [PATCH 05/19] rename config_cost_function.yml to config_cost_functions.yml for consistency with other config files. added more docstrings for undocumented methods and constructors. added updated_attributes to barebones controller. improved formatting of logging output added dictdiffer to requirements.txt renamed target_position cost function to cartpole_trajectory_cost.py. replaced some load_config with the load_or_reload_config_if_modified, so that the configs are cached for change checking. 
added logger to Compile.py --- src/SI_Toolkit/Functions/TF/Compile.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/SI_Toolkit/Functions/TF/Compile.py b/src/SI_Toolkit/Functions/TF/Compile.py index 7a8bb01d..f705277b 100644 --- a/src/SI_Toolkit/Functions/TF/Compile.py +++ b/src/SI_Toolkit/Functions/TF/Compile.py @@ -4,11 +4,13 @@ import tensorflow as tf import torch +from Control_Toolkit.others.globals_and_utils import get_logger +log=get_logger(__name__) + from SI_Toolkit.computation_library import ComputationLibrary -from others.globals_and_utils import get_logger -log=get_logger(__name__) + try: from SI_Toolkit_ASF import GLOBALLY_DISABLE_COMPILATION, USE_JIT_COMPILATION except ImportError: @@ -41,6 +43,8 @@ def identity(func): # CompileTF = tf_function_experimental # Should be same as tf_function_jit, not appropriate for newer version of TF def CompileAdaptive(fun): + """ TODO add docstring to explain what it does and where it is used + """ instance = fun.__self__ assert hasattr(instance, "lib"), "Instance with this method has no computation library defined" computation_library: "type[ComputationLibrary]" = instance.lib From 4776ed422e5f56cd073414d995394b6566405a9e Mon Sep 17 00:00:00 2001 From: tobidelbruck Date: Sun, 11 Dec 2022 15:10:15 +0100 Subject: [PATCH 06/19] finally the dynamically modifiable control cost parameters are working and the cartpole is at least balancing itself and swinging up from down position. Big changes all over! Now anytime any of the config files is modified during runtime, the using class has its tf.variable assigned the new value. To get this work, the variable MUST NOT EXIST before it is set this way the first time. Otherwise the compiler just uses the field at compile time and the changes are never seen inside. The cartpole window now does not reset the initial position and angle sliders on each start. 
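
The compile-time capture behavior described in this message is TensorFlow's standard tracing rule: a plain Python number read inside a tf.function is frozen into the graph when the function is first traced, whereas a tf.Variable is read at run time, so later .assign() calls remain visible inside already-compiled code. A minimal sketch of the pattern (the class, method and parameter names below are illustrative, not the toolkit's actual API):

    import tensorflow as tf

    class CostFunction:
        """Illustrative only; not the toolkit's real cost-function API."""

        def set_cost_parameter(self, name, value):
            existing = getattr(self, name, None)
            if isinstance(existing, tf.Variable):
                existing.assign(value)  # update is seen by compiled graphs, no retrace needed
            else:
                # First assignment creates the tf.Variable. A plain float stored here instead
                # would be captured as a constant when tf.function traces stage_cost.
                setattr(self, name, tf.Variable(value, dtype=tf.float32))

        @tf.function
        def stage_cost(self, angle):
            return self.pole_weight * tf.square(angle)

    cf = CostFunction()
    cf.set_cost_parameter('pole_weight', 1.0)
    cf.stage_cost(tf.constant(0.5))              # traces once; reads the variable -> 0.25
    cf.set_cost_parameter('pole_weight', 10.0)   # e.g. triggered by a config-file edit
    cf.stage_cost(tf.constant(0.5))              # -> 2.5, with no retracing

Had pole_weight existed as a plain float before the first trace, the second call would still return 0.25, which is exactly the "changes are never seen inside" failure the commit message describes.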
--- src/SI_Toolkit/Functions/TF/Compile.py | 1 + src/SI_Toolkit/Predictors/predictor_wrapper.py | 14 ++++++++++---- src/SI_Toolkit/computation_library.py | 10 +++++++--- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/src/SI_Toolkit/Functions/TF/Compile.py b/src/SI_Toolkit/Functions/TF/Compile.py index f705277b..0ddc4936 100644 --- a/src/SI_Toolkit/Functions/TF/Compile.py +++ b/src/SI_Toolkit/Functions/TF/Compile.py @@ -53,6 +53,7 @@ def CompileAdaptive(fun): if GLOBALLY_DISABLE_COMPILATION: return identity(fun) elif lib_name == 'TF': + log.info(f'compiling tensorflow {fun}') return CompileTF(fun) else: log.warning(f'JIT compilation for {lib_name} not yet implemented.') diff --git a/src/SI_Toolkit/Predictors/predictor_wrapper.py b/src/SI_Toolkit/Predictors/predictor_wrapper.py index 58656546..35868c5f 100644 --- a/src/SI_Toolkit/Predictors/predictor_wrapper.py +++ b/src/SI_Toolkit/Predictors/predictor_wrapper.py @@ -29,6 +29,7 @@ def __init__(self): def configure(self, batch_size: int, horizon: int, dt: float, computation_library: "Optional[type[ComputationLibrary]]"=None, predictor_specification=None, compile_standalone=False): + self.update_predictor_config_from_specification(predictor_specification) compile_standalone = {'disable_individual_compilation': not compile_standalone} @@ -53,11 +54,16 @@ def configure(self, batch_size: int, horizon: int, dt: float, computation_librar self.predictor = predictor_ODE_tf(horizon=self.horizon, dt=dt, batch_size=self.batch_size, **self.predictor_config, **compile_standalone) else: - raise NotImplementedError('Type of the predictor not recognised.') - + raise NotImplementedError(f'Type of the predictor {self.predictor_type} is not recognised.') + # computation_library defaults to None. In that case, do not check for conformity. 
- if computation_library is not None and computation_library not in self.predictor.supported_computation_libraries: - raise ValueError(f"Predictor {self.predictor.__class__.__name__} does not support {computation_library.__name__}") + # in other cases, check after we configure it to make sure it supports itself + if not computation_library is None and computation_library not in self.predictor.supported_computation_libraries: + raise ValueError( + f"Predictor {self.predictor.__class__.__name__} does not support {computation_library.__name__}") + + self.predictor.lib=computation_library # set the library type on the predictor object so we can use it to assign attributes later + def configure_with_compilation(self, batch_size, horizon, dt, predictor_specification=None): """ diff --git a/src/SI_Toolkit/computation_library.py b/src/SI_Toolkit/computation_library.py index 2391e9a5..d981078e 100644 --- a/src/SI_Toolkit/computation_library.py +++ b/src/SI_Toolkit/computation_library.py @@ -54,7 +54,7 @@ class ComputationLibrary: gather: Callable[[TensorType, TensorType, int], TensorType] = None gather_last: Callable[[TensorType, TensorType], TensorType] = None arange: Callable[[Optional[NumericType], NumericType, Optional[NumericType]], TensorType] = None - zeros: Callable[["tuple[int]"], TensorType] = None + zeros: Callable[["tuple[int,...]"], TensorType] = None zeros_like: Callable[[TensorType], TensorType] = None ones: Callable[["tuple[int]"], TensorType] = None sign: Callable[[TensorType], TensorType] = None @@ -89,6 +89,7 @@ class ComputationLibrary: stop_gradient: Callable[[TensorType], TensorType] = None assign: Callable[[Union[TensorType, tf.Variable], TensorType], Union[TensorType, tf.Variable]] = None nan:TensorType=None + isnan:Callable[[TensorType],bool]=None class NumpyLibrary(ComputationLibrary): @@ -160,13 +161,14 @@ class NumpyLibrary(ComputationLibrary): stop_gradient = lambda x: x assign = LibraryHelperFunctions.set_to_value nan = np.nan + isnan=np.isnan class TensorFlowLibrary(ComputationLibrary): lib = 'TF' reshape = tf.reshape permute = tf.transpose newaxis = tf.newaxis - shape = lambda x: x.get_shape() # .as_list() + shape = tf.shape # tobi does not understand reason for this previous definition: # lambda x: x.get_shape() # .as_list() to_numpy = lambda x: x.numpy() to_variable = lambda x, dtype: tf.Variable(x, dtype=dtype) to_tensor = lambda x, dtype: tf.convert_to_tensor(x, dtype=dtype) @@ -229,7 +231,8 @@ class TensorFlowLibrary(ComputationLibrary): dot = lambda a, b: tf.tensordot(a, b, 1) stop_gradient = tf.stop_gradient assign = LibraryHelperFunctions.set_to_variable - nan=np.nan + nan=tf.constant(np.nan) + isnan=tf.math.is_nan class PyTorchLibrary(ComputationLibrary): @@ -310,3 +313,4 @@ def gather_last_pytorch(a, index_vector): stop_gradient = tf.stop_gradient # FIXME: How to imlement this in torch? assign = LibraryHelperFunctions.set_to_value nan=torch.nan + isnan=torch.isnan From 9e72b371691e52024d1d5cc8e95bf73c7a1e7052 Mon Sep 17 00:00:00 2001 From: tobidelbruck Date: Mon, 12 Dec 2022 08:52:35 +0100 Subject: [PATCH 07/19] now spin and balance both work! and so does changing the policy and changing the cost weights during runtime! Yay, finally! Trick was to make sure that all values are actually propagated to tf variables, e.g. numpy arrays, ints, string, not just float Changed logging format to start with level for better readability. 
added string type to computation_library.py --- src/SI_Toolkit/computation_library.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/SI_Toolkit/computation_library.py b/src/SI_Toolkit/computation_library.py index d981078e..8ddcff3a 100644 --- a/src/SI_Toolkit/computation_library.py +++ b/src/SI_Toolkit/computation_library.py @@ -90,6 +90,7 @@ class ComputationLibrary: assign: Callable[[Union[TensorType, tf.Variable], TensorType], Union[TensorType, tf.Variable]] = None nan:TensorType=None isnan:Callable[[TensorType],bool]=None + string = None class NumpyLibrary(ComputationLibrary): @@ -162,6 +163,7 @@ class NumpyLibrary(ComputationLibrary): assign = LibraryHelperFunctions.set_to_value nan = np.nan isnan=np.isnan + string=str class TensorFlowLibrary(ComputationLibrary): lib = 'TF' @@ -233,6 +235,7 @@ class TensorFlowLibrary(ComputationLibrary): assign = LibraryHelperFunctions.set_to_variable nan=tf.constant(np.nan) isnan=tf.math.is_nan + string=tf.string class PyTorchLibrary(ComputationLibrary): @@ -314,3 +317,4 @@ def gather_last_pytorch(a, index_vector): assign = LibraryHelperFunctions.set_to_value nan=torch.nan isnan=torch.isnan + string=lambda x: torch.ByteTensor(bytes(x,'utf8')) From 0c6ea2a207ddebfec4e55307557f050e1bee01da Mon Sep 17 00:00:00 2001 From: heetmeyer Date: Thu, 15 Dec 2022 10:27:18 +0100 Subject: [PATCH 08/19] Rename tensorflow compilation flags --- SI_Toolkit_ASF_Template/__init__.py | 15 +++++++++++++-- src/SI_Toolkit/Functions/TF/Compile.py | 14 +++++++------- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/SI_Toolkit_ASF_Template/__init__.py b/SI_Toolkit_ASF_Template/__init__.py index 51fa92b6..62daba1c 100644 --- a/SI_Toolkit_ASF_Template/__init__.py +++ b/SI_Toolkit_ASF_Template/__init__.py @@ -1,2 +1,13 @@ -GLOBALLY_DISABLE_COMPILATION = False # Set to False to use tf.function -USE_JIT_COMPILATION = True # XLA ignores random seeds. Set to False for reproducibility +### Choose whether to run TensorFlow in eager mode (slow, interpreted) or graph mode (fast, compiled) +# Set `USE_TENSORFLOW_EAGER_MODE=False` to... +# - decorate functions in optimizers and predictors with `@tf.function`. +# - and thereby enable TensorFlow graph mode. This is much faster than the standard eager mode. +USE_TENSORFLOW_EAGER_MODE = False + + +### Choose whether to use TensorFlow Accelerated Linear Algebra (XLA). +# XLA uses machine-specific conversions to speed up the compiled TensorFlow graph. +# Set USE_TENSORFLOW_XLA to True to accelerate the execution (for real-time). +# If `USE_TENSORFLOW_XLA=True`, this adds `jit_compile=True` to the `tf.function` decorator. +# However, XLA ignores random seeds. Set to False for guaranteed reproducibility, such as for simulations. +USE_TENSORFLOW_XLA = True diff --git a/src/SI_Toolkit/Functions/TF/Compile.py b/src/SI_Toolkit/Functions/TF/Compile.py index 0ddc4936..60d0e266 100644 --- a/src/SI_Toolkit/Functions/TF/Compile.py +++ b/src/SI_Toolkit/Functions/TF/Compile.py @@ -12,10 +12,10 @@ try: - from SI_Toolkit_ASF import GLOBALLY_DISABLE_COMPILATION, USE_JIT_COMPILATION + from SI_Toolkit_ASF import USE_TENSORFLOW_EAGER_MODE, USE_TENSORFLOW_XLA except ImportError: - log.warn("No compilation option set in SI_Toolkit_ASF/__init.py__. Setting GLOBALLY_DISABLE_COMPILATION to True.") - GLOBALLY_DISABLE_COMPILATION = True + log.warn("No compilation option set in SI_Toolkit_ASF/__init.py__. 
Setting USE_TENSORFLOW_EAGER_MODE to True.") + USE_TENSORFLOW_EAGER_MODE = True def tf_function_jit(func): return tf.function(func=func, jit_compile=True,) @@ -29,13 +29,13 @@ def identity(func): return func -if GLOBALLY_DISABLE_COMPILATION: - log.info('TensorFlow compilation is disabled by GLOBALLY_DISABLE_COMPILATION=True') +if USE_TENSORFLOW_EAGER_MODE: + log.info('TensorFlow compilation is disabled by USE_TENSORFLOW_EAGER_MODE=True') CompileTF = identity else: if platform.machine() == 'arm64' and platform.system() == 'Darwin': # For M1 Apple processor CompileTF = tf.function - elif not USE_JIT_COMPILATION: + elif not USE_TENSORFLOW_XLA: CompileTF = tf.function else: CompileTF = tf_function_jit @@ -50,7 +50,7 @@ def CompileAdaptive(fun): computation_library: "type[ComputationLibrary]" = instance.lib lib_name = computation_library.lib - if GLOBALLY_DISABLE_COMPILATION: + if USE_TENSORFLOW_EAGER_MODE: return identity(fun) elif lib_name == 'TF': log.info(f'compiling tensorflow {fun}') From 8d5eddd5d98c0e1c8c87771e991f780b04b14a20 Mon Sep 17 00:00:00 2001 From: tobidelbruck Date: Sun, 18 Dec 2022 11:39:41 +0100 Subject: [PATCH 09/19] add equal and pow methods --- src/SI_Toolkit/computation_library.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/SI_Toolkit/computation_library.py b/src/SI_Toolkit/computation_library.py index 8ddcff3a..3e363881 100644 --- a/src/SI_Toolkit/computation_library.py +++ b/src/SI_Toolkit/computation_library.py @@ -91,6 +91,8 @@ class ComputationLibrary: nan:TensorType=None isnan:Callable[[TensorType],bool]=None string = None + equal= lambda x,y: x==y + pow=lambda x,p: x**p class NumpyLibrary(ComputationLibrary): @@ -164,6 +166,9 @@ class NumpyLibrary(ComputationLibrary): nan = np.nan isnan=np.isnan string=str + equal= lambda x,y: x==y + cond= lambda cond, t, f: t if cond else f + pow=lambda x,p: np.power(x,p) class TensorFlowLibrary(ComputationLibrary): lib = 'TF' @@ -236,7 +241,9 @@ class TensorFlowLibrary(ComputationLibrary): nan=tf.constant(np.nan) isnan=tf.math.is_nan string=tf.string - + equal= lambda x,y: tf.math.equal(x,y) + cond= lambda cond, t, f: tf.cond(cond,t,f) + pow=lambda x,p: tf.pow(x,p) class PyTorchLibrary(ComputationLibrary): @@ -318,3 +325,5 @@ def gather_last_pytorch(a, index_vector): nan=torch.nan isnan=torch.isnan string=lambda x: torch.ByteTensor(bytes(x,'utf8')) + equal=lambda x,y: torch.equal(x,y) + pow=lambda x,p: torch.pow(x,p) From d343f31a48f26c3a785cb9cff36a0e7ce5cef250 Mon Sep 17 00:00:00 2001 From: tobidelbruck Date: Sat, 24 Dec 2022 07:42:54 -0800 Subject: [PATCH 10/19] renamed s to state for clariy in many of the classes. added prefs to cartpole gui for initial position and angle increased num_rollouts to 1000, decreased horizon to 25 time steps, changed cost function for spin to pure cart angle speed. Now spin, balance, and shimmy all work quite well. moved cartpole_trajectory_generator back out outside compiled TF code for ease of development and debugging, it is cheap so this is OK. added more logging of what gets compiled. added time to arguments for the base controller so that contained methods can access current time. 
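
A note on the ComputationLibrary additions in PATCH 09 above: because every backend class exposes the same callables (pow, equal, cond, nan, ...), numerical code can be written once against the library object and run under NumPy or TensorFlow unchanged. A small sketch, assuming the module is importable as SI_Toolkit.computation_library (as the diff paths suggest):

    import numpy as np
    import tensorflow as tf
    from SI_Toolkit.computation_library import NumpyLibrary, TensorFlowLibrary

    def quadratic_cost(lib, x, target):
        # identical code for both backends; `lib` is the library class itself
        err = x - target
        return lib.pow(err, 2)

    print(quadratic_cost(NumpyLibrary, np.array([0.1, 0.5], dtype=np.float32), 0.0))
    print(quadratic_cost(TensorFlowLibrary, tf.constant([0.1, 0.5]), 0.0))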
added ruamel.yaml to requirements (this is the yaml parser that can handle scientific notation numbers) --- src/SI_Toolkit/Functions/TF/Compile.py | 10 +++++++++- src/SI_Toolkit/Predictors/__init__.py | 4 +++- src/SI_Toolkit/Predictors/predictor_ODE_tf.py | 4 ++-- src/SI_Toolkit/Predictors/predictor_wrapper.py | 4 ++-- src/SI_Toolkit/computation_library.py | 4 ++++ 5 files changed, 20 insertions(+), 6 deletions(-) diff --git a/src/SI_Toolkit/Functions/TF/Compile.py b/src/SI_Toolkit/Functions/TF/Compile.py index 60d0e266..0e58ba45 100644 --- a/src/SI_Toolkit/Functions/TF/Compile.py +++ b/src/SI_Toolkit/Functions/TF/Compile.py @@ -18,6 +18,7 @@ USE_TENSORFLOW_EAGER_MODE = True def tf_function_jit(func): + log.info(f'compiling tf.function from {func}') return tf.function(func=func, jit_compile=True,) @@ -34,16 +35,23 @@ def identity(func): CompileTF = identity else: if platform.machine() == 'arm64' and platform.system() == 'Darwin': # For M1 Apple processor + log.info('TensorFlow compilation (but not JIT) is enabled by tf.function by USE_TENSORFLOW_EAGER_MODE=False and USE_TENSORFLOW_XLA = False') CompileTF = tf.function elif not USE_TENSORFLOW_XLA: + log.info('TensorFlow compilation (but not JIT) is enabled by tf.function by USE_TENSORFLOW_EAGER_MODE=False and USE_TENSORFLOW_XLA = False') CompileTF = tf.function else: + log.info('TensorFlow compilation and JIT are both enabled by tf.function_jit by USE_TENSORFLOW_EAGER_MODE=False and USE_TENSORFLOW_XLA = True') CompileTF = tf_function_jit log.info(f'using {CompileTF} compilation') # CompileTF = tf_function_experimental # Should be same as tf_function_jit, not appropriate for newer version of TF def CompileAdaptive(fun): - """ TODO add docstring to explain what it does and where it is used + """ + Compiles the function using options for TensorFlow and XLA JIT, according to global flags USE_TENSORFLOW_EAGER_MODE. 
+ + See SI_Toolkit_ASF\__init__.py + """ instance = fun.__self__ assert hasattr(instance, "lib"), "Instance with this method has no computation library defined" diff --git a/src/SI_Toolkit/Predictors/__init__.py b/src/SI_Toolkit/Predictors/__init__.py index af7ecb9a..794fbe20 100644 --- a/src/SI_Toolkit/Predictors/__init__.py +++ b/src/SI_Toolkit/Predictors/__init__.py @@ -16,13 +16,15 @@ def __init__(self, horizon: float, batch_size: int) -> None: self.predictor_external_input_features = CONTROL_INPUTS self.predictor_output_features = STATE_VARIABLES - def predict_tf(self, s: tf.Tensor, Q: tf.Tensor): + def predict_tf(self, s: tf.Tensor, Q: tf.Tensor, time:float=None): """Predict the whole MPC horizon using tensorflow :param s: Initial state [batch_size x state_dim] :type s: tf.Tensor :param Q: Control inputs [batch_size x horizon_length x control_dim] :type Q: tf.Tensor + :param time: time in seconds + :type time: float """ raise NotImplementedError() diff --git a/src/SI_Toolkit/Predictors/predictor_ODE_tf.py b/src/SI_Toolkit/Predictors/predictor_ODE_tf.py index 69375ba8..0532b662 100644 --- a/src/SI_Toolkit/Predictors/predictor_ODE_tf.py +++ b/src/SI_Toolkit/Predictors/predictor_ODE_tf.py @@ -51,7 +51,7 @@ def __init__(self, horizon: int, dt: float, intermediate_steps=10, disable_indiv self.predict_tf = CompileTF(self._predict_tf) - def predict(self, initial_state, Q): + def predict(self, initial_state, Q, time:float=None): initial_state, Q = convert_to_tensors(initial_state, Q) initial_state, Q = check_dimensions(initial_state, Q) @@ -63,7 +63,7 @@ def predict(self, initial_state, Q): return output.numpy() - def _predict_tf(self, initial_state, Q, params=None): + def _predict_tf(self, initial_state, Q, params=None, time:float=None): self.output = tf.TensorArray(tf.float32, size=self.horizon + 1, dynamic_size=False) self.output = self.output.write(0, initial_state) diff --git a/src/SI_Toolkit/Predictors/predictor_wrapper.py b/src/SI_Toolkit/Predictors/predictor_wrapper.py index 6923bebc..b1e73c17 100644 --- a/src/SI_Toolkit/Predictors/predictor_wrapper.py +++ b/src/SI_Toolkit/Predictors/predictor_wrapper.py @@ -139,8 +139,8 @@ def update_predictor_config_from_specification(self, predictor_specification: st def predict(self, s, Q): return self.predictor.predict(s, Q) - def predict_tf(self, s, Q): # TODO: This function should disappear: predict() should manage the right library - return self.predictor.predict_tf(s, Q) + def predict_tf(self, state, Q, time=None): # TODO: This function should disappear: predict() should manage the right library + return self.predictor.predict_tf(state, Q, time=time) def update(self, Q0, s): if self.predictor_type == 'neural': diff --git a/src/SI_Toolkit/computation_library.py b/src/SI_Toolkit/computation_library.py index 6de13f0b..938542b3 100644 --- a/src/SI_Toolkit/computation_library.py +++ b/src/SI_Toolkit/computation_library.py @@ -97,6 +97,7 @@ class ComputationLibrary: logical_and: Callable[[TensorType, TensorType], TensorType] = None logical_or: Callable[[TensorType, TensorType], TensorType] = None dtype=lambda x: x.dtype + fill = None class NumpyLibrary(ComputationLibrary): @@ -176,6 +177,7 @@ class NumpyLibrary(ComputationLibrary): equal= lambda x,y: x==y cond= lambda cond, t, f: t if cond else f pow=lambda x,p: np.power(x,p) + fill = lambda x,y: x.np.fill(y) class TensorFlowLibrary(ComputationLibrary): lib = 'TF' @@ -254,6 +256,7 @@ class TensorFlowLibrary(ComputationLibrary): equal= lambda x,y: tf.math.equal(x,y) cond= lambda cond, t, f: 
tf.cond(cond,t,f) pow=lambda x,p: tf.pow(x,p) + fill = lambda dims,value: tf.fill(dims,value) class PyTorchLibrary(ComputationLibrary): @@ -340,3 +343,4 @@ def gather_last_pytorch(a, index_vector): logical_or = torch.logical_or equal=lambda x,y: torch.equal(x,y) pow=lambda x,p: torch.pow(x,p) + fill = lambda x,y: x.torch.Tensor.fill(x,y) From 91a611edee726f43edb7921ddadffeffa5937e3a Mon Sep 17 00:00:00 2001 From: Tobi Delbruck Date: Tue, 31 Jan 2023 17:15:50 +0100 Subject: [PATCH 11/19] Merge remote-tracking branch 'origin/main' into Tobi_Dance # Conflicts: # Control_Toolkit_ASF_Template/Cost_Functions/EnvironmentName/cost_function_barebone.py # Control_Toolkit_ASF_Template/config_optimizers.yml # Controllers/__init__.py # Controllers/controller_neural_imitator_tf.py # Cost_Functions/__init__.py # Cost_Functions/cost_function_wrapper.py # Optimizers/optimizer_mppi.py # Optimizers/optimizer_rpgd_me_tf.py # Optimizers/optimizer_rpgd_ml_tf.py # Optimizers/optimizer_rpgd_particle_tf.py # Optimizers/optimizer_rpgd_tf.py # requirements.txt --- src/SI_Toolkit/Functions/General/Dataset.py | 2 +- src/SI_Toolkit/Functions/Pytorch/Training.py | 2 +- src/SI_Toolkit/GP/DataSelector.py | 4 ++-- src/SI_Toolkit/GP/Train_GPR.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/SI_Toolkit/Functions/General/Dataset.py b/src/SI_Toolkit/Functions/General/Dataset.py index 853016fa..c53e5709 100644 --- a/src/SI_Toolkit/Functions/General/Dataset.py +++ b/src/SI_Toolkit/Functions/General/Dataset.py @@ -180,7 +180,7 @@ def get_batch(self, idx_batch): def reset_batch_size(self, batch_size=None): if batch_size is None: - self.batch_size = self.args.batch_size + self.batch_size = self.args.num_rollouts else: self.batch_size = batch_size diff --git a/src/SI_Toolkit/Functions/Pytorch/Training.py b/src/SI_Toolkit/Functions/Pytorch/Training.py index 34438d39..bcd255a0 100644 --- a/src/SI_Toolkit/Functions/Pytorch/Training.py +++ b/src/SI_Toolkit/Functions/Pytorch/Training.py @@ -35,7 +35,7 @@ def train_network_core(net, net_info, training_dfs_norm, validation_dfs_norm, te del training_dfs_norm, validation_dfs_norm, test_dfs_norm # Create PyTorch dataloaders for train and dev set - training_generator = data.DataLoader(dataset=training_dataset, batch_size=a.batch_size, shuffle=True) + training_generator = data.DataLoader(dataset=training_dataset, batch_size=a.num_rollouts, shuffle=True) validation_generator = data.DataLoader(dataset=validation_dataset, batch_size=512, shuffle=False) print('') diff --git a/src/SI_Toolkit/GP/DataSelector.py b/src/SI_Toolkit/GP/DataSelector.py index f5194c1a..708998eb 100644 --- a/src/SI_Toolkit/GP/DataSelector.py +++ b/src/SI_Toolkit/GP/DataSelector.py @@ -138,8 +138,8 @@ def return_dataset_for_training(self, raw=False ): - if batch_size is None and self.args.batch_size is not None: - batch_size = self.args.batch_size + if batch_size is None and self.args.num_rollouts is not None: + batch_size = self.args.num_rollouts if inputs is None and self.args.inputs is not None: inputs = self.args.inputs diff --git a/src/SI_Toolkit/GP/Train_GPR.py b/src/SI_Toolkit/GP/Train_GPR.py index e0b5e0bc..b642946e 100644 --- a/src/SI_Toolkit/GP/Train_GPR.py +++ b/src/SI_Toolkit/GP/Train_GPR.py @@ -26,7 +26,7 @@ a.wash_out_len = 0 a.post_wash_out_len = 1 outputs = a.outputs -batch_size = a.batch_size +batch_size = a.num_rollouts number_of_inducing_points = 10 From 7f7659dcf59d252ce74bc6aaba2ac7a72e583d02 Mon Sep 17 00:00:00 2001 From: Tobi Delbruck Date: Mon, 6 Feb 2023 16:19:19 +0100 
Subject: [PATCH 12/19] moved get_logger to own file in SI_Toolkit Started merging from remote/origin/Tobi_Dance --- src/SI_Toolkit/Functions/TF/Compile.py | 2 +- src/get_logger.py | 50 ++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 src/get_logger.py diff --git a/src/SI_Toolkit/Functions/TF/Compile.py b/src/SI_Toolkit/Functions/TF/Compile.py index 0e58ba45..dba2bd93 100644 --- a/src/SI_Toolkit/Functions/TF/Compile.py +++ b/src/SI_Toolkit/Functions/TF/Compile.py @@ -4,7 +4,7 @@ import tensorflow as tf import torch -from Control_Toolkit.others.globals_and_utils import get_logger +from get_logger import get_logger log=get_logger(__name__) from SI_Toolkit.computation_library import ComputationLibrary diff --git a/src/get_logger.py b/src/get_logger.py new file mode 100644 index 00000000..ce468fe5 --- /dev/null +++ b/src/get_logger.py @@ -0,0 +1,50 @@ +import logging +# general logger for all control/si_toolkit users. Produces nice output format with live hyperlinks for pycharm users +# to use it, just call log=get_logger(__name__) at the top of your python file + +LOGGING_LEVEL = logging.DEBUG # usually INFO is good +class CustomFormatter(logging.Formatter): + """Logging Formatter to add colors and count warning / errors""" + # see https://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output/7995762#7995762 + + grey = "\x1b[38;21m" + yellow = "\x1b[33;21m" + cyan = "\x1b[1;36m" # dark green + green = "\x1b[31;21m" # dark green + red = "\x1b[31;21m" + bold_red = "\x1b[31;1m" + light_blue = "\x1b[1;36m" + blue = "\x1b[1;34m" + reset = "\x1b[0m" + # File "{file}", line {max(line, 1)}'.replace("\\", "/") + format = '[%(levelname)s]: %(asctime)s - %(name)s - %(message)s (File "%(pathname)s", line %(lineno)d, in %(funcName)s)' + + FORMATS = { + logging.DEBUG: light_blue + format + reset, + logging.INFO: cyan + format + reset, + logging.WARNING: red + format + reset, + logging.ERROR: bold_red + format + reset, + logging.CRITICAL: bold_red + format + reset + } + + def format(self, record): + log_fmt = self.FORMATS.get(record.levelno) + formatter = logging.Formatter(log_fmt) + return formatter.format(record).replace("\\", "/") #replace \ with / for pycharm links + + +def get_logger(name): + """ Use get_logger to define a logger with useful color output and info and warning turned on according to the global LOGGING_LEVEL. + + :param name: the name of this logger. Use __name__ to give it the name of the module that instantiates it. + + :returns: the logger. 
+ """ + # logging.basicConfig(stream=sys.stdout, level=logging.INFO) + logger = logging.getLogger(name) + logger.setLevel(LOGGING_LEVEL) + # create console handler + ch = logging.StreamHandler() + ch.setFormatter(CustomFormatter()) + logger.addHandler(ch) + return logger \ No newline at end of file From 4d34dde093d45c9331ad08993272d3dfd2d4d5dd Mon Sep 17 00:00:00 2001 From: Tobi Delbruck Date: Tue, 7 Feb 2023 08:51:42 +0100 Subject: [PATCH 13/19] update path to config_cost_functions.yml --- src/SI_Toolkit/Predictors/predictor_wrapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/SI_Toolkit/Predictors/predictor_wrapper.py b/src/SI_Toolkit/Predictors/predictor_wrapper.py index 9ccb842a..1338ccf6 100644 --- a/src/SI_Toolkit/Predictors/predictor_wrapper.py +++ b/src/SI_Toolkit/Predictors/predictor_wrapper.py @@ -33,7 +33,7 @@ def __init__(self): self.predictor_type: str = self.predictor_config['predictor_type'] self.model_name: str = self.predictor_config['model_name'] - def configure(self, batch_size: int, horizon: int, dt: float, computation_library: "Optional[type[ComputationLibrary]]"=None, predictor_specification=None, compile_standalone=False, mode=None): + def configure(self, batch_size: int, horizon: int, dt: float, computation_library: Optional[ComputationLibrary]=None, predictor_specification=None, compile_standalone=False, mode=None): """Assign optimization-specific parameters to finalize instance creation. From f13330c54beee42d084bd558ecb52689c4020d71 Mon Sep 17 00:00:00 2001 From: Tobi Delbruck Date: Wed, 8 Feb 2023 12:40:58 +0100 Subject: [PATCH 14/19] move get_logger.py to Control_Toolkit so that it can be used by physical-cartpole repo. Fix some cartpole parameter import. Fix rename of config_cost_functions.yml. Add timer.py and yes_or_no.py utility classes. --- src/SI_Toolkit/Functions/TF/Compile.py | 4 +-- src/get_logger.py | 50 -------------------------- 2 files changed, 1 insertion(+), 53 deletions(-) delete mode 100644 src/get_logger.py diff --git a/src/SI_Toolkit/Functions/TF/Compile.py b/src/SI_Toolkit/Functions/TF/Compile.py index dba2bd93..0a17a5f3 100644 --- a/src/SI_Toolkit/Functions/TF/Compile.py +++ b/src/SI_Toolkit/Functions/TF/Compile.py @@ -1,10 +1,8 @@ -import logging import platform import tensorflow as tf -import torch -from get_logger import get_logger +from Control_Toolkit.others.get_logger import get_logger log=get_logger(__name__) from SI_Toolkit.computation_library import ComputationLibrary diff --git a/src/get_logger.py b/src/get_logger.py deleted file mode 100644 index ce468fe5..00000000 --- a/src/get_logger.py +++ /dev/null @@ -1,50 +0,0 @@ -import logging -# general logger for all control/si_toolkit users. 
Produces nice output format with live hyperlinks for pycharm users -# to use it, just call log=get_logger(__name__) at the top of your python file - -LOGGING_LEVEL = logging.DEBUG # usually INFO is good -class CustomFormatter(logging.Formatter): - """Logging Formatter to add colors and count warning / errors""" - # see https://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output/7995762#7995762 - - grey = "\x1b[38;21m" - yellow = "\x1b[33;21m" - cyan = "\x1b[1;36m" # dark green - green = "\x1b[31;21m" # dark green - red = "\x1b[31;21m" - bold_red = "\x1b[31;1m" - light_blue = "\x1b[1;36m" - blue = "\x1b[1;34m" - reset = "\x1b[0m" - # File "{file}", line {max(line, 1)}'.replace("\\", "/") - format = '[%(levelname)s]: %(asctime)s - %(name)s - %(message)s (File "%(pathname)s", line %(lineno)d, in %(funcName)s)' - - FORMATS = { - logging.DEBUG: light_blue + format + reset, - logging.INFO: cyan + format + reset, - logging.WARNING: red + format + reset, - logging.ERROR: bold_red + format + reset, - logging.CRITICAL: bold_red + format + reset - } - - def format(self, record): - log_fmt = self.FORMATS.get(record.levelno) - formatter = logging.Formatter(log_fmt) - return formatter.format(record).replace("\\", "/") #replace \ with / for pycharm links - - -def get_logger(name): - """ Use get_logger to define a logger with useful color output and info and warning turned on according to the global LOGGING_LEVEL. - - :param name: the name of this logger. Use __name__ to give it the name of the module that instantiates it. - - :returns: the logger. - """ - # logging.basicConfig(stream=sys.stdout, level=logging.INFO) - logger = logging.getLogger(name) - logger.setLevel(LOGGING_LEVEL) - # create console handler - ch = logging.StreamHandler() - ch.setFormatter(CustomFormatter()) - logger.addHandler(ch) - return logger \ No newline at end of file From 5155fe93b3b4ae66e80037f29a97661954d2bdcc Mon Sep 17 00:00:00 2001 From: Tobi Delbruck Date: Fri, 10 Feb 2023 06:31:59 +0100 Subject: [PATCH 15/19] cartpole_dancer.py starts to work. Music starts and stops, some steps seem to work. mppi parameters copied from development. cost weights adjusted. controller update interval inccreased to 25ms from 20ms to match simulator and actual achieved rate with dancer mppi. mppi rollouts reduced to 700 to speed up control. PhysicalCartPoleDriver.py has class self variable for reference from other classes to the instance. Compile.py raises exception when either option is undefined. --- src/SI_Toolkit/Functions/TF/Compile.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/SI_Toolkit/Functions/TF/Compile.py b/src/SI_Toolkit/Functions/TF/Compile.py index 0a17a5f3..956f2005 100644 --- a/src/SI_Toolkit/Functions/TF/Compile.py +++ b/src/SI_Toolkit/Functions/TF/Compile.py @@ -12,8 +12,7 @@ try: from SI_Toolkit_ASF import USE_TENSORFLOW_EAGER_MODE, USE_TENSORFLOW_XLA except ImportError: - log.warn("No compilation option set in SI_Toolkit_ASF/__init.py__. 
Setting USE_TENSORFLOW_EAGER_MODE to True.") - USE_TENSORFLOW_EAGER_MODE = True + raise Exception("Either/both of compilation options USE_TENSORFLOW_EAGER_MODE, USE_TENSORFLOW_XLA are missing in SI_Toolkit_ASF/__init.py__.") def tf_function_jit(func): log.info(f'compiling tf.function from {func}') From f63ab96a8917c0e4cc824d3121aeb90b853c47cf Mon Sep 17 00:00:00 2001 From: Tobi Delbruck Date: Mon, 13 Feb 2023 08:14:16 +0100 Subject: [PATCH 16/19] added primitive ability to record the predictor_ODE_tf.py predictions for next timestep and recording this prediction along with the measurement in the PhysicalCartPoleDriver.py CSV output file. In this commit, control is enabled by default because the pycharm debugger hangs on interactive terminal keyboard intput, and the motor output is disabled for remote debugging. --- src/SI_Toolkit/Predictors/predictor_ODE_tf.py | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/src/SI_Toolkit/Predictors/predictor_ODE_tf.py b/src/SI_Toolkit/Predictors/predictor_ODE_tf.py index 0532b662..a8195a93 100644 --- a/src/SI_Toolkit/Predictors/predictor_ODE_tf.py +++ b/src/SI_Toolkit/Predictors/predictor_ODE_tf.py @@ -51,26 +51,37 @@ def __init__(self, horizon: int, dt: float, intermediate_steps=10, disable_indiv self.predict_tf = CompileTF(self._predict_tf) - def predict(self, initial_state, Q, time:float=None): + def predict(self, initial_state, Q, time:float=None, horizon:int=None): initial_state, Q = convert_to_tensors(initial_state, Q) initial_state, Q = check_dimensions(initial_state, Q) self.batch_size = tf.shape(Q)[0] self.initial_state = initial_state - output = self.predict_tf(self.initial_state, Q) + output = self.predict_tf(self.initial_state, Q, params=None, horizon=horizon) return output.numpy() - def _predict_tf(self, initial_state, Q, params=None, time:float=None): + def _predict_tf(self, initial_state, Q, params=None, time:float=None, horizon:int=None): + """ Predict the states over horizon next timesteps. + Q must be a 3-dimensional vector [num_rollouts, horizon, Q] where Q is the vector of control inputs - self.output = tf.TensorArray(tf.float32, size=self.horizon + 1, dynamic_size=False) + :param initial_state: the state now + :param Q: the control over horizon next steps + :param params: optional parameters + :param time: the current time in seconds + :param horizon: optional horizon, if None then use self.horizon + + :returns: the predicted states including as first component of horizon dimension the initial state, [num_rollouts, horizon+1, states] + """ + horizon=self.horizon if horizon is None else horizon + self.output = tf.TensorArray(tf.float32, size=horizon + 1, dynamic_size=False) self.output = self.output.write(0, initial_state) next_state = initial_state - for k in tf.range(self.horizon): + for k in tf.range(horizon): next_state = self.next_step_predictor.step(next_state, Q[:, k, :], params) self.output = self.output.write(k + 1, next_state) From ab87fec8b1e4013c4b9fb528f2275a63d67410bb Mon Sep 17 00:00:00 2001 From: Tobi Delbruck Date: Thu, 16 Feb 2023 14:43:47 +0100 Subject: [PATCH 17/19] added 'cartwheel' step to cartpole_trajectory_generator.py. commented out some noisy log.debug. 
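
For the horizon and time arguments introduced in PATCH 16 above, a rough usage sketch of the expected tensor shapes; it assumes an already-constructed and configured predictor_ODE_tf instance named predictor with horizon=25 (construction details depend on the predictor config and are omitted), and the batch size, state and control dimensions are illustrative:

    import numpy as np

    initial_state = np.zeros((700, 6), dtype=np.float32)   # [num_rollouts, state_dim]
    Q = np.zeros((700, 25, 1), dtype=np.float32)           # [num_rollouts, horizon, control_dim]

    full = predictor.predict(initial_state, Q)              # per the docstring: [700, 26, 6]; index 0 of the horizon axis is initial_state
    short = predictor.predict(initial_state, Q[:, :10, :], horizon=10)   # override: roll out only 10 steps ahead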
made horizon same as physical cartpole --- src/SI_Toolkit/Functions/TF/Compile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/SI_Toolkit/Functions/TF/Compile.py b/src/SI_Toolkit/Functions/TF/Compile.py index 956f2005..3121a790 100644 --- a/src/SI_Toolkit/Functions/TF/Compile.py +++ b/src/SI_Toolkit/Functions/TF/Compile.py @@ -15,7 +15,7 @@ raise Exception("Either/both of compilation options USE_TENSORFLOW_EAGER_MODE, USE_TENSORFLOW_XLA are missing in SI_Toolkit_ASF/__init.py__.") def tf_function_jit(func): - log.info(f'compiling tf.function from {func}') + # log.debug(f'compiling tf.function from {func}') return tf.function(func=func, jit_compile=True,) From 3dd5102b6296a3bf7f2f27a826f10f847212457d Mon Sep 17 00:00:00 2001 From: tobidelbruck Date: Sun, 19 Feb 2023 20:50:18 +0100 Subject: [PATCH 18/19] fixed some logic and reduced some loggers to debug level updated satisfaction CSV to use markers from audition and added more cartsheels --- src/SI_Toolkit/Functions/TF/Compile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/SI_Toolkit/Functions/TF/Compile.py b/src/SI_Toolkit/Functions/TF/Compile.py index 3121a790..dc89226e 100644 --- a/src/SI_Toolkit/Functions/TF/Compile.py +++ b/src/SI_Toolkit/Functions/TF/Compile.py @@ -58,7 +58,7 @@ def CompileAdaptive(fun): if USE_TENSORFLOW_EAGER_MODE: return identity(fun) elif lib_name == 'TF': - log.info(f'compiling tensorflow {fun}') + log.debug(f'compiling tensorflow {fun}') return CompileTF(fun) else: log.warning(f'JIT compilation for {lib_name} not yet implemented.') From 89f4ead943a975fb5ce549b0462a234308febde7 Mon Sep 17 00:00:00 2001 From: Tobi Delbruck Date: Tue, 28 Feb 2023 11:42:07 +0100 Subject: [PATCH 19/19] major changes to cartpole_dancer_cost and cartpole_trajectory_generator for compatibilty with RPGD gradient computation. update_attributes in globals_and_utils.py supports assignment of tf.Variable and now raises exception if we try to .assign an immutable Tensor. Balance step now uses int +1 or -1 for desired pole up or down, not 'up' or 'down'. computation_library.py now casts variale to numpy() from tensorflow only if it is a Tensor type. 
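
The to_numpy change mentioned here (the one-line diff below) makes the converter a no-op for values that are already NumPy arrays or plain Python numbers, so it can be called on attributes regardless of whether they live on the TensorFlow side. Roughly:

    import numpy as np
    import tensorflow as tf
    from SI_Toolkit.computation_library import TensorFlowLibrary as lib

    lib.to_numpy(tf.constant([1.0, 2.0]))   # tf.Tensor -> np.ndarray via .numpy()
    lib.to_numpy(np.array([1.0, 2.0]))      # already NumPy: returned unchanged
    lib.to_numpy(3.0)                       # plain Python value: returned unchanged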
--- src/SI_Toolkit/Functions/TF/Compile.py | 2 +- src/SI_Toolkit/computation_library.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/SI_Toolkit/Functions/TF/Compile.py b/src/SI_Toolkit/Functions/TF/Compile.py index dc89226e..b3189ad2 100644 --- a/src/SI_Toolkit/Functions/TF/Compile.py +++ b/src/SI_Toolkit/Functions/TF/Compile.py @@ -28,7 +28,7 @@ def identity(func): if USE_TENSORFLOW_EAGER_MODE: - log.info('TensorFlow compilation is disabled by USE_TENSORFLOW_EAGER_MODE=True') + log.warning('TensorFlow compilation is disabled by USE_TENSORFLOW_EAGER_MODE=True and execution will be extremely slow') CompileTF = identity else: if platform.machine() == 'arm64' and platform.system() == 'Darwin': # For M1 Apple processor diff --git a/src/SI_Toolkit/computation_library.py b/src/SI_Toolkit/computation_library.py index 3bb90692..ca5a971b 100644 --- a/src/SI_Toolkit/computation_library.py +++ b/src/SI_Toolkit/computation_library.py @@ -193,7 +193,7 @@ class TensorFlowLibrary(ComputationLibrary): permute = tf.transpose newaxis = tf.newaxis shape = tf.shape # tobi does not understand reason for this previous definition: # lambda x: x.get_shape() # .as_list() - to_numpy = lambda x: x.numpy() + to_numpy = lambda x: x.numpy() if isinstance(x,(tf.Tensor, tf.Variable)) else x to_variable = lambda x, dtype: tf.Variable(x, dtype=dtype) to_tensor = lambda x, dtype: tf.convert_to_tensor(x, dtype=dtype) constant = lambda x, t: tf.constant(x, dtype=t)
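
Finally, a usage sketch for the Compile machinery as it stands at the end of this series: CompileAdaptive looks up the lib attribute on the bound method's instance and applies CompileTF (tf.function, with jit_compile=True when USE_TENSORFLOW_XLA is set) for the TensorFlow library, or returns the function untouched when USE_TENSORFLOW_EAGER_MODE is set. Illustrative only; it assumes SI_Toolkit_ASF defines both flags (otherwise the import now raises) and the Rollout class merely stands in for a real optimizer or predictor:

    import tensorflow as tf
    from SI_Toolkit.computation_library import TensorFlowLibrary
    from SI_Toolkit.Functions.TF.Compile import CompileAdaptive, CompileTF

    class Rollout:
        lib = TensorFlowLibrary                  # CompileAdaptive reads fun.__self__.lib

        def __init__(self):
            # wrap once at construction time; in eager mode this is just the original method
            self.step_compiled = CompileAdaptive(self.step)

        def step(self, x):
            return x + 1.0

    r = Rollout()
    r.step_compiled(tf.constant(2.0))            # tf.Tensor(3.0, ...)

    # free functions (not bound to an instance) can be wrapped directly with CompileTF
    doubled = CompileTF(lambda x: 2.0 * x)
    doubled(tf.constant(2.0))                    # tf.Tensor(4.0, ...)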