From c9e3d6d8c1af77bcc19b4a93ccd6e88567225c8b Mon Sep 17 00:00:00 2001
From: AndresG
Date: Fri, 4 Oct 2024 13:23:27 +0200
Subject: [PATCH 1/2] Added example to explore an initial population of individuals (#579)

* implementation of population exploration example script mostly done

* updated example readme and requirements, addressed codetools errors

* updated requirement files and reformatted necessary files

* fixed an error caused by the framework trying to render the scene after the renderer was manually closed

* corrected some incorrect measures for the v2 body

* addressed code review feedback

---------

Co-authored-by: Andres
Co-authored-by: Ting-Chia Chiang
---
 codetools/mypy/mypy.ini                       |   3 +
 examples/3_experiment_foundations/README.md   |   2 +
 .../4g_explore_initial_population/README.md   |   9 +
 .../4g_explore_initial_population/config.py   |   7 +
 .../evaluator.py                              |  74 +++++++
 .../4g_explore_initial_population/genotype.py |  48 +++++
 .../individual.py                             |  13 ++
 .../4g_explore_initial_population/main.py     | 194 ++++++++++++++++++
 .../requirements.txt                          |   1 +
 .../4_example_experiment_setups/README.md     |   1 +
 .../modular_robot/body/v2/_active_hinge_v2.py |  12 +-
 .../modular_robot/body/v2/_brick_v2.py        |   5 +-
 .../brain/cpg/_cpg_network_structure.py       |   5 +-
 project.yml                                   |   1 +
 requirements_dev.txt                          |   1 +
 requirements_editable.txt                     |   2 +
 .../_control_interface_impl.py                |   2 +
 .../mujoco_simulator/_simulate_scene.py       |   6 +-
 .../viewers/_custom_mujoco_viewer.py          |   5 +-
 .../cppnwin/_random_multineat_genotype.py     |   2 +-
 .../cppnwin/modular_robot/v2/_body_develop.py |  60 ++++--
 .../modular_robot/v2/_body_genotype_v2.py     |   5 +-
 22 files changed, 427 insertions(+), 31 deletions(-)
 create mode 100644 examples/4_example_experiment_setups/4g_explore_initial_population/README.md
 create mode 100644 examples/4_example_experiment_setups/4g_explore_initial_population/config.py
 create mode 100644 examples/4_example_experiment_setups/4g_explore_initial_population/evaluator.py
 create mode 100644 examples/4_example_experiment_setups/4g_explore_initial_population/genotype.py
 create mode 100644 examples/4_example_experiment_setups/4g_explore_initial_population/individual.py
 create mode 100644 examples/4_example_experiment_setups/4g_explore_initial_population/main.py
 create mode 100644 examples/4_example_experiment_setups/4g_explore_initial_population/requirements.txt

diff --git a/codetools/mypy/mypy.ini b/codetools/mypy/mypy.ini
index 4094d5608..053c18e55 100644
--- a/codetools/mypy/mypy.ini
+++ b/codetools/mypy/mypy.ini
@@ -45,6 +45,9 @@ ignore_missing_imports = True
 [mypy-scipy.*]
 ignore_missing_imports = True
 
+[mypy-sklearn.*]
+ignore_missing_imports = True
+
 [mypy-cairo.*]
 ignore_missing_imports = True
 
diff --git a/examples/3_experiment_foundations/README.md b/examples/3_experiment_foundations/README.md
index 1770c7328..a7df2ae37 100644
--- a/examples/3_experiment_foundations/README.md
+++ b/examples/3_experiment_foundations/README.md
@@ -6,3 +6,5 @@ Here you will learn about a few things that are important in almost all experiments
 - In `3b_evaluate_single_robot` we will see how you can evaluate robots in an experiment.
 - Since evaluating one robot is not very interesting, in `3c_evaluate_multiple_isolated_robots` you will see how this evaluation can be done for a population of robots.
 - Alternatively, if you want multiple robots interacting with each other, look into example `3d_evaluate_multiple_interacting_robots`.
+
+
diff --git a/examples/4_example_experiment_setups/4g_explore_initial_population/README.md b/examples/4_example_experiment_setups/4g_explore_initial_population/README.md
new file mode 100644
index 000000000..ba8d55cd6
--- /dev/null
+++ b/examples/4_example_experiment_setups/4g_explore_initial_population/README.md
@@ -0,0 +1,9 @@
+In this example you will learn how to create N random robots, starting from their genotypes, mapping those into
+phenotypes, and finally into robots, in order to assess the "quality" of your initial population and of individual morphological traits.
+
+You learn:
+- How to generate N random genotypes (where N is set in config.py).
+- How to develop the genotypes into phenotypes to form robots.
+- How to visualize such robots to assess their morphologies.
+- How to use the "MorphologicalMeasures" class to calculate different morphological traits of individuals.
+- How to use such measures to compute population metrics, such as diversity.
diff --git a/examples/4_example_experiment_setups/4g_explore_initial_population/config.py b/examples/4_example_experiment_setups/4g_explore_initial_population/config.py
new file mode 100644
index 000000000..d21fd36e0
--- /dev/null
+++ b/examples/4_example_experiment_setups/4g_explore_initial_population/config.py
@@ -0,0 +1,7 @@
+"""Configuration parameters for this example."""
+
+DATABASE_FILE = "database.sqlite"
+NUM_SIMULATORS = 8
+POPULATION_SIZE = 2  # Setting this to 1 will result in warnings and faulty diversity measures; diversity needs more than one individual.
+EVALUATE = True
+VISUALIZE_MAP = True  # Be careful when setting this to True with POPULATION_SIZE > 1, as you will get plots for each individual.
diff --git a/examples/4_example_experiment_setups/4g_explore_initial_population/evaluator.py b/examples/4_example_experiment_setups/4g_explore_initial_population/evaluator.py
new file mode 100644
index 000000000..f7d53ed6a
--- /dev/null
+++ b/examples/4_example_experiment_setups/4g_explore_initial_population/evaluator.py
@@ -0,0 +1,74 @@
+"""Evaluator class."""
+
+from genotype import Genotype
+
+from revolve2.experimentation.evolution.abstract_elements import Evaluator as Eval
+from revolve2.modular_robot_simulation import (
+    ModularRobotScene,
+    Terrain,
+    simulate_scenes,
+)
+from revolve2.simulators.mujoco_simulator import LocalSimulator
+from revolve2.standards import fitness_functions, terrains
+from revolve2.standards.simulation_parameters import make_standard_batch_parameters
+
+
+class Evaluator(Eval):
+    """Provides evaluation of robots."""
+
+    _simulator: LocalSimulator
+    _terrain: Terrain
+
+    def __init__(
+        self,
+        headless: bool,
+        num_simulators: int,
+    ) -> None:
+        """
+        Initialize this object.
+
+        :param headless: `headless` parameter for the physics simulator.
+        :param num_simulators: `num_simulators` parameter for the physics simulator.
+        """
+        self._simulator = LocalSimulator(
+            headless=headless, num_simulators=num_simulators
+        )
+        self._terrain = terrains.flat()
+
+    def evaluate(
+        self,
+        population: list[Genotype],
+    ) -> list[float]:
+        """
+        Evaluate multiple robots.
+
+        Fitness is the distance traveled on the xy plane.
+
+        :param population: The robots to simulate.
+        :returns: Fitnesses of the robots.
+        """
+        robots = [genotype.develop() for genotype in population]
+        # Create the scenes.
+        scenes = []
+        for robot in robots:
+            scene = ModularRobotScene(terrain=self._terrain)
+            scene.add_robot(robot)
+            scenes.append(scene)
+
+        # Simulate all scenes.
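+        # All scenes are simulated in one batch; the local simulator runs them
+        # in parallel over `num_simulators` processes and returns the sampled
+        # states of each scene, in the same order as `scenes`.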
+        scene_states = simulate_scenes(
+            simulator=self._simulator,
+            batch_parameters=make_standard_batch_parameters(),
+            scenes=scenes,
+        )
+
+        # Calculate the xy displacements.
+        xy_displacements = [
+            fitness_functions.xy_displacement(
+                states[0].get_modular_robot_simulation_state(robot),
+                states[-1].get_modular_robot_simulation_state(robot),
+            )
+            for robot, states in zip(robots, scene_states)
+        ]
+
+        return xy_displacements
diff --git a/examples/4_example_experiment_setups/4g_explore_initial_population/genotype.py b/examples/4_example_experiment_setups/4g_explore_initial_population/genotype.py
new file mode 100644
index 000000000..fd1db0255
--- /dev/null
+++ b/examples/4_example_experiment_setups/4g_explore_initial_population/genotype.py
@@ -0,0 +1,48 @@
+"""Genotype class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+import multineat
+import numpy as np
+
+from revolve2.modular_robot import ModularRobot
+from revolve2.standards.genotypes.cppnwin.modular_robot import BrainGenotypeCpg
+from revolve2.standards.genotypes.cppnwin.modular_robot.v2 import BodyGenotypeV2
+
+
+@dataclass
+class Genotype(BodyGenotypeV2, BrainGenotypeCpg):
+    """A genotype for a body and brain using CPPN."""
+
+    @classmethod
+    def random(
+        cls,
+        innov_db_body: multineat.InnovationDatabase,
+        innov_db_brain: multineat.InnovationDatabase,
+        rng: np.random.Generator,
+    ) -> Genotype:
+        """
+        Create a random genotype.
+
+        :param innov_db_body: Multineat innovation database for the body. See Multineat library.
+        :param innov_db_brain: Multineat innovation database for the brain. See Multineat library.
+        :param rng: Random number generator.
+        :returns: The created genotype.
+        """
+        body = cls.random_body(innov_db_body, rng)
+        brain = cls.random_brain(innov_db_brain, rng)
+
+        return Genotype(body=body.body, brain=brain.brain)
+
+    def develop(self, visualize: bool = False) -> ModularRobot:
+        """
+        Develop the genotype into a modular robot.
+
+        :param visualize: Whether to plot the mapping from genotype to phenotype.
+        :returns: The created robot.
+ """ + body = self.develop_body(visualize=visualize) + brain = self.develop_brain(body=body) + return ModularRobot(body=body, brain=brain) diff --git a/examples/4_example_experiment_setups/4g_explore_initial_population/individual.py b/examples/4_example_experiment_setups/4g_explore_initial_population/individual.py new file mode 100644 index 000000000..cb254b57f --- /dev/null +++ b/examples/4_example_experiment_setups/4g_explore_initial_population/individual.py @@ -0,0 +1,13 @@ +"""Individual class.""" + +from dataclasses import dataclass + +from genotype import Genotype + + +@dataclass +class Individual: + """An individual in a population.""" + + genotype: Genotype + fitness: float diff --git a/examples/4_example_experiment_setups/4g_explore_initial_population/main.py b/examples/4_example_experiment_setups/4g_explore_initial_population/main.py new file mode 100644 index 000000000..8d9e72a97 --- /dev/null +++ b/examples/4_example_experiment_setups/4g_explore_initial_population/main.py @@ -0,0 +1,194 @@ +"""Main script for the example.""" + +import logging +import random + +import config +import multineat +import numpy as np +from evaluator import Evaluator +from genotype import Genotype +from individual import Individual +from pyrr import Vector3 +from scipy.spatial import distance +from sklearn.neighbors import KDTree + +from revolve2.experimentation.logging import setup_logging +from revolve2.experimentation.rng import make_rng_time_seed +from revolve2.modular_robot import ModularRobot +from revolve2.modular_robot_simulation import ModularRobotScene, simulate_scenes +from revolve2.simulation.scene import Pose +from revolve2.simulators.mujoco_simulator import LocalSimulator +from revolve2.standards import terrains +from revolve2.standards.morphological_measures import MorphologicalMeasures +from revolve2.standards.simulation_parameters import make_standard_batch_parameters + + +def calculate_morphological_features(robot: ModularRobot) -> dict[str, float]: + """ + Calculate the morphological features for a given robot using the MorphologicalMeasures class. + + :param robot: A ModularRobot object. + :returns: A dictionary with the calculated morphological features. + """ + measures: MorphologicalMeasures[np.generic] = MorphologicalMeasures( + robot.body + ) # Explore this class and the referenced paper to learn more about the traits + features = { + "symmetry": measures.symmetry, + "proportion": ( + measures.proportion_2d if measures.is_2d else 0 + ), # Use 0 for 3D robots + "coverage": measures.coverage, + "extremities_prop": measures.limbs, + "extensiveness_prop": measures.length_of_limbs, + "hinge_prop": ( + measures.num_active_hinges / measures.num_modules + if measures.num_modules > 0 + else 0 + ), + "hinge_ratio": ( + measures.num_active_hinges / measures.num_bricks + if measures.num_bricks > 0 + else 0 + ), + "branching_prop": measures.branching, + } + return features + + +def calculate_euclidean_diversity( + morphological_features: list[dict[str, float]] +) -> float: + """ + Calculate the Euclidean diversity of the population using morphological features. + + :param morphological_features: List of morphological features for each robot in the population. + :returns: The average Euclidean diversity of the population. 
+ """ + n = len(morphological_features) + total_distance = 0 + count = 0 + + # Convert feature dictionaries to feature vectors + feature_vectors = [list(features.values()) for features in morphological_features] + + # Calculate pairwise Euclidean distances + for i in range(n): + for j in range(i + 1, n): + dist = distance.euclidean(feature_vectors[i], feature_vectors[j]) + total_distance += dist + count += 1 + + # Average Euclidean distance across all pairs + avg_euclidean_diversity = total_distance / count if count > 0 else 0 + return avg_euclidean_diversity + + +def calculate_kdtree_diversity(morphological_features: list[dict[str, float]]) -> float: + """ + Calculate the diversity of the population using KDTree for nearest neighbors. + + :param morphological_features: List of morphological features for each robot in the population. + :returns: The average KDTree-based diversity of the population. + """ + # Convert feature dictionaries to feature vectors and buld a Kdtree + feature_vectors = [list(features.values()) for features in morphological_features] + kdtree = KDTree(feature_vectors, leaf_size=30, metric="euclidean") + + # Compute the distances of each robot to its k nearest neighbors + k = ( + len(morphological_features) - 1 + ) # Using k-1 because the nearest neighbor is the point itself + distances, _ = kdtree.query(feature_vectors, k=k) + + # Calculate the average diversity as the mean distance to nearest neighbors + avg_kdtree_diversity: float = np.mean([np.mean(dist) for dist in distances]) + + return avg_kdtree_diversity + + +def main() -> None: + """Run the simulation.""" + # Set up logging. + setup_logging() + + # Set up the random number generator and databases. + rng = make_rng_time_seed() + innov_db_body = multineat.InnovationDatabase() + innov_db_brain = multineat.InnovationDatabase() + + # Create an initial population. + logging.info("Generating initial population.") + initial_genotypes = [ + Genotype.random( + innov_db_body=innov_db_body, + innov_db_brain=innov_db_brain, + rng=rng, + ) + for _ in range(config.POPULATION_SIZE) + ] + + # You can choose to not evaluate the robots if all you want is to visualize the morphologies or compute diversity to save time + if config.EVALUATE: + logging.info("Evaluating initial population.") + initial_fitnesses = Evaluator( + headless=True, num_simulators=config.NUM_SIMULATORS + ).evaluate(initial_genotypes) + else: + initial_fitnesses = [ + random.uniform(0.0, 1.0) for _ in range(len(initial_genotypes)) + ] + + # Create a population of individuals, combining genotype with fitness. 
+    population = [
+        Individual(genotype, fitness)
+        for genotype, fitness in zip(initial_genotypes, initial_fitnesses, strict=True)
+    ]
+
+    # Create the robot bodies from the genotypes of the population.
+    # With "VISUALIZE_MAP" set to True in config.py, the body creation process is plotted step by step.
+    robots = [
+        individual.genotype.develop(config.VISUALIZE_MAP) for individual in population
+    ]
+
+    # Calculate the morphological features for each robot in the population.
+    morphological_features = []
+    for robot in robots:
+        features = calculate_morphological_features(robot)
+        morphological_features.append(features)
+
+    # Calculate the Euclidean-based morphological diversity of the population.
+    euclidean_diversity = calculate_euclidean_diversity(morphological_features)
+    print(
+        f"Euclidean-based morphological diversity of the population: {euclidean_diversity}"
+    )
+
+    # Calculate the KDTree-based morphological diversity of the population.
+    kdtree_diversity = calculate_kdtree_diversity(morphological_features)
+    print(f"KDTree-based morphological diversity of the population: {kdtree_diversity}")
+
+    # Now we can create a scene and add the developed robots to it, side by side.
+    scene = ModularRobotScene(terrain=terrains.flat())
+    for i, robot in enumerate(robots):
+        scene.add_robot(robot, pose=Pose(Vector3([i, 0.0, 0.0])))
+
+    # Declare the simulators and simulation parameters.
+    simulator = LocalSimulator(viewer_type="custom", headless=False)
+    batch_parameters = make_standard_batch_parameters()
+    batch_parameters.simulation_time = 1000
+    batch_parameters.simulation_timestep = 0.01
+    batch_parameters.sampling_frequency = 10
+
+    # Finally, simulate the scene to inspect the resulting morphologies.
+    simulate_scenes(
+        simulator=simulator,
+        batch_parameters=batch_parameters,
+        scenes=scene,
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/4_example_experiment_setups/4g_explore_initial_population/requirements.txt b/examples/4_example_experiment_setups/4g_explore_initial_population/requirements.txt
new file mode 100644
index 000000000..eee5ac04f
--- /dev/null
+++ b/examples/4_example_experiment_setups/4g_explore_initial_population/requirements.txt
@@ -0,0 +1 @@
+scikit-learn>=1.5.2
diff --git a/examples/4_example_experiment_setups/README.md b/examples/4_example_experiment_setups/README.md
index ed0f72a72..92c300d26 100644
--- a/examples/4_example_experiment_setups/README.md
+++ b/examples/4_example_experiment_setups/README.md
@@ -9,4 +9,5 @@ Additionally you will learn how to use the evolution abstraction layer in Revolv
 - `4d_robot_bodybrain_ea_database` is the same example, with the addition of databases to allow for data storage.
 - If you want to add learning into your experiments, look at `4e_robot_brain_cmaes`, in which we add learning to a **single** robot.
 - `4f_robot_brain_cmaes_database` does the same thing as the previous example, with the addition of a database.
+- Finally, you can learn about exploring an initial population and its morphological features in `4g_explore_initial_population`.
diff --git a/modular_robot/revolve2/modular_robot/body/v2/_active_hinge_v2.py b/modular_robot/revolve2/modular_robot/body/v2/_active_hinge_v2.py
index f061e6d76..307119369 100644
--- a/modular_robot/revolve2/modular_robot/body/v2/_active_hinge_v2.py
+++ b/modular_robot/revolve2/modular_robot/body/v2/_active_hinge_v2.py
@@ -23,21 +23,21 @@ def __init__(self, rotation: float | RightAngles):
             range=1.047197551,
             effort=0.948013269,
             velocity=6.338968228,
-            frame_bounding_box=Vector3([0.018, 0.053, 0.0165891]),
-            frame_offset=0.04525,
-            servo1_bounding_box=Vector3([0.0583, 0.0512, 0.020]),
-            servo2_bounding_box=Vector3([0.002, 0.053, 0.053]),
+            frame_bounding_box=Vector3([0.018, 0.052, 0.0165891]),
+            frame_offset=0.04495,
+            servo1_bounding_box=Vector3([0.05125, 0.0512, 0.020]),
+            servo2_bounding_box=Vector3([0.002, 0.052, 0.052]),
             frame_mass=0.01632,
             servo1_mass=0.058,
             servo2_mass=0.025,
-            servo_offset=0.0299,
+            servo_offset=0.0239,
             joint_offset=0.0119,
             static_friction=1.0,
             dynamic_friction=1.0,
             armature=0.002,
             pid_gain_p=5.0,
             pid_gain_d=0.05,
-            child_offset=0.0583 / 2 + 0.002,
+            child_offset=0.05125 / 2 + 0.002 + 0.01,
             sensors=[
                 ActiveHingeSensor()
             ],  # By default, V2 robots have ActiveHinge sensors, since the hardware also supports them natively.
diff --git a/modular_robot/revolve2/modular_robot/body/v2/_brick_v2.py b/modular_robot/revolve2/modular_robot/body/v2/_brick_v2.py
index 6990e420b..c8588ee7c 100644
--- a/modular_robot/revolve2/modular_robot/body/v2/_brick_v2.py
+++ b/modular_robot/revolve2/modular_robot/body/v2/_brick_v2.py
@@ -13,10 +13,11 @@ def __init__(self, rotation: float | RightAngles):
 
         :param rotation: The module's rotation.
         """
+        w, h, d = 0.075, 0.075, 0.075
         super().__init__(
             rotation=rotation,
-            bounding_box=Vector3([0.06288625, 0.06288625, 0.0603]),
+            bounding_box=Vector3([w, h, d]),
             mass=0.06043,
-            child_offset=0.06288625 / 2.0,
+            child_offset=d / 2.0,
             sensors=[],
         )
diff --git a/modular_robot/revolve2/modular_robot/brain/cpg/_cpg_network_structure.py b/modular_robot/revolve2/modular_robot/brain/cpg/_cpg_network_structure.py
index 8103fb009..bb808fbac 100644
--- a/modular_robot/revolve2/modular_robot/brain/cpg/_cpg_network_structure.py
+++ b/modular_robot/revolve2/modular_robot/brain/cpg/_cpg_network_structure.py
@@ -156,7 +156,10 @@ def make_uniform_state(self, value: float) -> npt.NDArray[np.float_]:
 
         :param value: The value to use for all states.
         :returns: The array of states.
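+
+        Note that half of the returned states are set to ``value`` and the other half
+        to ``-value``; the number of states is always even, as each CPG contributes two states.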
""" - return np.full(self.num_states, value) + _half = self.num_states // 2 + mask = np.hstack([np.full(_half, 1), np.full(_half, -1)]) + + return mask * value @property def num_cpgs(self) -> int: diff --git a/project.yml b/project.yml index 5bfcc86ba..f79ddd3cf 100644 --- a/project.yml +++ b/project.yml @@ -24,6 +24,7 @@ examples: - 4_example_experiment_setups/4d_robot_bodybrain_ea_database - 4_example_experiment_setups/4e_robot_brain_cmaes - 4_example_experiment_setups/4f_robot_brain_cmaes_database + - 4_example_experiment_setups/4g_explore_initial_population - 5_physical_modular_robots/5a_physical_robot_remote - 5_physical_modular_robots/5b_compare_simulated_and_physical_robot tests-dir: tests diff --git a/requirements_dev.txt b/requirements_dev.txt index 5c453d647..7feb074c6 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -12,4 +12,5 @@ -r ./examples/4_example_experiment_setups/4d_robot_bodybrain_ea_database/requirements.txt -r ./examples/4_example_experiment_setups/4f_robot_brain_cmaes_database/requirements.txt -r ./examples/4_example_experiment_setups/4b_simple_ea_xor_database/requirements.txt +-r ./examples/4_example_experiment_setups/4g_explore_initial_population/requirements.txt -r ./tests/requirements.txt diff --git a/requirements_editable.txt b/requirements_editable.txt index cec14ab6c..d9b9ddeda 100644 --- a/requirements_editable.txt +++ b/requirements_editable.txt @@ -10,3 +10,5 @@ -r ./examples/4_example_experiment_setups/4d_robot_bodybrain_ea_database/requirements.txt -r ./examples/4_example_experiment_setups/4f_robot_brain_cmaes_database/requirements.txt -r ./examples/4_example_experiment_setups/4b_simple_ea_xor_database/requirements.txt +-r ./examples/4_example_experiment_setups/4g_explore_initial_population/requirements.txt + diff --git a/simulators/mujoco_simulator/revolve2/simulators/mujoco_simulator/_control_interface_impl.py b/simulators/mujoco_simulator/revolve2/simulators/mujoco_simulator/_control_interface_impl.py index a63808a1c..fd4f1a4fe 100644 --- a/simulators/mujoco_simulator/revolve2/simulators/mujoco_simulator/_control_interface_impl.py +++ b/simulators/mujoco_simulator/revolve2/simulators/mujoco_simulator/_control_interface_impl.py @@ -40,7 +40,9 @@ def set_joint_hinge_position_target( assert ( maybe_hinge_joint_mujoco is not None ), "Hinge joint does not exist in this scene." 
+ # Set position target self._data.ctrl[maybe_hinge_joint_mujoco.ctrl_index_position] = position + # Set velocity target self._data.ctrl[maybe_hinge_joint_mujoco.ctrl_index_velocity] = 0.0 diff --git a/simulators/mujoco_simulator/revolve2/simulators/mujoco_simulator/_simulate_scene.py b/simulators/mujoco_simulator/revolve2/simulators/mujoco_simulator/_simulate_scene.py index a701515a6..8c86f573e 100644 --- a/simulators/mujoco_simulator/revolve2/simulators/mujoco_simulator/_simulate_scene.py +++ b/simulators/mujoco_simulator/revolve2/simulators/mujoco_simulator/_simulate_scene.py @@ -179,7 +179,11 @@ def simulate_scene( if not headless or ( record_settings is not None and time >= last_video_time + video_step ): - viewer.render() + _status = viewer.render() + + # Check if simulation was closed + if _status == -1: + break # capture video frame if it's time if record_settings is not None and time >= last_video_time + video_step: diff --git a/simulators/mujoco_simulator/revolve2/simulators/mujoco_simulator/viewers/_custom_mujoco_viewer.py b/simulators/mujoco_simulator/revolve2/simulators/mujoco_simulator/viewers/_custom_mujoco_viewer.py index 82fd6a243..a9b7a1a48 100755 --- a/simulators/mujoco_simulator/revolve2/simulators/mujoco_simulator/viewers/_custom_mujoco_viewer.py +++ b/simulators/mujoco_simulator/revolve2/simulators/mujoco_simulator/viewers/_custom_mujoco_viewer.py @@ -80,7 +80,10 @@ def render(self) -> int | None: :return: A cycle position if applicable. """ - super().render() + if self.is_alive: + super().render() + else: + return -1 if self._viewer_mode == CustomMujocoViewerMode.MANUAL: return self._position return None diff --git a/standards/revolve2/standards/genotypes/cppnwin/_random_multineat_genotype.py b/standards/revolve2/standards/genotypes/cppnwin/_random_multineat_genotype.py index ac405f74b..306dadcdb 100644 --- a/standards/revolve2/standards/genotypes/cppnwin/_random_multineat_genotype.py +++ b/standards/revolve2/standards/genotypes/cppnwin/_random_multineat_genotype.py @@ -33,7 +33,7 @@ def random_multineat_genotype( False, # FS_NEAT output_activation_func, # output activation type multineat.ActivationFunction.UNSIGNED_SIGMOID, # hidden activation type - 1, # seed_type + 0, # seed_type multineat_params, 1, # number of hidden layers ) diff --git a/standards/revolve2/standards/genotypes/cppnwin/modular_robot/v2/_body_develop.py b/standards/revolve2/standards/genotypes/cppnwin/modular_robot/v2/_body_develop.py index d4e39a0cf..c1d748079 100644 --- a/standards/revolve2/standards/genotypes/cppnwin/modular_robot/v2/_body_develop.py +++ b/standards/revolve2/standards/genotypes/cppnwin/modular_robot/v2/_body_develop.py @@ -2,6 +2,7 @@ from queue import Queue from typing import Any +import matplotlib.pyplot as plt import multineat import numpy as np from numpy.typing import NDArray @@ -22,28 +23,26 @@ class __Module: def develop( genotype: multineat.Genome, + visualize: bool = False, # Add a flag to control visualization ) -> BodyV2: """ - Develop a CPPNWIN genotype into a modular robot body. + Develop a CPPNWIN genotype into a modular robot body with optional step-by-step visualization. It is important that the genotype was created using a compatible function. :param genotype: The genotype to create the body from. - :returns: The create body. + :param visualize: Whether to visualize the body development process. + :returns: The created body. """ - max_parts = 20 # Determine the maximum parts available for a robots body. 
+ max_parts = 20 # Determine the maximum parts available for a robot's body. body_net = ( multineat.NeuralNetwork() ) # Instantiate the CPPN network for body construction. genotype.BuildPhenotype(body_net) # Build the CPPN from the genotype of the robot. - - to_explore: Queue[__Module] = ( - Queue() - ) # Here we have a queue that is used to build our robot. + to_explore: Queue[__Module] = Queue() # Queue used to build the robot. grid = np.zeros( shape=(max_parts * 2 + 1, max_parts * 2 + 1, max_parts * 2 + 1), dtype=np.uint8 ) - body = BodyV2() core_position = Vector3( @@ -63,6 +62,12 @@ def develop( ) ) + # Prepare for visualization if enabled + if visualize: + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + plt.show(block=False) + while not to_explore.empty(): module = to_explore.get() @@ -72,6 +77,12 @@ def develop( if child is not None: to_explore.put(child) part_count += 1 + if visualize: + __visualize_structure(grid, ax) + + if visualize: + plt.pause(0.001) # Allow the plot to update smoothly + return body @@ -128,7 +139,8 @@ def __add_child( if grid[tuple(position)] > 0: return None - """Now we anjust the position for the potential new module to fit the attachment point of the parent, additionally we query the CPPN for child type and angle of the child.""" + """Now we adjust the position for the potential new module to fit the attachment point of the parent, + additionally we query the CPPN for child type and angle of the child""" new_pos = np.array(np.round(position + attachment_point.offset), dtype=np.int64) child_type, angle = __evaluate_cppn(body_net, new_pos, chain_length) @@ -143,18 +155,12 @@ def __add_child( up = __rotate(module.up, forward, Quaternion.from_eulers([angle, 0, 0])) module.module_reference.set_child(child, attachment_index) - return __Module( - position, - forward, - up, - chain_length, - child, - ) + return __Module(position, forward, up, chain_length, child) def __rotate(a: Vector3, b: Vector3, rotation: Quaternion) -> Vector3: """ - Rotates vector a, a given angle around b. + Rotates vector a around the axis defined by vector b by an angle defined in the rotation quaternion. Rodrigues' rotation formula is used. :param a: Vector a. :param b: Vector b. @@ -178,3 +184,23 @@ def __vec3_int(vector: Vector3) -> Vector3[np.int_]: :return: The integer vector. """ return Vector3(list(map(lambda v: int(round(v)), vector)), dtype=np.int64) + + +def __visualize_structure(grid: NDArray[np.uint8], ax: plt.Axes) -> None: + """ + Visualize the structure of the robot's body using Matplotlib. + + :param grid: The 3D grid containing the robot body. + :param ax: The Matplotlib Axes3D object to draw on. + """ + ax.clear() + ax.set_xlabel("X") + ax.set_ylabel("Y") + ax.set_zlabel("Z") + + # Get the occupied grid positions + x, y, z = np.nonzero(grid) + + ax.scatter(x, y, z, c="r", marker="o") + plt.draw() + plt.pause(0.5) diff --git a/standards/revolve2/standards/genotypes/cppnwin/modular_robot/v2/_body_genotype_v2.py b/standards/revolve2/standards/genotypes/cppnwin/modular_robot/v2/_body_genotype_v2.py index 9bd895527..90a70554e 100644 --- a/standards/revolve2/standards/genotypes/cppnwin/modular_robot/v2/_body_genotype_v2.py +++ b/standards/revolve2/standards/genotypes/cppnwin/modular_robot/v2/_body_genotype_v2.py @@ -109,10 +109,11 @@ def crossover_body( ) ) - def develop_body(self) -> BodyV2: + def develop_body(self, visualize: bool = False) -> BodyV2: """ Develop the genotype into a modular robot. 
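+
+        Set ``visualize`` to True to draw each step of the body's construction in a Matplotlib 3D plot.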
+ :param visualize: Whether to plot the mapping from genotype to phenotype for visualization. :returns: The created robot. """ - return develop(self.body.genotype) + return develop(self.body.genotype, visualize=visualize) From 6d15a1ccca941baa2d0cc5e31963a09a8f6f6f0f Mon Sep 17 00:00:00 2001 From: Ting-Chia Chiang Date: Tue, 8 Oct 2024 10:45:59 +0200 Subject: [PATCH 2/2] Add camera support for v2 physical interfaces (#580) * Update the dependencies for modular_robot_physical * Add camera support for physical_interfaces v2 * Update the doc for clarity * Refactor image channel conversion for cap'n proto serialization * Add display_camera_view() to remote and include camera support in an example --- .../physical_robot_core_setup/index.rst | 2 + .../5a_physical_robot_remote/main.py | 9 ++++ modular_robot_physical/pyproject.toml | 3 +- .../v2/_v2_physical_interface.py | 19 ++----- .../modular_robot_physical/remote/_remote.py | 49 +++++++++++++++++-- .../robot_daemon/_robo_server_impl.py | 7 +-- 6 files changed, 66 insertions(+), 23 deletions(-) diff --git a/docs/source/physical_robot_core_setup/index.rst b/docs/source/physical_robot_core_setup/index.rst index 533a0b640..012c40f91 100644 --- a/docs/source/physical_robot_core_setup/index.rst +++ b/docs/source/physical_robot_core_setup/index.rst @@ -39,6 +39,8 @@ On the RPi adjust the config in `/boot/config.txt` or on newer systems `/boot/fi ------------------ Setting up the RPi ------------------ +**Note**: For students in the CI Group, the RPi is already set up. All RPis are flashed with the same image, so the following steps are not necessary. Additionally, there should be an IP address on the head, allowing you to SSH into it. However, be aware that there will always be ongoing development changes in the revolve2-modular-robot_physical and revolve2-robohat packages, so make sure to pip install the latest version in your virtual environment. + This step is the same for all types of hardware. #. Flash the SD card with Raspberry Pi OS (previously Raspbian). Some Important notes: diff --git a/examples/5_physical_modular_robots/5a_physical_robot_remote/main.py b/examples/5_physical_modular_robots/5a_physical_robot_remote/main.py index b7ae25644..b63898ef8 100644 --- a/examples/5_physical_modular_robots/5a_physical_robot_remote/main.py +++ b/examples/5_physical_modular_robots/5a_physical_robot_remote/main.py @@ -1,9 +1,12 @@ """An example on how to remote control a physical modular robot.""" +from pyrr import Vector3 + from revolve2.experimentation.rng import make_rng_time_seed from revolve2.modular_robot import ModularRobot from revolve2.modular_robot.body import RightAngles from revolve2.modular_robot.body.base import ActiveHinge +from revolve2.modular_robot.body.sensors import CameraSensor from revolve2.modular_robot.body.v2 import ActiveHingeV2, BodyV2, BrickV2 from revolve2.modular_robot.brain.cpg import BrainCpgNetworkNeighborRandom from revolve2.modular_robot_physical import Config, UUIDKey @@ -40,6 +43,10 @@ def make_body() -> ( body.core_v2.right_face.bottom, body.core_v2.right_face.bottom.attachment, ) + """Add a camera sensor to the core.""" + body.core.add_sensor( + CameraSensor(position=Vector3([0, 0, 0]), camera_size=(480, 640)) + ) return body, active_hinges @@ -100,6 +107,7 @@ def main() -> None: Create a Remote for the physical modular robot. Make sure to target the correct hardware type and fill in the correct IP and credentials. The debug flag is turned on. 
    If the remote complains it cannot keep up, turning off debugging might improve performance.
+    If you want to display the camera view, set display_camera_view to True.
     """
     print("Initializing robot..")
     run_remote(
         config=config,
         hostname="localhost",  # Set the robot IP here.
         debug=True,
         on_prepared=on_prepared,
+        display_camera_view=False,
     )
     """
     Note that, in theory, if you want the robot to be self-controlled and not dependent on an external remote, you can run this script on the robot locally.
diff --git a/modular_robot_physical/pyproject.toml b/modular_robot_physical/pyproject.toml
index 6adc977a7..da3c1b8d5 100644
--- a/modular_robot_physical/pyproject.toml
+++ b/modular_robot_physical/pyproject.toml
@@ -32,8 +32,9 @@ pyrr = "^0.10.3"
 typed-argparse = "^0.3.1"
 pycapnp = { version = "^2.0.0b2" }
 pigpio = { version = "^1.78", optional = true }
-revolve2-robohat = { version = "0.5.0", optional = true }
+revolve2-robohat = { version = "0.6.2", optional = true }
 rpi-lgpio = { version = "0.5", optional = true }
+opencv-python = "^4.10.0.84"
 # capnp-stub-generator is disabled because it depends on pycapnp <2.0.0.
 # It is rarely used and by developers only, so we remove it for now and developers can install it manually.
 # If you manually install it make sure you also install the correct pycapnp version afterwards as it will be overridden.
diff --git a/modular_robot_physical/revolve2/modular_robot_physical/physical_interfaces/v2/_v2_physical_interface.py b/modular_robot_physical/revolve2/modular_robot_physical/physical_interfaces/v2/_v2_physical_interface.py
index 5f73f5923..7d34ddec7 100644
--- a/modular_robot_physical/revolve2/modular_robot_physical/physical_interfaces/v2/_v2_physical_interface.py
+++ b/modular_robot_physical/revolve2/modular_robot_physical/physical_interfaces/v2/_v2_physical_interface.py
@@ -1,13 +1,13 @@
 import math
 from typing import Sequence
 
-import cv2
 import numpy as np
 from numpy.typing import NDArray
 from pyrr import Vector3
 from robohatlib.hal.assemblyboard.PwmPlug import PwmPlug
 from robohatlib.hal.assemblyboard.servo.ServoData import ServoData
 from robohatlib.hal.assemblyboard.ServoAssemblyConfig import ServoAssemblyConfig
+from robohatlib.hal.Camera import Camera
 from robohatlib.Robohat import Robohat
 
 from .._physical_interface import PhysicalInterface
@@ -84,6 +84,7 @@ def __init__(self, debug: bool, dry: bool) -> None:
         self._robohat.init(servoboard_1_datas_list, servoboard_2_datas_list)
         self._robohat.do_buzzer_beep()
         self._robohat.set_servo_direct_mode(_mode=True)
+        self.cam: Camera = self._robohat.get_camera()
 
     def set_servo_targets(self, pins: list[int], targets: list[float]) -> None:
         """
@@ -181,17 +182,7 @@ def get_camera_view(self) -> NDArray[np.uint8]:
         """
         Get the current view from the camera.
 
-        :returns: A dummy image until robohatlib has camera support.
-        """
-        image = np.zeros((3, 100, 100), dtype=int)
-        cv2.putText(
-            image,
-            "Dummy Image",
-            (10, 10),
-            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
-            fontScale=1,
-            color=(255, 0, 0),
-            thickness=1,
-            lineType=2,
-        )
+        :returns: An image captured from robohatlib.
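+
+        The capture is expected to be an (H, W, 3) uint8 array, matching the per-channel serialization done by the robot daemon.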
+ """ + image = self.cam.get_capture_array().astype(np.uint8) return image diff --git a/modular_robot_physical/revolve2/modular_robot_physical/remote/_remote.py b/modular_robot_physical/revolve2/modular_robot_physical/remote/_remote.py index 6bbf0feca..085b16233 100644 --- a/modular_robot_physical/revolve2/modular_robot_physical/remote/_remote.py +++ b/modular_robot_physical/revolve2/modular_robot_physical/remote/_remote.py @@ -3,6 +3,7 @@ from typing import Callable import capnp +import cv2 import numpy as np from numpy.typing import NDArray from pyrr import Vector3 @@ -50,6 +51,7 @@ async def _run_remote_impl( port: int, debug: bool, manual_mode: bool, + display_camera_view: bool, ) -> None: active_hinge_sensor_to_pin = { UUIDKey(key.value.sensors.active_hinge_sensor): pin @@ -207,6 +209,7 @@ async def _run_remote_impl( pin_controls = _active_hinge_targets_to_pin_controls( config, control_interface._set_active_hinges ) + match hardware_type: case HardwareType.v1: await service.control( @@ -242,6 +245,13 @@ async def _run_remote_impl( camera_sensor_states=camera_sensor_states, ) + # Display camera image + if display_camera_view: + _display_camera_view( + config.modular_robot.body.core.sensors.camera_sensor, + sensor_readings, + ) + if battery_print_timer > 5.0: print( f"Battery level is at {sensor_readings.battery * 100.0}%." @@ -265,11 +275,17 @@ def _capnp_to_camera_view( :param camera_size: The camera size to reconstruct the image. :return: The NDArray imag. """ - np_image = np.zeros(shape=(3, *camera_size), dtype=np.uint8) - np_image[0] = np.array(image.r).reshape(camera_size).astype(np.uint8) - np_image[1] = np.array(image.g).reshape(camera_size).astype(np.uint8) - np_image[2] = np.array(image.b).reshape(camera_size).astype(np.uint8) - return np_image + r_channel = np.array(image.r, dtype=np.uint8).reshape( + (camera_size[0], camera_size[1]) + ) + g_channel = np.array(image.g, dtype=np.uint8).reshape( + (camera_size[0], camera_size[1]) + ) + b_channel = np.array(image.b, dtype=np.uint8).reshape( + (camera_size[0], camera_size[1]) + ) + rgb_image = cv2.merge((r_channel, g_channel, b_channel)).astype(np.uint8) + return rgb_image def _get_imu_sensor_state( @@ -318,6 +334,26 @@ def _get_camera_sensor_state( } +def _display_camera_view( + camera_sensor: CameraSensor | None, + sensor_readings: robot_daemon_protocol_capnp.SensorReadings, +) -> None: + """ + Display a camera view from the camera readings. + + :param camera_sensor: The sensor in question. + :param sensor_readings: The sensor readings. + """ + if camera_sensor is None: + print("No camera sensor found.") + else: + rgb_image = _capnp_to_camera_view( + sensor_readings.cameraView, camera_sensor.camera_size + ) + cv2.imshow("Captured Image", rgb_image) + cv2.waitKey(1) + + def run_remote( config: Config, hostname: str, @@ -325,6 +361,7 @@ def run_remote( port: int = STANDARD_PORT, debug: bool = False, manual_mode: bool = False, + display_camera_view: bool = False, ) -> None: """ Control a robot remotely, running the controller on your local machine. @@ -335,6 +372,7 @@ def run_remote( :param port: Port the robot daemon uses. :param debug: Enable debug messages. :param manual_mode: Enable manual controls for the robot, ignoring the brain. + :param display_camera_view: Display the camera view of the robot. 
""" asyncio.run( capnp.run( @@ -345,6 +383,7 @@ def run_remote( port=port, debug=debug, manual_mode=manual_mode, + display_camera_view=display_camera_view, ) ) ) diff --git a/modular_robot_physical/revolve2/modular_robot_physical/robot_daemon/_robo_server_impl.py b/modular_robot_physical/revolve2/modular_robot_physical/robot_daemon/_robo_server_impl.py index 39e8ab6b8..30824cb2c 100644 --- a/modular_robot_physical/revolve2/modular_robot_physical/robot_daemon/_robo_server_impl.py +++ b/modular_robot_physical/revolve2/modular_robot_physical/robot_daemon/_robo_server_impl.py @@ -293,8 +293,9 @@ def _camera_view_to_capnp(image: NDArray[np.uint8]) -> capnpImage: :param image: The NDArray image. :return: The capnp Image object. """ + # Convert each channel to a list of Int32 for Cap'n Proto return robot_daemon_protocol_capnp.Image( - r=image[0].flatten().tolist(), - g=image[1].flatten().tolist(), - b=image[2].flatten().tolist(), + r=image[:, :, 0].astype(np.int32).flatten().tolist(), + g=image[:, :, 1].astype(np.int32).flatten().tolist(), + b=image[:, :, 2].astype(np.int32).flatten().tolist(), )