diff --git a/tensorboard/plugins/hparams/backend_context.py b/tensorboard/plugins/hparams/backend_context.py
index 98959498d5e..5a61939d9f5 100644
--- a/tensorboard/plugins/hparams/backend_context.py
+++ b/tensorboard/plugins/hparams/backend_context.py
@@ -403,7 +403,7 @@ def compute_metric_infos_from_data_provider_session_groups(
         self, ctx, experiment_id, session_groups
     ):
         session_runs = set(
-            f"{s.experiment_id}/{s.run}" if s.run else s.experiment_id
+            generate_data_provider_session_name(experiment_id, s)
             for sg in session_groups
             for s in sg.sessions
         )
@@ -460,6 +460,22 @@ def _compute_metric_names(self, ctx, experiment_id, session_runs):
         return metric_names_list
 
 
+def generate_data_provider_session_name(experiment_id, session):
+    """Generates a name from a HyperparameterSessionRun.
+
+    If the HyperparameterSessionRun contains no experiment or run information,
+    then the name is set to the original experiment_id.
+    """
+    if not session.experiment_id and not session.run:
+        return experiment_id
+    elif not session.experiment_id:
+        return session.run
+    elif not session.run:
+        return session.experiment_id
+    else:
+        return f"{session.experiment_id}/{session.run}"
+
+
 def _find_longest_parent_path(path_set, path):
     """Finds the longest "parent-path" of 'path' in 'path_set'.
diff --git a/tensorboard/plugins/hparams/backend_context_test.py b/tensorboard/plugins/hparams/backend_context_test.py
index e11bfed7a6e..ef7d6f36a0e 100644
--- a/tensorboard/plugins/hparams/backend_context_test.py
+++ b/tensorboard/plugins/hparams/backend_context_test.py
@@ -652,6 +652,43 @@ def test_experiment_from_data_provider_session_group_without_run_name(self):
         """
         self.assertProtoEquals(expected_exp, actual_exp)
 
+    def test_experiment_from_data_provider_session_group_without_experiment_name(
+        self,
+    ):
+        self._mock_tb_context.data_provider.list_tensors.side_effect = None
+        self._hyperparameters = provider.ListHyperparametersResult(
+            hyperparameters=[],
+            session_groups=[
+                provider.HyperparameterSessionGroup(
+                    root=provider.HyperparameterSessionRun(
+                        experiment_id="", run="exp/session_1"
+                    ),
+                    sessions=[
+                        provider.HyperparameterSessionRun(
+                            experiment_id="", run="exp/session_1"
+                        ),
+                    ],
+                    hyperparameter_values=[],
+                ),
+            ],
+        )
+        actual_exp = self._experiment_from_metadata()
+        expected_exp = """
+            metric_infos: {
+                name: {group: '', tag: 'accuracy'}
+            }
+            metric_infos: {
+                name: {group: '', tag: 'loss'}
+            }
+            metric_infos: {
+                name: {group: 'eval', tag: 'loss'}
+            }
+            metric_infos: {
+                name: {group: 'train', tag: 'loss'}
+            }
+        """
+        self.assertProtoEquals(expected_exp, actual_exp)
+
     def test_experiment_from_data_provider_old_response_type(self):
         self._hyperparameters = [
             provider.Hyperparameter(
diff --git a/tensorboard/plugins/hparams/list_session_groups.py b/tensorboard/plugins/hparams/list_session_groups.py
index 9fbaa323a01..8252a76258b 100644
--- a/tensorboard/plugins/hparams/list_session_groups.py
+++ b/tensorboard/plugins/hparams/list_session_groups.py
@@ -25,10 +25,12 @@
 from tensorboard.data import provider
 from tensorboard.plugins.hparams import api_pb2
+from tensorboard.plugins.hparams import backend_context as backend_context_lib
 from tensorboard.plugins.hparams import error
 from tensorboard.plugins.hparams import json_format_compat
 from tensorboard.plugins.hparams import metadata
 from tensorboard.plugins.hparams import metrics
+from tensorboard.plugins.hparams import plugin_data_pb2
 
 
 class Handler:
@@ -93,13 +95,15 @@ def _session_groups_from_tags(self):
             hparams_run_to_tag_to_content,
             # Don't pass any information from the DataProvider since we are only
             # examining session groups based on tag metadata
-            [],
+            provider.ListHyperparametersResult(
+                hyperparameters=[], session_groups=[]
+            ),
         )
         extractors = _create_extractors(self._request.col_params)
         filters = _create_filters(self._request.col_params, extractors)
         session_groups = self._build_session_groups(
-            hparams_run_to_tag_to_content, experiment
+            hparams_run_to_tag_to_content, experiment.metric_infos
         )
         session_groups = self._filter(session_groups, filters)
         self._sort(session_groups, extractors)
@@ -116,16 +120,37 @@ def _session_groups_from_data_provider(self):
             sort,
         )
 
+        metric_infos = self._backend_context.compute_metric_infos_from_data_provider_session_groups(
+            self._request_context, self._experiment_id, response
+        )
+
+        all_metric_evals = self._backend_context.read_last_scalars(
+            self._request_context,
+            self._experiment_id,
+            run_tag_filter=None,
+        )
+
         session_groups = []
         for provider_group in response:
-            sessions = [
-                api_pb2.Session(name=f"{s.experiment_id}/{s.run}")
-                for s in provider_group.sessions
-            ]
-            name = (
-                f"{provider_group.root.experiment_id}/{provider_group.root.run}"
-                if provider_group.root.run
-                else provider_group.root.experiment_id
+            sessions = []
+            for session in provider_group.sessions:
+                session_name = (
+                    backend_context_lib.generate_data_provider_session_name(
+                        self._experiment_id, session
+                    )
+                )
+                sessions.append(
+                    self._build_session(
+                        metric_infos,
+                        session_name,
+                        plugin_data_pb2.SessionStartInfo(),
+                        plugin_data_pb2.SessionEndInfo(),
+                        all_metric_evals,
+                    )
+                )
+
+            name = backend_context_lib.generate_data_provider_session_name(
+                self._experiment_id, provider_group.root
             )
             session_group = api_pb2.SessionGroup(
                 name=name,
@@ -154,9 +179,16 @@
             session_groups.append(session_group)
 
+        # Compute the aggregated metrics for each session group.
+        for group in session_groups:
+            if group.sessions:
+                self._aggregate_metrics(group)
+
         return session_groups
 
-    def _build_session_groups(self, hparams_run_to_tag_to_content, experiment):
+    def _build_session_groups(
+        self, hparams_run_to_tag_to_content, metric_infos
+    ):
         """Returns a list of SessionGroups protobuffers from the summary
         data."""
@@ -178,7 +210,7 @@ def _build_session_groups(self, hparams_run_to_tag_to_content, experiment):
             metric_runs = set()
             metric_tags = set()
             for session_name in session_names:
-                for metric in experiment.metric_infos:
+                for metric in metric_infos:
                     metric_name = metric.name
                     (run, tag) = metrics.run_tag_from_session_and_metric(
                         session_name, metric_name
@@ -207,7 +239,11 @@
                 tag_to_content[metadata.SESSION_END_INFO_TAG]
             )
             session = self._build_session(
-                experiment, session_name, start_info, end_info, all_metric_evals
+                metric_infos,
+                session_name,
+                start_info,
+                end_info,
+                all_metric_evals,
             )
             if session.status in self._request.allowed_statuses:
                 self._add_session(session, start_info, groups_by_name)
@@ -263,7 +299,7 @@ def _add_session(self, session, start_info, groups_by_name):
             groups_by_name[group_name] = group
 
     def _build_session(
-        self, experiment, name, start_info, end_info, all_metric_evals
+        self, metric_infos, name, start_info, end_info, all_metric_evals
     ):
         """Builds a session object."""
 
@@ -273,7 +309,7 @@ def _build_session(
             start_time_secs=start_info.start_time_secs,
             model_uri=start_info.model_uri,
             metric_values=self._build_session_metric_values(
-                experiment, name, all_metric_evals
+                metric_infos, name, all_metric_evals
             ),
             monitor_url=start_info.monitor_url,
         )
@@ -283,13 +319,13 @@ def _build_session(
         return result
 
     def _build_session_metric_values(
-        self, experiment, session_name, all_metric_evals
+        self, metric_infos, session_name, all_metric_evals
     ):
         """Builds the session metric values."""
 
         # result is a list of api_pb2.MetricValue instances.
         result = []
-        for metric_info in experiment.metric_infos:
+        for metric_info in metric_infos:
             metric_name = metric_info.name
             (run, tag) = metrics.run_tag_from_session_and_metric(
                 session_name, metric_name
diff --git a/tensorboard/plugins/hparams/list_session_groups_test.py b/tensorboard/plugins/hparams/list_session_groups_test.py
index 0363461960d..34b486ccedd 100644
--- a/tensorboard/plugins/hparams/list_session_groups_test.py
+++ b/tensorboard/plugins/hparams/list_session_groups_test.py
@@ -48,6 +48,9 @@ def setUp(self):
         self._mock_tb_context.data_provider.list_tensors.side_effect = (
             self._mock_list_tensors
         )
+        self._mock_tb_context.data_provider.list_scalars.side_effect = (
+            self._mock_list_scalars
+        )
         self._mock_tb_context.data_provider.read_scalars.side_effect = (
             self._mock_read_scalars
         )
@@ -215,6 +218,48 @@ def _mock_list_tensors(
                 result[run][tag] = t
         return result
 
+    def _mock_list_scalars(
+        self,
+        ctx,
+        *,
+        experiment_id,
+        plugin_name,
+        run_tag_filter=provider.RunTagFilter(),
+    ):
+        """Mock data for DataProvider.list_scalars().
+
+        The ScalarTimeSeries generated here correspond to the scalar values
+        generated by _mock_read_scalars().
+
+        These are currently used exclusively by the DataProvider-based hparams
+        to generate metric_infos, whereas the classic Tensor-based hparams
+        generate metric_infos from the ExperimentTag in _mock_list_tensors().
+ """ + scalars_content = { + "session_1": { + "current_temp": b"", + "delta_temp": b"", + "optional_metric": b"", + }, + "session_2": {"current_temp": b"", "delta_temp": b""}, + "session_3": {"current_temp": b"", "delta_temp": b""}, + "session_4": {"current_temp": b"", "delta_temp": b""}, + "session_5": {"current_temp": b"", "delta_temp": b""}, + } + result = {} + for run, tag_to_content in scalars_content.items(): + result.setdefault(run, {}) + for tag, content in tag_to_content.items(): + t = provider.ScalarTimeSeries( + max_step=0, + max_wall_time=0, + plugin_content=content, + description="", + display_name="", + ) + result[run][tag] = t + return result + def _mock_read_scalars( self, ctx=None, @@ -1919,6 +1964,178 @@ def test_experiment_from_data_provider_start_index_and_slize_size(self): response, ) + def test_experiment_from_data_provider_with_metric_values_from_experiment_id( + self, + ): + self._mock_tb_context.data_provider.list_tensors.side_effect = None + self._hyperparameters = [ + provider.HyperparameterSessionGroup( + # The sessions names correspond to return values from + # _mock_list_scalars() and _mock_read_scalars() in order to + # generate metric infos and values. + root=provider.HyperparameterSessionRun( + experiment_id="session_2", run="" + ), + sessions=[ + provider.HyperparameterSessionRun( + experiment_id="session_2", run="" + ) + ], + hyperparameter_values=[], + ), + ] + request = """ + start_index: 0 + slice_size: 10 + """ + response = self._run_handler(request) + self.assertLen(response.session_groups, 1) + self.assertEquals("session_2", response.session_groups[0].name) + self.assertLen(response.session_groups[0].sessions, 1) + self.assertProtoEquals( + """ + name: "session_2" + metric_values { + name { + tag: "current_temp" + } + value: 100.0 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { + name { + tag: "delta_temp" + } + value: 150.0 + training_step: 3 + wall_time_secs: 11.0 + } + """, + response.session_groups[0].sessions[0], + ) + + def test_experiment_from_data_provider_with_metric_values_from_run_name( + self, + ): + self._mock_tb_context.data_provider.list_tensors.side_effect = None + self._hyperparameters = [ + provider.HyperparameterSessionGroup( + # The sessions names correspond to return values from + # _mock_list_scalars() and _mock_read_scalars() in order to + # generate metric infos and values. + root=provider.HyperparameterSessionRun( + experiment_id="", run="session_2" + ), + sessions=[ + provider.HyperparameterSessionRun( + experiment_id="", run="session_2" + ) + ], + hyperparameter_values=[], + ), + ] + request = """ + start_index: 0 + slice_size: 10 + """ + response = self._run_handler(request) + self.assertLen(response.session_groups, 1) + self.assertEquals("session_2", response.session_groups[0].name) + self.assertLen(response.session_groups[0].sessions, 1) + self.assertProtoEquals( + """ + name: "session_2" + metric_values { + name { + tag: "current_temp" + } + value: 100.0 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { + name { + tag: "delta_temp" + } + value: 150.0 + training_step: 3 + wall_time_secs: 11.0 + } + """, + response.session_groups[0].sessions[0], + ) + + def test_experiment_from_data_provider_with_metric_values_aggregates( + self, + ): + # Aggregations are tested in-depth elsewhere using the Tensor-based + # hparams. For DataProvider-based hparam tests we just test one + # aggregation to verify the aggregation logic is being applied. 
+        self._mock_tb_context.data_provider.list_tensors.side_effect = None
+        self._hyperparameters = [
+            provider.HyperparameterSessionGroup(
+                # The session names correspond to return values from
+                # _mock_list_scalars() and _mock_read_scalars() in order to
+                # generate metric infos and values.
+                root=provider.HyperparameterSessionRun(
+                    experiment_id="", run=""
+                ),
+                sessions=[
+                    provider.HyperparameterSessionRun(
+                        experiment_id="", run="session_1"
+                    ),
+                    provider.HyperparameterSessionRun(
+                        experiment_id="", run="session_2"
+                    ),
+                    provider.HyperparameterSessionRun(
+                        experiment_id="", run="session_3"
+                    ),
+                ],
+                hyperparameter_values=[],
+            )
+        ]
+        request = """
+            start_index: 0
+            slice_size: 10
+            aggregation_type: AGGREGATION_AVG
+        """
+        response = self._run_handler(request)
+        self.assertLen(response.session_groups[0].metric_values, 3)
+        self.assertProtoEquals(
+            """
+            name {
+              tag: "current_temp"
+            }
+            value: 37.0
+            training_step: 1
+            wall_time_secs: 1.0
+            """,
+            response.session_groups[0].metric_values[0],
+        )
+        self.assertProtoEquals(
+            """
+            name {
+              tag: "delta_temp"
+            }
+            value: 55.5
+            training_step: 2
+            wall_time_secs: 10.3333333
+            """,
+            response.session_groups[0].metric_values[1],
+        )
+        self.assertProtoEquals(
+            """
+            name {
+              tag: "optional_metric"
+            }
+            value: 33.0
+            training_step: 20
+            wall_time_secs: 2.0
+            """,
+            response.session_groups[0].metric_values[2],
+        )
+
     def _run_handler(self, request):
         request_proto = api_pb2.ListSessionGroupsRequest()
         text_format.Merge(request, request_proto)
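
Reviewer note: the naming rule introduced by generate_data_provider_session_name() is easiest to check against concrete inputs. Below is a minimal, self-contained sketch of that rule; the Session dataclass is a hypothetical stand-in for provider.HyperparameterSessionRun, of which only the experiment_id and run string fields are assumed here.

```python
from dataclasses import dataclass


@dataclass
class Session:
    """Hypothetical stand-in for provider.HyperparameterSessionRun."""

    experiment_id: str = ""
    run: str = ""


def session_name(experiment_id, session):
    # Mirrors generate_data_provider_session_name() from the diff above:
    # fall back to the requesting experiment's id only when the session
    # carries neither an experiment_id nor a run.
    if not session.experiment_id and not session.run:
        return experiment_id
    elif not session.experiment_id:
        return session.run
    elif not session.run:
        return session.experiment_id
    else:
        return f"{session.experiment_id}/{session.run}"


assert session_name("exp", Session()) == "exp"
assert session_name("exp", Session(run="session_1")) == "session_1"
assert session_name("exp", Session(experiment_id="e1")) == "e1"
assert session_name("exp", Session("e1", "session_1")) == "e1/session_1"
```

The second and third asserts correspond to the "without_experiment_name" and "without_run_name" test cases: either identifier alone is used verbatim, so existing single-identifier group names keep resolving the same way.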
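Reviewer note: the expectations in test_experiment_from_data_provider_with_metric_values_aggregates follow from per-metric averaging over the group's sessions. Here is a hedged sketch of that arithmetic, assuming the value and wall time are arithmetic means and the training step is the floored mean; the session_1 and session_3 delta_temp readings below are made up for illustration, as only session_2's 150.0 / step 3 / 11.0s appears in the fixtures above.

```python
def aggregate_avg(measurements):
    """Averages a list of (value, training_step, wall_time_secs) triples."""
    n = len(measurements)
    return (
        sum(value for value, _, _ in measurements) / n,
        # Floored mean of the steps -- an assumption consistent with the
        # asserted training_step of 2 for delta_temp.
        sum(step for _, step, _ in measurements) // n,
        sum(wall for _, _, wall in measurements) / n,
    )


# session_1 and session_3 readings are invented; session_2's triple is
# taken from the single-session test expectations above.
print(aggregate_avg([(10.0, 1, 10.0), (150.0, 3, 11.0), (6.5, 2, 10.0)]))
# -> (55.5, 2, 10.333333333333334), matching the asserted delta_temp
# aggregate: value 55.5, training_step 2, wall_time_secs 10.3333333.
```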