Remove surprise files from cli and test new options for showing errors
eigerx committed Jul 30, 2024
1 parent 94c8ff2 commit 475ff72
Showing 3 changed files with 66 additions and 56 deletions.
python/lsst/ctrl/mpexec/cli/cmd/commands.py (14 changes: 6 additions & 8 deletions)
@@ -346,12 +346,10 @@ def update_graph_run(
 @click.option("--full-output-filename", default="", help="Output report as a yaml file with this name.")
 @click.option("--logs/--no-logs", default=True, help="Get butler log datasets for extra information.")
 @click.option(
-    "--show-errors",
-    is_flag=True,
+    "--brief",
     default=False,
-    help="Pretty-print a dict of errors from failed"
-    " quanta to the screen. Note: the default is to output a yaml file with error information"
-    " (data_ids and associated messages) to the current working directory instead.",
+    is_flag=True,
+    help="Only show counts in report for a brief summary (no error information).",
 )
 @click.option(
     "--curse-failed-logs",
@@ -373,7 +371,7 @@ def report(
     where: str,
     full_output_filename: str = "",
     logs: bool = True,
-    show_errors: bool = False,
+    brief: bool = False,
     curse_failed_logs: bool = False,
     force_v2: bool = False,
 ) -> None:
@@ -386,8 +384,8 @@ def report(
     """
     if force_v2 or len(qgraphs) > 1 or collections is not None:
         script.report_v2(
-            repo, qgraphs, collections, where, full_output_filename, logs, show_errors, curse_failed_logs
+            repo, qgraphs, collections, where, full_output_filename, logs, brief, curse_failed_logs
         )
     else:
         assert len(qgraphs) == 1, "Cannot make a report without a quantum graph."
-        script.report(repo, qgraphs[0], full_output_filename, logs, show_errors)
+        script.report(repo, qgraphs[0], full_output_filename, logs, brief)
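For readers skimming the diff, a minimal self-contained sketch of the click flag pattern used above (a toy command, not the real pipetask entry point; the names here are illustrative only):

```python
import click


@click.command()
@click.option(
    "--brief",
    default=False,
    is_flag=True,
    help="Only show counts in report for a brief summary (no error information).",
)
def report(brief: bool) -> None:
    # click passes the flag's value as a keyword argument; omitting
    # the flag on the command line yields the default, False.
    if brief:
        click.echo("counts only")
    else:
        click.echo("counts plus per-quantum error detail")


if __name__ == "__main__":
    report()
```

Invoked as `python toy_report.py --brief`, this prints only the counts line, mirroring how the new option gates report detail.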
python/lsst/ctrl/mpexec/cli/script/report.py (39 changes: 7 additions & 32 deletions)
@@ -25,11 +25,9 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 import pprint
-import time
 from collections.abc import Sequence
 from typing import Any

-import yaml
 from astropy.table import Table
 from lsst.daf.butler import Butler
 from lsst.pipe.base import QuantumGraph
@@ -42,7 +40,7 @@ def report(
     qgraph_uri: str,
     full_output_filename: str | None,
     logs: bool = True,
-    show_errors: bool = False,
+    brief: bool = False,
 ) -> None:
     """Summarize the produced and missing expected dataset in a quantum graph.
@@ -62,11 +60,9 @@
         command-line instead.
     logs : `bool`
         Get butler log datasets for extra information (error messages).
-    show_errors : `bool`
-        If no output yaml is provided, print error messages to the
-        command-line along with the report. By default, these messages and
-        their associated data ids are stored in a yaml file with format
-        `{run timestamp}_err.yaml` in the working directory instead.
+    brief : `bool`
+        List only the counts (or data_ids if number of failures < 5). This
+        option is good for those who just want to see totals.
     """
     butler = Butler.from_config(butler_config, writeable=False)
     qgraph = QuantumGraph.loadUri(qgraph_uri)
@@ -110,16 +106,9 @@
         datasets.add_column(data_products, index=0, name="DatasetType")
         quanta.pprint_all()
         print("\n")
-        if show_errors:
+        if not brief:
             pprint.pprint(error_summary)
             print("\n")
-        else:
-            assert qgraph.metadata is not None, "Saved QGs always have metadata."
-            collection = qgraph.metadata["output_run"]
-            collection = str(collection)
-            run_name = collection.split("/")[-1]
-            with open(f"{run_name}_err.yaml", "w") as stream:
-                yaml.safe_dump(error_summary, stream)
         datasets.pprint_all()
     else:
         report.write_summary_yaml(butler, full_output_filename, do_store_logs=logs)
@@ -132,7 +121,7 @@ def report_v2(
     where: str,
     full_output_filename: str | None,
     logs: bool = True,
-    show_errors: bool = False,
+    brief: bool = False,
     curse_failed_logs: bool = False,
 ) -> None:
     """Docstring."""
@@ -261,24 +250,10 @@
     if cursed_datasets:
         print("Cursed Datasets")
         curse_table.pprint_all()
-    if show_errors:
+    if not brief:
         if failed_quanta_table:
             print("Failed Quanta")
             pprint.pprint(failed_quanta_table)
         if unsuccessful_datasets:
             print("Unsuccessful Datasets")
             pprint.pprint(unsuccessful_datasets)
-    elif not show_errors:
-        timestr = time.strftime("%Y%m%d-%H%M%S")
-        if failed_quanta_table:
-            with open(f"qpg_failed_quanta_{timestr}.yaml", "w") as stream:
-                yaml.safe_dump(failed_quanta_table, stream)
-        if wonky_quanta_table:
-            with open(f"qpg_wonky_quanta_{timestr}.yaml", "w") as stream:
-                yaml.safe_dump(wonky_quanta_table, stream)
-        if unsuccessful_datasets:
-            with open(f"qpg_unsuccessful_datasets_{timestr}.yaml", "w") as stream:
-                yaml.safe_dump(unsuccessful_datasets, stream)
-        if curse_table:
-            with open(f"qpg_cursed_datasets_{timestr}.yaml", "w") as stream:
-                yaml.safe_dump(curse_table, stream)
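The net effect of this file's changes is that error detail becomes an opt-out terminal feature instead of a surprise YAML side effect. A schematic sketch of the new control flow (the table names are stand-ins for the structures built earlier in `report_v2`, not its real signature):

```python
import pprint


def print_error_detail(
    failed_quanta_table: dict,
    unsuccessful_datasets: dict,
    brief: bool = False,
) -> None:
    """Sketch of the post-change behavior: full error detail prints to
    the terminal unless --brief is given; no timestamped YAML files are
    written to the working directory."""
    if not brief:
        if failed_quanta_table:
            print("Failed Quanta")
            pprint.pprint(failed_quanta_table)
        if unsuccessful_datasets:
            print("Unsuccessful Datasets")
            pprint.pprint(unsuccessful_datasets)
```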
tests/test_cliCmdReport.py (69 changes: 53 additions & 16 deletions)
@@ -87,7 +87,7 @@ def test_report(self):

         result_hr = self.runner.invoke(
             pipetask_cli,
-            ["report", self.root, graph_uri, "--no-logs", "--show-errors"],
+            ["report", self.root, graph_uri, "--no-logs"],
             input="no",
         )

@@ -103,31 +103,68 @@
         self.assertIn("Expected", result_hr.stdout)
         self.assertIn("Succeeded", result_hr.stdout)

+        # Check brief option for pipetask report
+        result_brief = self.runner.invoke(
+            pipetask_cli,
+            ["report", self.root, graph_uri, "--no-logs", "--brief"],
+            input="no",
+        )
+        self.assertIsInstance(result_brief.stdout, str)
+
+        # Check that task0 and the failed quanta for task0 exist in the string
+        self.assertIn("task0", result_brief.stdout)
+        self.assertIn("Failed", result_brief.stdout)
+        self.assertIn("Expected", result_brief.stdout)
+        self.assertIn("Succeeded", result_brief.stdout)
+
         # Test cli for the QPG
-        result_v2_show_err = self.runner.invoke(
+        result_v2_terminal_out = self.runner.invoke(
             pipetask_cli,
-            ["report", self.root, graph_uri, "--no-logs", "--show-errors", "--force-v2"],
+            ["report", self.root, graph_uri, "--no-logs", "--force-v2"],
             input="no",
         )

         # Check that we can read from the command line
-        self.assertEqual(result_v2_show_err.exit_code, 0, clickResultMsg(result_v2_show_err))
+        self.assertEqual(result_v2_terminal_out.exit_code, 0, clickResultMsg(result_v2_terminal_out))

         # Check that we get string output
-        self.assertIsInstance(result_v2_show_err.stdout, str)
+        self.assertIsInstance(result_v2_terminal_out.stdout, str)

         # Check that task0 and the quanta for task0 exist in the string
-        self.assertIn("task0", result_v2_show_err.stdout)
-        self.assertIn("Not Attempted", result_v2_show_err.stdout)
-        self.assertIn("Successful", result_v2_show_err.stdout)
-        self.assertIn("Blocked", result_v2_show_err.stdout)
-        self.assertIn("Failed", result_v2_show_err.stdout)
-        self.assertIn("Wonky", result_v2_show_err.stdout)
-        self.assertIn("TOTAL", result_v2_show_err.stdout)
-        self.assertIn("EXPECTED", result_v2_show_err.stdout)
+        self.assertIn("task0", result_v2_terminal_out.stdout)
+        self.assertIn("Not Attempted", result_v2_terminal_out.stdout)
+        self.assertIn("Successful", result_v2_terminal_out.stdout)
+        self.assertIn("Blocked", result_v2_terminal_out.stdout)
+        self.assertIn("Failed", result_v2_terminal_out.stdout)
+        self.assertIn("Wonky", result_v2_terminal_out.stdout)
+        self.assertIn("TOTAL", result_v2_terminal_out.stdout)
+        self.assertIn("EXPECTED", result_v2_terminal_out.stdout)

-        # Check that title from --show-errors appears
-        self.assertIn("Unsuccessful Datasets", result_v2_show_err.stdout)
+        # Check that title from the error summary appears
+        self.assertIn("Unsuccessful Datasets", result_v2_terminal_out.stdout)
+
+        # Test cli for the QPG brief option
+        result_v2_brief = self.runner.invoke(
+            pipetask_cli,
+            ["report", self.root, graph_uri, "--no-logs", "--force-v2", "--brief"],
+            input="no",
+        )
+
+        # Check that we can read from the command line
+        self.assertEqual(result_v2_brief.exit_code, 0, clickResultMsg(result_v2_brief))
+
+        # Check that we get string output
+        self.assertIsInstance(result_v2_brief.stdout, str)
+
+        # Check that task0 and the quanta for task0 exist in the string
+        self.assertIn("task0", result_v2_brief.stdout)
+        self.assertIn("Not Attempted", result_v2_brief.stdout)
+        self.assertIn("Successful", result_v2_brief.stdout)
+        self.assertIn("Blocked", result_v2_brief.stdout)
+        self.assertIn("Failed", result_v2_brief.stdout)
+        self.assertIn("Wonky", result_v2_brief.stdout)
+        self.assertIn("TOTAL", result_v2_brief.stdout)
+        self.assertIn("EXPECTED", result_v2_brief.stdout)

         # Check that the full output option works
         test_filename_v2 = os.path.join(self.root, "report_test.json")
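The tests above follow click's standard testing pattern. A minimal standalone sketch of that pattern (a toy command stands in for `pipetask_cli`; the flag and assertions are illustrative):

```python
import click
from click.testing import CliRunner


@click.command()
@click.option("--brief", default=False, is_flag=True)
def report(brief: bool) -> None:
    # Toy stand-in: a real report would print summary tables here.
    click.echo("Expected Succeeded Failed")
    if not brief:
        click.echo("error detail ...")


def test_report_brief() -> None:
    runner = CliRunner()
    result = runner.invoke(report, ["--brief"], input="no")
    # exit_code 0 means the command ran; output carries the report text.
    assert result.exit_code == 0
    assert "Succeeded" in result.output
    assert "error detail" not in result.output


if __name__ == "__main__":
    test_report_brief()
    print("ok")
```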
