diff --git a/dlt/cli/_dlt.py b/dlt/cli/_dlt.py
index 1d5f7ce932..9894227046 100644
--- a/dlt/cli/_dlt.py
+++ b/dlt/cli/_dlt.py
@@ -498,7 +498,10 @@ def main() -> int:
     )
     pipe_cmd_schema = pipeline_subparsers.add_parser("schema", help="Displays default schema")
     pipe_cmd_schema.add_argument(
-        "--format", choices=["json", "yaml"], default="yaml", help="Display schema in this format"
+        "--format",
+        choices=["json", "yaml"],
+        default="yaml",
+        help="Display schema in this format",
     )
     pipe_cmd_schema.add_argument(
         "--remove-defaults", action="store_true", help="Does not show default hint values"
diff --git a/dlt/cli/pipeline_command.py b/dlt/cli/pipeline_command.py
index 91f64763d3..9981fa8493 100644
--- a/dlt/cli/pipeline_command.py
+++ b/dlt/cli/pipeline_command.py
@@ -263,7 +263,13 @@ def _display_pending_packages() -> Tuple[Sequence[str], Sequence[str]]:
             fmt.warning("Pipeline does not have a default schema")
         else:
             fmt.echo("Found schema with name %s" % fmt.bold(p.default_schema_name))
-        schema_str = p.default_schema.to_pretty_yaml(remove_defaults=True)
+        format_ = command_kwargs.get("format")
+        remove_defaults_ = command_kwargs.get("remove_defaults")
+        s = p.default_schema
+        if format_ == "json":
+            schema_str = json.dumps(s.to_dict(remove_defaults=remove_defaults_), pretty=True)
+        else:
+            schema_str = s.to_pretty_yaml(remove_defaults=remove_defaults_)
         fmt.echo(schema_str)
 
     if operation == "drop":
diff --git a/docs/website/docs/intro-snippets.py b/docs/website/docs/intro-snippets.py
index 340a6ff262..f270dcee6e 100644
--- a/docs/website/docs/intro-snippets.py
+++ b/docs/website/docs/intro-snippets.py
@@ -18,14 +18,13 @@ def intro_snippet() -> None:
         response.raise_for_status()
         data.append(response.json())
     # Extract, normalize, and load the data
-    load_info = pipeline.run(data, table_name='player')
+    load_info = pipeline.run(data, table_name="player")
     # @@@DLT_SNIPPET_END api
     assert_load_info(load_info)
 
 
 def csv_snippet() -> None:
-
     # @@@DLT_SNIPPET_START csv
     import dlt
     import pandas as pd
 
@@ -50,8 +49,8 @@ def csv_snippet() -> None:
     assert_load_info(load_info)
 
 
-def db_snippet() -> None:
+def db_snippet() -> None:
     # @@@DLT_SNIPPET_START db
     import dlt
     from sqlalchemy import create_engine
@@ -74,13 +73,9 @@ def db_snippet() -> None:
         )
 
         # Convert the rows into dictionaries on the fly with a map function
-        load_info = pipeline.run(
-            map(lambda row: dict(row._mapping), rows),
-            table_name="genome"
-        )
+        load_info = pipeline.run(map(lambda row: dict(row._mapping), rows), table_name="genome")
 
     print(load_info)
     # @@@DLT_SNIPPET_END db
 
     assert_load_info(load_info)
-