diff --git a/api/delta_table/delta_table_alterer/index.html b/api/delta_table/delta_table_alterer/index.html index 891c3a7220..b6d14729a1 100644 --- a/api/delta_table/delta_table_alterer/index.html +++ b/api/delta_table/delta_table_alterer/index.html @@ -1412,7 +1412,7 @@
add_constraint(constraints: Dict[str, str]) -> None
+add_constraint(constraints: Dict[str, str], custom_metadata: Optional[Dict[str, str]] = None) -> None
@@ -1446,22 +1446,36 @@
required
+
+ custom_metadata
+
+ Optional[Dict[str, str]]
+
+
+
+ custom metadata that will be added to the transaction commit.
+
+
+
+ None
+
+
-
-
- Example
- from deltalake import DeltaTable
+ Example:
+
from deltalake import DeltaTable
dt = DeltaTable("test_table_constraints")
dt.alter.add_constraint({
"value_gt_5": "value > 5",
})
-
-Check configuration
-
-
+**Check configuration**
+```
+dt.metadata().configuration
+{'delta.constraints.value_gt_5': 'value > 5'}
+```
+
+
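For reference, a minimal usage sketch of the new parameter (the metadata keys and values below are hypothetical):
```
from deltalake import DeltaTable

dt = DeltaTable("test_table_constraints")
# Attach hypothetical audit metadata to the ADD CONSTRAINT commit
dt.alter.add_constraint(
    {"value_gt_5": "value > 5"},
    custom_metadata={"created_by": "data-eng", "reason": "data quality"},
)
```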
diff --git a/api/delta_table/delta_table_merger/index.html b/api/delta_table/delta_table_merger/index.html
index 65fcbf541f..f75e53272a 100644
--- a/api/delta_table/delta_table_merger/index.html
+++ b/api/delta_table/delta_table_merger/index.html
@@ -1492,7 +1492,7 @@
-TableMerger(table: DeltaTable, source: pyarrow.RecordBatchReader, predicate: str, source_alias: Optional[str] = None, target_alias: Optional[str] = None, safe_cast: bool = True, writer_properties: Optional[WriterProperties] = None)
+TableMerger(table: DeltaTable, source: pyarrow.RecordBatchReader, predicate: str, source_alias: Optional[str] = None, target_alias: Optional[str] = None, safe_cast: bool = True, writer_properties: Optional[WriterProperties] = None, custom_metadata: Optional[Dict[str, str]] = None)
diff --git a/api/delta_table/delta_table_optimizer/index.html b/api/delta_table/delta_table_optimizer/index.html
index c8295d8e51..5db384062c 100644
--- a/api/delta_table/delta_table_optimizer/index.html
+++ b/api/delta_table/delta_table_optimizer/index.html
@@ -1426,7 +1426,7 @@
-compact(partition_filters: Optional[FilterType] = None, target_size: Optional[int] = None, max_concurrent_tasks: Optional[int] = None, min_commit_interval: Optional[Union[int, timedelta]] = None, writer_properties: Optional[WriterProperties] = None) -> Dict[str, Any]
+compact(partition_filters: Optional[FilterType] = None, target_size: Optional[int] = None, max_concurrent_tasks: Optional[int] = None, min_commit_interval: Optional[Union[int, timedelta]] = None, writer_properties: Optional[WriterProperties] = None, custom_metadata: Optional[Dict[str, str]] = None) -> Dict[str, Any]
@@ -1526,6 +1526,20 @@
None
+
+ custom_metadata
+
+ Optional[Dict[str, str]]
+
+
+
+ custom metadata that will be added to the transaction commit.
+
+
+
+ None
+
+
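As an illustrative sketch (table path, target size, and metadata values are hypothetical), the new argument can be passed alongside the existing compaction options:
```
from deltalake import DeltaTable

dt = DeltaTable("my_table")  # hypothetical table path
# Compact small files; custom_metadata lands in the commitInfo of the OPTIMIZE commit
metrics = dt.optimize.compact(
    target_size=256 * 1024 * 1024,
    custom_metadata={"job": "nightly-compaction"},
)
print(metrics)
```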
@@ -1584,7 +1598,7 @@
-z_order(columns: Iterable[str], partition_filters: Optional[FilterType] = None, target_size: Optional[int] = None, max_concurrent_tasks: Optional[int] = None, max_spill_size: int = 20 * 1024 * 1024 * 1024, min_commit_interval: Optional[Union[int, timedelta]] = None, writer_properties: Optional[WriterProperties] = None) -> Dict[str, Any]
+z_order(columns: Iterable[str], partition_filters: Optional[FilterType] = None, target_size: Optional[int] = None, max_concurrent_tasks: Optional[int] = None, max_spill_size: int = 20 * 1024 * 1024 * 1024, min_commit_interval: Optional[Union[int, timedelta]] = None, writer_properties: Optional[WriterProperties] = None, custom_metadata: Optional[Dict[str, str]] = None) -> Dict[str, Any]
@@ -1696,6 +1710,20 @@
None
+
+ custom_metadata
+
+ Optional[Dict[str, str]]
+
+
+
+ custom metadata that will be added to the transaction commit.
+
+
+
+ None
+
+
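Similarly, a minimal sketch of z-ordering with commit metadata (column names and metadata values are hypothetical):
```
from deltalake import DeltaTable

dt = DeltaTable("my_table")  # hypothetical table path
# Z-order by columns commonly used in filters; metadata is attached to the commit
metrics = dt.optimize.z_order(
    columns=["country", "event_date"],  # hypothetical column names
    custom_metadata={"job": "weekly-zorder"},
)
print(metrics)
```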
diff --git a/api/delta_table/index.html b/api/delta_table/index.html
index 4ccdf0deb3..8c1ed30335 100644
--- a/api/delta_table/index.html
+++ b/api/delta_table/index.html
@@ -1750,7 +1750,7 @@
-create(table_uri: Union[str, Path], schema: Union[pyarrow.Schema, DeltaSchema], mode: Literal['error', 'append', 'overwrite', 'ignore'] = 'error', partition_by: Optional[Union[List[str], str]] = None, name: Optional[str] = None, description: Optional[str] = None, configuration: Optional[Mapping[str, Optional[str]]] = None, storage_options: Optional[Dict[str, str]] = None) -> DeltaTable
+create(table_uri: Union[str, Path], schema: Union[pyarrow.Schema, DeltaSchema], mode: Literal['error', 'append', 'overwrite', 'ignore'] = 'error', partition_by: Optional[Union[List[str], str]] = None, name: Optional[str] = None, description: Optional[str] = None, configuration: Optional[Mapping[str, Optional[str]]] = None, storage_options: Optional[Dict[str, str]] = None, custom_metadata: Optional[Dict[str, str]] = None) -> DeltaTable
@@ -1885,6 +1885,20 @@
None
+
+ custom_metadata
+
+ Optional[Dict[str, str]]
+
+
+
+ custom metadata that will be added to the transaction commit.
+
+
+
+ None
+
+
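Building on the existing create example, a minimal sketch of passing commit metadata at table creation (the metadata values are hypothetical):
```
import pyarrow as pa
from deltalake import DeltaTable

dt = DeltaTable.create(
    table_uri="my_local_table",
    schema=pa.schema([pa.field("foo", pa.string()), pa.field("bar", pa.string())]),
    mode="error",
    partition_by="bar",
    custom_metadata={"created_by": "example"},  # hypothetical commit metadata
)
```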
@@ -1943,7 +1957,7 @@
-delete(predicate: Optional[str] = None, writer_properties: Optional[WriterProperties] = None) -> Dict[str, Any]
+delete(predicate: Optional[str] = None, writer_properties: Optional[WriterProperties] = None, custom_metadata: Optional[Dict[str, str]] = None) -> Dict[str, Any]
@@ -1981,6 +1995,34 @@
None
+
+ writer_properties
+
+ Optional[WriterProperties]
+
+
+
+ Pass writer properties to the Rust parquet writer.
+
+
+
+ None
+
+
+
+ custom_metadata
+
+ Optional[Dict[str, str]]
+
+
+
+ custom metadata that will be added to the transaction commit.
+
+
+
+ None
+
+
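A minimal sketch of a predicate delete that also records commit metadata (table path, predicate, and metadata values are hypothetical):
```
from deltalake import DeltaTable

dt = DeltaTable("my_table")  # hypothetical table path
# Delete matching rows and record who requested the deletion in the commit
metrics = dt.delete(
    predicate="value > 5",
    custom_metadata={"requested_by": "cleanup-job"},
)
print(metrics)
```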
@@ -2711,7 +2753,7 @@
-merge(source: Union[pyarrow.Table, pyarrow.RecordBatch, pyarrow.RecordBatchReader, ds.Dataset, pandas.DataFrame], predicate: str, source_alias: Optional[str] = None, target_alias: Optional[str] = None, error_on_type_mismatch: bool = True, writer_properties: Optional[WriterProperties] = None, large_dtypes: bool = True) -> TableMerger
+merge(source: Union[pyarrow.Table, pyarrow.RecordBatch, pyarrow.RecordBatchReader, ds.Dataset, pandas.DataFrame], predicate: str, source_alias: Optional[str] = None, target_alias: Optional[str] = None, error_on_type_mismatch: bool = True, writer_properties: Optional[WriterProperties] = None, large_dtypes: bool = True, custom_metadata: Optional[Dict[str, str]] = None) -> TableMerger
@@ -2831,6 +2873,20 @@
True
+
+ custom_metadata
+
+ Optional[Dict[str, str]]
+
+
+
+ custom metadata that will be added to the transaction commit.
+
+
+
+ None
+
+
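The metadata passed here flows into the TableMerger shown earlier. A minimal upsert sketch, assuming a hypothetical table path and source data:
```
import pyarrow as pa
from deltalake import DeltaTable

dt = DeltaTable("my_table")  # hypothetical table path
source = pa.table({"id": [1, 2], "value": [10, 20]})

(
    dt.merge(
        source=source,
        predicate="target.id = source.id",
        source_alias="source",
        target_alias="target",
        custom_metadata={"pipeline": "nightly-upsert"},  # hypothetical metadata
    )
    .when_matched_update_all()
    .when_not_matched_insert_all()
    .execute()
)
```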
@@ -2965,7 +3021,7 @@
-repair(dry_run: bool = False) -> Dict[str, Any]
+repair(dry_run: bool = False, custom_metadata: Optional[Dict[str, str]] = None) -> Dict[str, Any]
@@ -3003,6 +3059,20 @@
False
+
+ custom_metadata
+
+ Optional[Dict[str, str]]
+
+
+
+ custom metadata that will be added to the transaction commit.
+
+
+
+ None
+
+
Returns:
@@ -3033,7 +3103,7 @@
-restore(target: Union[int, datetime, str], *, ignore_missing_files: bool = False, protocol_downgrade_allowed: bool = False) -> Dict[str, Any]
+restore(target: Union[int, datetime, str], *, ignore_missing_files: bool = False, protocol_downgrade_allowed: bool = False, custom_metadata: Optional[Dict[str, str]] = None) -> Dict[str, Any]
@@ -3095,6 +3165,20 @@
False
+
+ custom_metadata
+
+ Optional[Dict[str, str]]
+
+
+
+ custom metadata that will be added to the transaction commit.
+
+
+
+ None
+
+
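A minimal sketch of restoring to an earlier version while tagging the RESTORE commit (table path, version, and metadata values are hypothetical):
```
from deltalake import DeltaTable

dt = DeltaTable("my_table")  # hypothetical table path
metrics = dt.restore(
    target=1,  # hypothetical version to restore to
    custom_metadata={"restored_by": "oncall"},
)
print(metrics)
```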
@@ -3476,7 +3560,7 @@
-update(updates: Optional[Dict[str, str]] = None, new_values: Optional[Dict[str, Union[int, float, str, datetime, bool, List[Any]]]] = None, predicate: Optional[str] = None, writer_properties: Optional[WriterProperties] = None, error_on_type_mismatch: bool = True) -> Dict[str, Any]
+update(updates: Optional[Dict[str, str]] = None, new_values: Optional[Dict[str, Union[int, float, str, datetime, bool, List[Any]]]] = None, predicate: Optional[str] = None, writer_properties: Optional[WriterProperties] = None, error_on_type_mismatch: bool = True, custom_metadata: Optional[Dict[str, str]] = None) -> Dict[str, Any]
@@ -3567,32 +3651,24 @@
True
-
-
-
-
-
-
Returns:
-
-
-
- Type
- Description
-
-
-
+ custom_metadata
- Dict[str, Any]
+ Optional[Dict[str, str]]
- the metrics from update
+ custom metadata that will be added to the transaction commit.
+
+ None
+
+ Returns:
+ the metrics from update
Example
@@ -3663,7 +3739,7 @@
-vacuum(retention_hours: Optional[int] = None, dry_run: bool = True, enforce_retention_duration: bool = True) -> List[str]
+vacuum(retention_hours: Optional[int] = None, dry_run: bool = True, enforce_retention_duration: bool = True, custom_metadata: Optional[Dict[str, str]] = None) -> List[str]
@@ -3725,32 +3801,24 @@
True
-
-
-
-
-
-
Returns:
-
-
-
- Type
- Description
-
-
-
+ custom_metadata
- List[str]
+ Optional[Dict[str, str]]
- the list of files no longer referenced by the Delta Table and are older than the retention threshold.
+ custom metadata that will be added to the transaction commit.
+
+ None
+
+ Returns:
+ the list of files no longer referenced by the Delta Table and are older than the retention threshold.
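A minimal sketch of a vacuum dry run carrying commit metadata (table path, retention, and metadata values are hypothetical; the metadata is only written when a commit is actually made):
```
from deltalake import DeltaTable

dt = DeltaTable("my_table")  # hypothetical table path
# Dry run only lists removable files; set dry_run=False to delete them and commit
files = dt.vacuum(
    retention_hours=168,
    dry_run=True,
    custom_metadata={"triggered_by": "retention-policy"},
)
print(files)
```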
diff --git a/api/delta_writer/index.html b/api/delta_writer/index.html
index db19dcfdb9..7c1db6517e 100644
--- a/api/delta_writer/index.html
+++ b/api/delta_writer/index.html
@@ -1828,7 +1828,7 @@
-WriterProperties(data_page_size_limit: Optional[int] = None, dictionary_page_size_limit: Optional[int] = None, data_page_row_count_limit: Optional[int] = None, write_batch_size: Optional[int] = None, max_row_group_size: Optional[int] = None, compression: Optional[str] = None, compression_level: Optional[int] = None)
+WriterProperties(data_page_size_limit: Optional[int] = None, dictionary_page_size_limit: Optional[int] = None, data_page_row_count_limit: Optional[int] = None, write_batch_size: Optional[int] = None, max_row_group_size: Optional[int] = None, compression: Optional[Literal['UNCOMPRESSED', 'SNAPPY', 'GZIP', 'BROTLI', 'LZ4', 'ZSTD', 'LZ4_RAW']] = None, compression_level: Optional[int] = None)
@@ -1836,8 +1836,7 @@
A Writer Properties instance for the Rust parquet writer.
- Create a Writer Properties instance for the Rust parquet writer,
-see options https://arrow.apache.org/rust/parquet/file/properties/struct.WriterProperties.html:
+ Create a Writer Properties instance for the Rust parquet writer:
@@ -1925,11 +1924,11 @@
compression
- Optional[str]
+ Optional[Literal['UNCOMPRESSED', 'SNAPPY', 'GZIP', 'BROTLI', 'LZ4', 'ZSTD', 'LZ4_RAW']]
- compression type
+ compression type.
@@ -1943,7 +1942,10 @@
- level of compression, only relevant for subset of compression types
+ Level of compression. If None and the chosen compression type supports levels, the default level will be used; only relevant for
+GZIP: levels (1-9),
+BROTLI: levels (1-11),
+ZSTD: levels (1-22).
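A minimal sketch of choosing one of the supported compression codecs with an explicit level (the table path and predicate are hypothetical):
```
from deltalake import DeltaTable, WriterProperties

# ZSTD accepts levels 1-22; omitting compression_level falls back to the codec's default
props = WriterProperties(compression="ZSTD", compression_level=3)

dt = DeltaTable("my_table")  # hypothetical table path
dt.delete(predicate="value > 5", writer_properties=props)
```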
diff --git a/search/search_index.json b/search/search_index.json
index 1ffacffdad..b7fe073616 100644
--- a/search/search_index.json
+++ b/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"The deltalake package","text":"This is the documentation for the native Rust/Python implementation of Delta Lake. It is based on the delta-rs Rust library and requires no Spark or JVM dependencies. For the PySpark implementation, see delta-spark instead.
This module provides the capability to read, write, and manage Delta Lake tables with Python or Rust without Spark or Java. It uses Apache Arrow under the hood, so is compatible with other Arrow-native or integrated libraries such as pandas, DuckDB, and Polars.
"},{"location":"#important-terminology","title":"Important terminology","text":" - \"Rust deltalake\" refers to the Rust API of delta-rs (no Spark dependency)
- \"Python deltalake\" refers to the Python API of delta-rs (no Spark dependency)
- \"Delta Spark\" refers to the Scala impementation of the Delta Lake transaction log protocol. This depends on Spark and Java.
"},{"location":"#why-implement-the-delta-lake-transaction-log-protocol-in-rust-and-scala","title":"Why implement the Delta Lake transaction log protocol in Rust and Scala?","text":"Delta Spark depends on Java and Spark, which is fine for many use cases, but not all Delta Lake users want to depend on these libraries. delta-rs allows using Delta Lake in Rust or other native projects when using a JVM is often not an option.
Python deltalake lets you query Delta tables without depending on Java/Scala.
Suppose you want to query a Delta table with pandas on your local machine. Python deltalake makes it easy to query the table with a simple pip install
command - no need to install Java.
"},{"location":"#contributing","title":"Contributing","text":"The Delta Lake community welcomes contributors from all developers, regardless of your experience or programming background.
You can write Rust code, Python code, documentation, submit bugs, or give talks to the community. We welcome all of these contributions.
Feel free to join our Slack and message us in the #delta-rs channel any time!
We value kind communication and building a productive, friendly environment for maximum collaboration and fun.
"},{"location":"#project-history","title":"Project history","text":"Check out this video by Denny Lee & QP Hou to learn about the genesis of the delta-rs project:
"},{"location":"api/catalog/","title":"Catalog","text":"","boost":2},{"location":"api/catalog/#deltalake.data_catalog.DataCatalog","title":"deltalake.data_catalog.DataCatalog","text":" Bases: Enum
List of the Data Catalogs
","boost":2},{"location":"api/catalog/#deltalake.data_catalog.DataCatalog.AWS","title":"AWS class-attribute
instance-attribute
","text":"AWS = 'glue'\n
Refers to the AWS Glue Data Catalog <https://docs.aws.amazon.com/glue/latest/dg/catalog-and-crawler.html>
_
","boost":2},{"location":"api/catalog/#deltalake.data_catalog.DataCatalog.UNITY","title":"UNITY class-attribute
instance-attribute
","text":"UNITY = 'unity'\n
Refers to the Databricks Unity Catalog <https://docs.databricks.com/data-governance/unity-catalog/index.html>
_
","boost":2},{"location":"api/delta_writer/","title":"Writer","text":"","boost":10},{"location":"api/delta_writer/#write-to-delta-tables","title":"Write to Delta Tables","text":"","boost":10},{"location":"api/delta_writer/#deltalake.write_deltalake","title":"deltalake.write_deltalake","text":"write_deltalake(table_or_uri: Union[str, Path, DeltaTable], data: Union[pd.DataFrame, ds.Dataset, pa.Table, pa.RecordBatch, Iterable[pa.RecordBatch], RecordBatchReader], *, schema: Optional[Union[pa.Schema, DeltaSchema]] = None, partition_by: Optional[Union[List[str], str]] = None, filesystem: Optional[pa_fs.FileSystem] = None, mode: Literal['error', 'append', 'overwrite', 'ignore'] = 'error', file_options: Optional[ds.ParquetFileWriteOptions] = None, max_partitions: Optional[int] = None, max_open_files: int = 1024, max_rows_per_file: int = 10 * 1024 * 1024, min_rows_per_group: int = 64 * 1024, max_rows_per_group: int = 128 * 1024, name: Optional[str] = None, description: Optional[str] = None, configuration: Optional[Mapping[str, Optional[str]]] = None, overwrite_schema: bool = False, storage_options: Optional[Dict[str, str]] = None, partition_filters: Optional[List[Tuple[str, str, Any]]] = None, predicate: Optional[str] = None, large_dtypes: bool = False, engine: Literal['pyarrow', 'rust'] = 'pyarrow', writer_properties: Optional[WriterProperties] = None, custom_metadata: Optional[Dict[str, str]] = None) -> None\n
Write to a Delta Lake table
If the table does not already exist, it will be created.
This function only supports writer protocol version 2 currently. When attempting to write to an existing table with a higher min_writer_version, this function will throw DeltaProtocolError.
Note that this function does NOT register this table in a data catalog.
A locking mechanism is needed to prevent unsafe concurrent writes to a delta lake directory when writing to S3. DynamoDB is the only available locking provider at the moment in delta-rs. To enable DynamoDB as the locking provider, you need to set the AWS_S3_LOCKING_PROVIDER
to 'dynamodb' as a storage_option or as an environment variable.
Additionally, you must create a DynamoDB table with the name 'delta_rs_lock_table' so that it can be automatically discovered by delta-rs. Alternatively, you can use a table name of your choice, but you must set the DYNAMO_LOCK_TABLE_NAME
variable to match your chosen table name. The required schema for the DynamoDB table is as follows:
- Key Schema: AttributeName=key, KeyType=HASH
- Attribute Definitions: AttributeName=key, AttributeType=S
Please note that this locking mechanism is not compatible with any other locking mechanisms, including the one used by Spark.
Parameters:
Name Type Description Default table_or_uri
Union[str, Path, DeltaTable]
URI of a table or a DeltaTable object.
required data
Union[DataFrame, Dataset, Table, RecordBatch, Iterable[RecordBatch], RecordBatchReader]
Data to write. If passing iterable, the schema must also be given.
required schema
Optional[Union[Schema, Schema]]
Optional schema to write.
None
partition_by
Optional[Union[List[str], str]]
List of columns to partition the table by. Only required when creating a new table.
None
filesystem
Optional[FileSystem]
Optional filesystem to pass to PyArrow. If not provided will be inferred from uri. The file system has to be rooted in the table root. Use the pyarrow.fs.SubTreeFileSystem, to adopt the root of pyarrow file systems.
None
mode
Literal['error', 'append', 'overwrite', 'ignore']
How to handle existing data. Default is to error if table already exists. If 'append', will add new data. If 'overwrite', will replace table with new data. If 'ignore', will not write anything if table already exists.
'error'
file_options
Optional[ParquetFileWriteOptions]
Optional write options for Parquet (ParquetFileWriteOptions). Can be provided with defaults using ParquetFileWriteOptions().make_write_options(). Please refer to https://github.com/apache/arrow/blob/master/python/pyarrow/_dataset_parquet.pyx#L492-L533 for the list of available options. Only used in pyarrow engine.
None
max_partitions
Optional[int]
the maximum number of partitions that will be used. Only used in pyarrow engine.
None
max_open_files
int
Limits the maximum number of files that can be left open while writing. If an attempt is made to open too many files then the least recently used file will be closed. If this setting is set too low you may end up fragmenting your data into many small files. Only used in pyarrow engine.
1024
max_rows_per_file
int
Maximum number of rows per file. If greater than 0 then this will limit how many rows are placed in any single file. Otherwise there will be no limit and one file will be created in each output directory unless files need to be closed to respect max_open_files. min_rows_per_group: Minimum number of rows per group. When the value is set, the dataset writer will batch incoming data and only write the row groups to the disk when sufficient rows have accumulated. Only used in pyarrow engine.
10 * 1024 * 1024
max_rows_per_group
int
Maximum number of rows per group. If the value is set, then the dataset writer may split up large incoming batches into multiple row groups. If this value is set, then min_rows_per_group should also be set.
128 * 1024
name
Optional[str]
User-provided identifier for this table.
None
description
Optional[str]
User-provided description for this table.
None
configuration
Optional[Mapping[str, Optional[str]]]
A map containing configuration options for the metadata action.
None
overwrite_schema
bool
If True, allows updating the schema of the table.
False
storage_options
Optional[Dict[str, str]]
options passed to the native delta filesystem. Unused if 'filesystem' is defined.
None
predicate
Optional[str]
When using Overwrite
mode, replace data that matches a predicate. Only used in rust engine.
None
partition_filters
Optional[List[Tuple[str, str, Any]]]
the partition filters that will be used for partition overwrite. Only used in pyarrow engine.
None
large_dtypes
bool
If True, the data schema is kept in large_dtypes; this has no effect on pandas DataFrame input.
False
engine
Literal['pyarrow', 'rust']
writer engine to write the delta table. Rust
engine is still experimental but you may see up to 4x performance improvements over pyarrow.
'pyarrow'
writer_properties
Optional[WriterProperties]
Pass writer properties to the Rust parquet writer.
None
custom_metadata
Optional[Dict[str, str]]
Custom metadata to add to the commitInfo.
None
","boost":10},{"location":"api/delta_writer/#deltalake.WriterProperties","title":"deltalake.WriterProperties dataclass
","text":"WriterProperties(data_page_size_limit: Optional[int] = None, dictionary_page_size_limit: Optional[int] = None, data_page_row_count_limit: Optional[int] = None, write_batch_size: Optional[int] = None, max_row_group_size: Optional[int] = None, compression: Optional[str] = None, compression_level: Optional[int] = None)\n
A Writer Properties instance for the Rust parquet writer.
Create a Writer Properties instance for the Rust parquet writer, see options https://arrow.apache.org/rust/parquet/file/properties/struct.WriterProperties.html:
Parameters:
Name Type Description Default data_page_size_limit
Optional[int]
Limit DataPage size to this in bytes.
None
dictionary_page_size_limit
Optional[int]
Limit the size of each DataPage to store dicts to this amount in bytes.
None
data_page_row_count_limit
Optional[int]
Limit the number of rows in each DataPage.
None
write_batch_size
Optional[int]
Splits internally to smaller batch size.
None
max_row_group_size
Optional[int]
Max number of rows in row group.
None
compression
Optional[str]
compression type
None
compression_level
Optional[int]
level of compression, only relevant for subset of compression types
None
","boost":10},{"location":"api/delta_writer/#convert-to-delta-tables","title":"Convert to Delta Tables","text":"","boost":10},{"location":"api/delta_writer/#deltalake.convert_to_deltalake","title":"deltalake.convert_to_deltalake","text":"convert_to_deltalake(uri: Union[str, Path], mode: Literal['error', 'ignore'] = 'error', partition_by: Optional[pa.Schema] = None, partition_strategy: Optional[Literal['hive']] = None, name: Optional[str] = None, description: Optional[str] = None, configuration: Optional[Mapping[str, Optional[str]]] = None, storage_options: Optional[Dict[str, str]] = None, custom_metadata: Optional[Dict[str, str]] = None) -> None\n
Convert
parquet tables to delta
tables.
Currently only HIVE partitioned tables are supported. Convert to delta
creates a transaction log commit with add actions, and additional properties provided such as configuration, name, and description.
Parameters:
Name Type Description Default uri
Union[str, Path]
URI of a table.
required partition_by
Optional[Schema]
Optional partitioning schema if table is partitioned.
None
partition_strategy
Optional[Literal['hive']]
Optional partition strategy to read and convert
None
mode
Literal['error', 'ignore']
How to handle existing data. Default is to error if table already exists. If 'ignore', will not convert anything if table already exists.
'error'
name
Optional[str]
User-provided identifier for this table.
None
description
Optional[str]
User-provided description for this table.
None
configuration
Optional[Mapping[str, Optional[str]]]
A map containing configuration options for the metadata action.
None
storage_options
Optional[Dict[str, str]]
options passed to the native delta filesystem. Unused if 'filesystem' is defined.
None
custom_metadata
Optional[Dict[str, str]]
custom metadata that will be added to the transaction commit
None
","boost":10},{"location":"api/exceptions/","title":"Exceptions","text":"","boost":2},{"location":"api/exceptions/#deltalake.exceptions.DeltaError","title":"deltalake.exceptions.DeltaError","text":" Bases: builtins.Exception
The base class for Delta-specific errors.
","boost":2},{"location":"api/exceptions/#deltalake.exceptions.DeltaProtocolError","title":"deltalake.exceptions.DeltaProtocolError","text":" Bases: _internal.DeltaError
Raised when a violation with the Delta protocol specs ocurred.
","boost":2},{"location":"api/exceptions/#deltalake.exceptions.TableNotFoundError","title":"deltalake.exceptions.TableNotFoundError","text":" Bases: _internal.DeltaError
Raised when a Delta table cannot be loaded from a location.
","boost":2},{"location":"api/exceptions/#deltalake.exceptions.CommitFailedError","title":"deltalake.exceptions.CommitFailedError","text":" Bases: _internal.DeltaError
Raised when a commit to a Delta table fails.
","boost":2},{"location":"api/schema/","title":"Schema","text":"","boost":2},{"location":"api/schema/#schema-and-field","title":"Schema and field","text":"Schemas, fields, and data types are provided in the deltalake.schema
submodule.
","boost":2},{"location":"api/schema/#deltalake.Schema","title":"deltalake.Schema","text":"Schema(fields: List[Field])\n
Bases: deltalake._internal.StructType
A Delta Lake schema
Create using a list of :class:Field
:
Schema([Field(\"x\", \"integer\"), Field(\"y\", \"string\")]) Schema([Field(x, PrimitiveType(\"integer\"), nullable=True), Field(y, PrimitiveType(\"string\"), nullable=True)])
Or create from a PyArrow schema:
import pyarrow as pa Schema.from_pyarrow(pa.schema({\"x\": pa.int32(), \"y\": pa.string()})) Schema([Field(x, PrimitiveType(\"integer\"), nullable=True), Field(y, PrimitiveType(\"string\"), nullable=True)])
","boost":2},{"location":"api/schema/#deltalake.Schema.invariants","title":"invariants","text":"invariants: List[Tuple[str, str]] = <attribute 'invariants' of 'deltalake._internal.Schema' objects>\n
","boost":2},{"location":"api/schema/#deltalake.Schema.from_json","title":"from_json staticmethod
","text":"from_json(schema_json) -> Schema\n
Create a new Schema from a JSON string.
Parameters:
Name Type Description Default json
str
a JSON string
required Example A schema has the same JSON format as a StructType.
Schema.from_json('''{\n \"type\": \"struct\",\n \"fields\": [{\"name\": \"x\", \"type\": \"integer\", \"nullable\": true, \"metadata\": {}}]\n }\n)'''\n# Returns Schema([Field(x, PrimitiveType(\"integer\"), nullable=True)])\n
","boost":2},{"location":"api/schema/#deltalake.Schema.from_pyarrow","title":"from_pyarrow staticmethod
","text":"from_pyarrow(data_type) -> Schema\n
Create a Schema from a PyArrow Schema type
Will raise TypeError
if the PyArrow type is not a primitive type.
Parameters:
Name Type Description Default type
Schema
A PyArrow Schema
required Returns:
Type Description Schema
a Schema
","boost":2},{"location":"api/schema/#deltalake.Schema.to_json","title":"to_json method descriptor
","text":"to_json() -> str\n
Get the JSON string representation of the Schema.
Returns:
Type Description str
a JSON string
Example A schema has the same JSON format as a StructType.
Schema([Field(\"x\", \"integer\")]).to_json()\n# Returns '{\"type\":\"struct\",\"fields\":[{\"name\":\"x\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}'\n
","boost":2},{"location":"api/schema/#deltalake.Schema.to_pyarrow","title":"to_pyarrow method descriptor
","text":"to_pyarrow(as_large_types: bool = False) -> pyarrow.Schema\n
Return equivalent PyArrow schema
Parameters:
Name Type Description Default as_large_types
bool
get schema with all variable size types (list, binary, string) as large variants (with int64 indices). This is for compatibility with systems like Polars that only support the large versions of Arrow types.
False
Returns:
Type Description Schema
a PyArrow Schema
","boost":2},{"location":"api/schema/#deltalake.Field","title":"deltalake.Field","text":"Field(name: str, type: DataType, *, nullable: bool = True, metadata: Optional[Dict[str, Any]] = None)\n
","boost":2},{"location":"api/schema/#deltalake.Field.metadata","title":"metadata","text":"metadata: Dict[str, Any] = <attribute 'metadata' of 'deltalake._internal.Field' objects>\n
","boost":2},{"location":"api/schema/#deltalake.Field.name","title":"name","text":"name: str = <attribute 'name' of 'deltalake._internal.Field' objects>\n
","boost":2},{"location":"api/schema/#deltalake.Field.nullable","title":"nullable","text":"nullable: bool = <attribute 'nullable' of 'deltalake._internal.Field' objects>\n
","boost":2},{"location":"api/schema/#deltalake.Field.type","title":"type","text":"type: DataType = <attribute 'type' of 'deltalake._internal.Field' objects>\n
","boost":2},{"location":"api/schema/#deltalake.Field.from_json","title":"from_json staticmethod
","text":"from_json(field_json) -> Field\n
Create a Field from a JSON string.
Parameters:
Name Type Description Default json
str
the JSON string.
required Returns:
Type Description Field
Field
Example Field.from_json('''{\n \"name\": \"col\",\n \"type\": \"integer\",\n \"nullable\": true,\n \"metadata\": {}\n }'''\n)\n# Returns Field(col, PrimitiveType(\"integer\"), nullable=True)\n
","boost":2},{"location":"api/schema/#deltalake.Field.from_pyarrow","title":"from_pyarrow staticmethod
","text":"from_pyarrow(field: pyarrow.Field) -> Field\n
Create a Field from a PyArrow field. Note: This currently doesn't preserve field metadata.
Parameters:
Name Type Description Default field
Field
a PyArrow Field
required Returns:
Type Description Field
a Field
","boost":2},{"location":"api/schema/#deltalake.Field.to_json","title":"to_json method descriptor
","text":"to_json() -> str\n
Get the field as JSON string.
Returns:
Type Description str
a JSON string
Example Field(\"col\", \"integer\").to_json()\n# Returns '{\"name\":\"col\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}'\n
","boost":2},{"location":"api/schema/#deltalake.Field.to_pyarrow","title":"to_pyarrow method descriptor
","text":"to_pyarrow() -> pyarrow.Field\n
Convert to an equivalent PyArrow field. Note: This currently doesn't preserve field metadata.
Returns:
Type Description Field
a pyarrow Field
","boost":2},{"location":"api/schema/#data-types","title":"Data types","text":"","boost":2},{"location":"api/schema/#deltalake.schema.PrimitiveType","title":"deltalake.schema.PrimitiveType","text":"PrimitiveType(data_type: str)\n
","boost":2},{"location":"api/schema/#deltalake.schema.PrimitiveType.type","title":"type","text":"type: str = <attribute 'type' of 'deltalake._internal.PrimitiveType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.PrimitiveType.from_json","title":"from_json staticmethod
","text":"from_json(type_json) -> PrimitiveType\n
Create a PrimitiveType from a JSON string
The JSON representation for a primitive type is just a quoted string: PrimitiveType.from_json('\"integer\"')
Parameters:
Name Type Description Default json
str
a JSON string
required Returns:
Type Description PrimitiveType
a PrimitiveType type
","boost":2},{"location":"api/schema/#deltalake.schema.PrimitiveType.from_pyarrow","title":"from_pyarrow staticmethod
","text":"from_pyarrow(data_type) -> PrimitiveType\n
Create a PrimitiveType from a PyArrow datatype
Will raise TypeError
if the PyArrow type is not a primitive type.
Parameters:
Name Type Description Default type
DataType
A PyArrow DataType
required Returns:
Type Description PrimitiveType
a PrimitiveType
","boost":2},{"location":"api/schema/#deltalake.schema.PrimitiveType.to_pyarrow","title":"to_pyarrow method descriptor
","text":"to_pyarrow() -> pyarrow.DataType\n
Get the equivalent PyArrow type (pyarrow.DataType)
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType","title":"deltalake.schema.ArrayType","text":"ArrayType(element_type: DataType, *, contains_null: bool = True)\n
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType.contains_null","title":"contains_null","text":"contains_null: bool = <attribute 'contains_null' of 'deltalake._internal.ArrayType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType.element_type","title":"element_type","text":"element_type: DataType = <attribute 'element_type' of 'deltalake._internal.ArrayType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType.type","title":"type","text":"type: Literal['array'] = <attribute 'type' of 'deltalake._internal.ArrayType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType.from_json","title":"from_json staticmethod
","text":"from_json(type_json) -> ArrayType\n
Create an ArrayType from a JSON string
Parameters:
Name Type Description Default json
str
a JSON string
required Returns:
Type Description ArrayType
an ArrayType
Example The JSON representation for an array type is an object with type
(set to \"array\"
), elementType
, and containsNull
.
ArrayType.from_json(\n '''{\n \"type\": \"array\",\n \"elementType\": \"integer\",\n \"containsNull\": false\n }'''\n)\n# Returns ArrayType(PrimitiveType(\"integer\"), contains_null=False)\n
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType.from_pyarrow","title":"from_pyarrow staticmethod
","text":"from_pyarrow(data_type) -> ArrayType\n
Create an ArrayType from a pyarrow.ListType.
Will raise TypeError
if a different PyArrow DataType is provided.
Parameters:
Name Type Description Default type
ListType
The PyArrow ListType
required Returns:
Type Description ArrayType
an ArrayType
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType.to_json","title":"to_json method descriptor
","text":"to_json() -> str\n
Get the JSON string representation of the type.
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType.to_pyarrow","title":"to_pyarrow method descriptor
","text":"to_pyarrow() -> pyarrow.ListType\n
Get the equivalent PyArrow type.
","boost":2},{"location":"api/schema/#deltalake.schema.MapType","title":"deltalake.schema.MapType","text":"MapType(key_type: DataType, value_type: DataType, *, value_contains_null: bool = True)\n
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.key_type","title":"key_type","text":"key_type: DataType = <attribute 'key_type' of 'deltalake._internal.MapType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.type","title":"type","text":"type: Literal['map'] = <attribute 'type' of 'deltalake._internal.MapType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.value_contains_null","title":"value_contains_null","text":"value_contains_null: bool = <attribute 'value_contains_null' of 'deltalake._internal.MapType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.value_type","title":"value_type","text":"value_type: DataType = <attribute 'value_type' of 'deltalake._internal.MapType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.from_json","title":"from_json staticmethod
","text":"from_json(type_json) -> MapType\n
Create a MapType from a JSON string
Parameters:
Name Type Description Default json
str
a JSON string
required Returns:
Type Description MapType
an ArrayType
Example The JSON representation for a map type is an object with type
(set to map
), keyType
, valueType
, and valueContainsNull
:
MapType.from_json(\n '''{\n \"type\": \"map\",\n \"keyType\": \"integer\",\n \"valueType\": \"string\",\n \"valueContainsNull\": true\n }'''\n)\n# Returns MapType(PrimitiveType(\"integer\"), PrimitiveType(\"string\"), value_contains_null=True)\n
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.from_pyarrow","title":"from_pyarrow staticmethod
","text":"from_pyarrow(data_type) -> MapType\n
Create a MapType from a PyArrow MapType.
Will raise TypeError
if passed a different type.
Parameters:
Name Type Description Default type
MapType
the PyArrow MapType
required Returns:
Type Description MapType
a MapType
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.to_json","title":"to_json method descriptor
","text":"to_json() -> str\n
Get JSON string representation of map type.
Returns:
Type Description str
a JSON string
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.to_pyarrow","title":"to_pyarrow method descriptor
","text":"to_pyarrow() -> pyarrow.MapType\n
Get the equivalent PyArrow data type.
","boost":2},{"location":"api/schema/#deltalake.schema.StructType","title":"deltalake.schema.StructType","text":"StructType(fields: List[Field])\n
","boost":2},{"location":"api/schema/#deltalake.schema.StructType.fields","title":"fields","text":"fields: List[Field] = <attribute 'fields' of 'deltalake._internal.StructType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.StructType.type","title":"type","text":"type: Literal['struct'] = <attribute 'type' of 'deltalake._internal.StructType' objects>\n
The string \"struct\"
","boost":2},{"location":"api/schema/#deltalake.schema.StructType.from_json","title":"from_json staticmethod
","text":"from_json(type_json) -> StructType\n
Create a new StructType from a JSON string.
Parameters:
Name Type Description Default json
str
a JSON string
required Returns:
Type Description StructType
a StructType
Example StructType.from_json(\n '''{\n \"type\": \"struct\",\n \"fields\": [{\"name\": \"x\", \"type\": \"integer\", \"nullable\": true, \"metadata\": {}}]\n }'''\n)\n# Returns StructType([Field(x, PrimitiveType(\"integer\"), nullable=True)])\n
","boost":2},{"location":"api/schema/#deltalake.schema.StructType.from_pyarrow","title":"from_pyarrow staticmethod
","text":"from_pyarrow(data_type) -> StructType\n
Create a new StructType from a PyArrow struct type.
Will raise TypeError
if a different data type is provided.
Parameters:
Name Type Description Default type
StructType
a PyArrow struct type.
required Returns:
Type Description StructType
a StructType
","boost":2},{"location":"api/schema/#deltalake.schema.StructType.to_json","title":"to_json method descriptor
","text":"to_json() -> str\n
Get the JSON representation of the type.
Returns:
Type Description str
a JSON string
Example StructType([Field(\"x\", \"integer\")]).to_json()\n# Returns '{\"type\":\"struct\",\"fields\":[{\"name\":\"x\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}'\n
","boost":2},{"location":"api/schema/#deltalake.schema.StructType.to_pyarrow","title":"to_pyarrow method descriptor
","text":"to_pyarrow() -> pyarrow.StructType\n
Get the equivalent PyArrow StructType
Returns:
Type Description StructType
a PyArrow StructType
","boost":2},{"location":"api/storage/","title":"Storage","text":"The delta filesystem handler for the pyarrow engine writer.
","boost":2},{"location":"api/storage/#deltalake.fs.DeltaStorageHandler","title":"deltalake.fs.DeltaStorageHandler","text":"DeltaStorageHandler(root: str, options: dict[str, str] | None = None, known_sizes: dict[str, int] | None = None)\n
Bases: DeltaFileSystemHandler
, FileSystemHandler
DeltaStorageHandler is a concrete implementations of a PyArrow FileSystemHandler.
","boost":2},{"location":"api/storage/#deltalake.fs.DeltaStorageHandler.get_file_info_selector","title":"get_file_info_selector","text":"get_file_info_selector(selector: FileSelector) -> List[FileInfo]\n
Get info for the files defined by FileSelector.
Parameters:
Name Type Description Default selector
FileSelector
FileSelector object
required Returns:
Type Description List[FileInfo]
list of file info objects
","boost":2},{"location":"api/storage/#deltalake.fs.DeltaStorageHandler.open_input_file","title":"open_input_file","text":"open_input_file(path: str) -> pa.PythonFile\n
Open an input file for random access reading.
Parameters:
Name Type Description Default path
str
The source to open for reading.
required Returns:
Type Description PythonFile
NativeFile
","boost":2},{"location":"api/storage/#deltalake.fs.DeltaStorageHandler.open_input_stream","title":"open_input_stream","text":"open_input_stream(path: str) -> pa.PythonFile\n
Open an input stream for sequential reading.
Parameters:
Name Type Description Default path
str
The source to open for reading.
required Returns:
Type Description PythonFile
NativeFile
","boost":2},{"location":"api/storage/#deltalake.fs.DeltaStorageHandler.open_output_stream","title":"open_output_stream","text":"open_output_stream(path: str, metadata: Optional[Dict[str, str]] = None) -> pa.PythonFile\n
Open an output stream for sequential writing.
If the target already exists, existing data is truncated.
Parameters:
Name Type Description Default path
str
The source to open for writing.
required metadata
Optional[Dict[str, str]]
If not None, a mapping of string keys to string values.
None
Returns:
Type Description PythonFile
NativeFile
","boost":2},{"location":"api/delta_table/","title":"DeltaTable","text":"","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable","title":"deltalake.DeltaTable dataclass
","text":"DeltaTable(table_uri: Union[str, Path, os.PathLike[str]], version: Optional[int] = None, storage_options: Optional[Dict[str, str]] = None, without_files: bool = False, log_buffer_size: Optional[int] = None)\n
Represents a Delta Table
Create the Delta Table from a path with an optional version. Multiple StorageBackends are currently supported: AWS S3, Azure Data Lake Storage Gen2, Google Cloud Storage (GCS) and local URI. Depending on the storage backend used, you could provide options values using the storage_options
parameter.
Parameters:
Name Type Description Default table_uri
Union[str, Path, PathLike[str]]
the path of the DeltaTable
required version
Optional[int]
version of the DeltaTable
None
storage_options
Optional[Dict[str, str]]
a dictionary of the options to use for the storage backend
None
without_files
bool
If True, will load table without tracking files. Some append-only applications might have no need of tracking any files. So, the DeltaTable will be loaded with a significant memory reduction.
False
log_buffer_size
Optional[int]
Number of files to buffer when reading the commit log. A positive integer. Setting a value greater than 1 results in concurrent calls to the storage api. This can decrease latency if there are many files in the log since the last checkpoint, but will also increase memory usage. Possible rate limits of the storage backend should also be considered for optimal performance. Defaults to 4 * number of cpus.
None
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.alter","title":"alter property
","text":"alter: TableAlterer\n
Namespace for all table alter related methods.
Returns:
Name Type Description TableAlterer
TableAlterer
TableAlterer Object
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.optimize","title":"optimize property
","text":"optimize: TableOptimizer\n
Namespace for all table optimize related methods.
Returns:
Name Type Description TableOptimizer
TableOptimizer
TableOptimizer Object
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.cleanup_metadata","title":"cleanup_metadata","text":"cleanup_metadata() -> None\n
Delete expired log files before current version from table. The table log retention is based on the configuration.logRetentionDuration
value, 30 days by default.
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.create","title":"create classmethod
","text":"create(table_uri: Union[str, Path], schema: Union[pyarrow.Schema, DeltaSchema], mode: Literal['error', 'append', 'overwrite', 'ignore'] = 'error', partition_by: Optional[Union[List[str], str]] = None, name: Optional[str] = None, description: Optional[str] = None, configuration: Optional[Mapping[str, Optional[str]]] = None, storage_options: Optional[Dict[str, str]] = None) -> DeltaTable\n
CREATE
or CREATE_OR_REPLACE
a delta table given a table_uri.
Parameters:
Name Type Description Default table_uri
Union[str, Path]
URI of a table
required schema
Union[Schema, Schema]
Table schema
required mode
Literal['error', 'append', 'overwrite', 'ignore']
How to handle existing data. Default is to error if table already exists. If 'append', returns not support error if table exists. If 'overwrite', will CREATE_OR_REPLACE
table. If 'ignore', will not do anything if table already exists. Defaults to \"error\".
'error'
partition_by
Optional[Union[List[str], str]]
List of columns to partition the table by.
None
name
Optional[str]
User-provided identifier for this table.
None
description
Optional[str]
User-provided description for this table.
None
configuration
Optional[Mapping[str, Optional[str]]]
A map containing configuration options for the metadata action.
None
storage_options
Optional[Dict[str, str]]
options passed to the object store crate.
None
Returns:
Name Type Description DeltaTable
DeltaTable
created delta table
Example import pyarrow as pa\n\nfrom deltalake import DeltaTable\n\ndt = DeltaTable.create(\n table_uri=\"my_local_table\",\n schema=pa.schema(\n [pa.field(\"foo\", pa.string()), pa.field(\"bar\", pa.string())]\n ),\n mode=\"error\",\n partition_by=\"bar\",\n)\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.delete","title":"delete","text":"delete(predicate: Optional[str] = None, writer_properties: Optional[WriterProperties] = None) -> Dict[str, Any]\n
Delete records from a Delta Table that statisfy a predicate.
When a predicate is not provided then all records are deleted from the Delta Table. Otherwise a scan of the Delta table is performed to mark any files that contain records that satisfy the predicate. Once files are determined they are rewritten without the records.
Parameters:
Name Type Description Default predicate
Optional[str]
a SQL where clause. If not passed, will delete all rows.
None
Returns:
Type Description Dict[str, Any]
the metrics from delete.
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.file_uris","title":"file_uris","text":"file_uris(partition_filters: Optional[List[Tuple[str, str, Any]]] = None) -> List[str]\n
Get the list of files as absolute URIs, including the scheme (e.g. \"s3://\").
Local files will be just plain absolute paths, without a scheme. (That is, no 'file://' prefix.)
Use the partition_filters parameter to retrieve a subset of files that match the given filters.
Parameters:
Name Type Description Default partition_filters
Optional[List[Tuple[str, str, Any]]]
the partition filters that will be used for getting the matched files
None
Returns:
Type Description List[str]
list of the .parquet files with an absolute URI referenced for the current version of the DeltaTable
Predicates are expressed in disjunctive normal form (DNF), like [(\"x\", \"=\", \"a\"), ...]. DNF allows arbitrary boolean logical combinations of single partition predicates. The innermost tuples each describe a single partition predicate. The list of inner predicates is interpreted as a conjunction (AND), forming a more selective, multi-partition predicate. Each tuple has format: (key, op, value) and compares the key with the value. The supported op are: =
, !=
, in
, and not in
. If the op is in or not in, the value must be a collection such as a list, a set or a tuple. The supported type for value is str. Use empty string ''
for Null partition value.
Example (\"x\", \"=\", \"a\")\n(\"x\", \"!=\", \"a\")\n(\"y\", \"in\", [\"a\", \"b\", \"c\"])\n(\"z\", \"not in\", [\"a\",\"b\"])\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.files","title":"files","text":"files(partition_filters: Optional[List[Tuple[str, str, Any]]] = None) -> List[str]\n
Get the .parquet files of the DeltaTable.
The paths are as they are saved in the delta log, which may either be relative to the table root or absolute URIs.
Parameters:
Name Type Description Default partition_filters
Optional[List[Tuple[str, str, Any]]]
the partition filters that will be used for getting the matched files
None
Returns:
Type Description List[str]
list of the .parquet files referenced for the current version of the DeltaTable
Predicates are expressed in disjunctive normal form (DNF), like [(\"x\", \"=\", \"a\"), ...]. DNF allows arbitrary boolean logical combinations of single partition predicates. The innermost tuples each describe a single partition predicate. The list of inner predicates is interpreted as a conjunction (AND), forming a more selective, multi-partition predicate. Each tuple has format: (key, op, value) and compares the key with the value. The supported op are: =
, !=
, in
, and not in
. If the op is in or not in, the value must be a collection such as a list, a set or a tuple. The supported type for value is str. Use empty string ''
for Null partition value.
Example (\"x\", \"=\", \"a\")\n(\"x\", \"!=\", \"a\")\n(\"y\", \"in\", [\"a\", \"b\", \"c\"])\n(\"z\", \"not in\", [\"a\",\"b\"])\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.from_data_catalog","title":"from_data_catalog classmethod
","text":"from_data_catalog(data_catalog: DataCatalog, database_name: str, table_name: str, data_catalog_id: Optional[str] = None, version: Optional[int] = None, log_buffer_size: Optional[int] = None) -> DeltaTable\n
Create the Delta Table from a Data Catalog.
Parameters:
Name Type Description Default data_catalog
DataCatalog
the Catalog to use for getting the storage location of the Delta Table
required database_name
str
the database name inside the Data Catalog
required table_name
str
the table name inside the Data Catalog
required data_catalog_id
Optional[str]
the identifier of the Data Catalog
None
version
Optional[int]
version of the DeltaTable
None
log_buffer_size
Optional[int]
Number of files to buffer when reading the commit log. A positive integer. Setting a value greater than 1 results in concurrent calls to the storage api. This can decrease latency if there are many files in the log since the last checkpoint, but will also increase memory usage. Possible rate limits of the storage backend should also be considered for optimal performance. Defaults to 4 * number of cpus.
None
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.get_add_actions","title":"get_add_actions","text":"get_add_actions(flatten: bool = False) -> pyarrow.RecordBatch\n
Return a dataframe with all current add actions.
Add actions represent the files that currently make up the table. This data is a low-level representation parsed from the transaction log.
Parameters:
Name Type Description Default flatten
bool
whether to flatten the schema. Partition values columns are given the prefix partition.
, statistics (null_count, min, and max) are given the prefix null_count.
, min.
, and max.
, and tags the prefix tags.
. Nested field names are concatenated with .
.
False
Returns:
Type Description RecordBatch
a PyArrow RecordBatch containing the add action data.
Example from pprint import pprint\nfrom deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data, partition_by=[\"x\"])\ndt = DeltaTable(\"tmp\")\ndf = dt.get_add_actions().to_pandas()\ndf[\"path\"].sort_values(ignore_index=True)\n0 x=1/0\n1 x=2/0\n2 x=3/0\n
df = dt.get_add_actions(flatten=True).to_pandas()\ndf[\"partition.x\"].sort_values(ignore_index=True)\n0 1\n1 2\n2 3\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.history","title":"history","text":"history(limit: Optional[int] = None) -> List[Dict[str, Any]]\n
Run the history command on the DeltaTable. The operations are returned in reverse chronological order.
Parameters:
Name Type Description Default limit
Optional[int]
the commit info limit to return
None
Returns:
Type Description List[Dict[str, Any]]
list of the commit infos registered in the transaction log
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.load_as_version","title":"load_as_version","text":"load_as_version(version: Union[int, str, datetime]) -> None\n
Load/time travel a DeltaTable to a specified version number, or a timestamp version of the table. If a string is passed then the argument should be an RFC 3339 and ISO 8601 date and time string format.
Parameters:
Name Type Description Default version
Union[int, str, datetime]
the identifier of the version of the DeltaTable to load
required Example Use a version number
dt = DeltaTable(\"test_table\")\ndt.load_as_version(1)\n
Use a datetime object
dt.load_as_version(datetime(2023,1,1))\n
Use a datetime in string format
dt.load_as_version(\"2018-01-26T18:30:09Z\")\ndt.load_as_version(\"2018-12-19T16:39:57-08:00\")\ndt.load_as_version(\"2018-01-26T18:30:09.453+00:00\")\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.load_version","title":"load_version","text":"load_version(version: int) -> None\n
Load a DeltaTable with a specified version.
Deprecated
Load_version and load_with_datetime have been combined into DeltaTable.load_as_version
.
Parameters:
Name Type Description Default version
int
the identifier of the version of the DeltaTable to load
required","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.load_with_datetime","title":"load_with_datetime","text":"load_with_datetime(datetime_string: str) -> None\n
Time travel Delta table to the latest version that's created at or before provided datetime_string
argument. The datetime_string
argument should be an RFC 3339 and ISO 8601 date and time string.
Deprecated
Load_version and load_with_datetime have been combined into DeltaTable.load_as_version
.
Parameters:
Name Type Description Default datetime_string
str
the identifier of the datetime point of the DeltaTable to load
required Example \"2018-01-26T18:30:09Z\"\n\"2018-12-19T16:39:57-08:00\"\n\"2018-01-26T18:30:09.453+00:00\"\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.merge","title":"merge","text":"merge(source: Union[pyarrow.Table, pyarrow.RecordBatch, pyarrow.RecordBatchReader, ds.Dataset, pandas.DataFrame], predicate: str, source_alias: Optional[str] = None, target_alias: Optional[str] = None, error_on_type_mismatch: bool = True, writer_properties: Optional[WriterProperties] = None, large_dtypes: bool = True) -> TableMerger\n
Pass the source data which you want to merge into the target delta table, providing a predicate in SQL-like format. You can also specify what to do when the underlying data types do not match those of the target table.
Parameters:
Name Type Description Default source
Union[Table, RecordBatch, RecordBatchReader, Dataset, DataFrame]
source data
required predicate
str
SQL like predicate on how to merge
required source_alias
Optional[str]
Alias for the source table
None
target_alias
Optional[str]
Alias for the target table
None
error_on_type_mismatch
bool
specify if merge will return error if data types are mismatching :default = True
True
writer_properties
Optional[WriterProperties]
Pass writer properties to the Rust parquet writer
None
large_dtypes
bool
If True, the data schema is kept in large_dtypes.
True
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.metadata","title":"metadata","text":"metadata() -> Metadata\n
Get the current metadata of the DeltaTable.
Returns:
Type Description Metadata
the current Metadata registered in the transaction log
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.protocol","title":"protocol","text":"protocol() -> ProtocolVersions\n
Get the reader and writer protocol versions of the DeltaTable.
Returns:
Type Description ProtocolVersions
the current ProtocolVersions registered in the transaction log
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.repair","title":"repair","text":"repair(dry_run: bool = False) -> Dict[str, Any]\n
Repair the Delta Table by auditing active files that do not exist in the underlying filesystem and removing them. This can be useful when there are accidental deletions or corrupted files.
Active files are ones that have an add action in the log, but no corresponding remove action. This operation creates a new FSCK transaction containing a remove action for each of the missing or corrupted files.
Parameters:
Name Type Description Default dry_run
bool
when activated, list only the files, otherwise add remove actions to transaction log. Defaults to False.
False
Returns: The metrics from repair (FSCK) action.
Example from deltalake import DeltaTable\ndt = DeltaTable('TEST')\ndt.repair(dry_run=False)\n
Results in {'dry_run': False, 'files_removed': ['6-0d084325-6885-4847-b008-82c1cf30674c-0.parquet', 5-4fba1d3e-3e20-4de1-933d-a8e13ac59f53-0.parquet']}\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.restore","title":"restore","text":"restore(target: Union[int, datetime, str], *, ignore_missing_files: bool = False, protocol_downgrade_allowed: bool = False) -> Dict[str, Any]\n
Run the Restore command on the Delta Table: restore table to a given version or datetime.
Parameters:
Name Type Description Default target
Union[int, datetime, str]
the expected version will restore, which represented by int, date str or datetime.
required ignore_missing_files
bool
whether the operation carry on when some data files missing.
False
protocol_downgrade_allowed
bool
whether the operation when protocol version upgraded.
False
Returns:
Type Description Dict[str, Any]
the metrics from restore.
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.schema","title":"schema","text":"schema() -> DeltaSchema\n
Get the current schema of the DeltaTable.
Returns:
Type Description Schema
the current Schema registered in the transaction log
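Example An illustrative sketch (the table path \"tmp\" is an assumption):
from deltalake import DeltaTable\n\ndt = DeltaTable(\"tmp\")\nprint(dt.schema())\n# convert to a PyArrow schema if needed\ndt.schema().to_pyarrow()\n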
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.to_pandas","title":"to_pandas","text":"to_pandas(partitions: Optional[List[Tuple[str, str, Any]]] = None, columns: Optional[List[str]] = None, filesystem: Optional[Union[str, pa_fs.FileSystem]] = None, filters: Optional[FilterType] = None) -> pandas.DataFrame\n
Build a pandas dataframe using data from the DeltaTable.
Parameters:
Name Type Description Default partitions
Optional[List[Tuple[str, str, Any]]]
A list of partition filters, see help(DeltaTable.files_by_partitions) for filter syntax
None
columns
Optional[List[str]]
The columns to project. This can be a list of column names to include (order and duplicates will be preserved)
None
filesystem
Optional[Union[str, FileSystem]]
A concrete implementation of the Pyarrow FileSystem or a fsspec-compatible interface. If None, the first file path will be used to determine the right FileSystem
None
filters
Optional[FilterType]
A disjunctive normal form (DNF) predicate for filtering rows. If you pass a filter you do not need to pass partitions
None
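Example A minimal sketch (the table path and the id1/id2/v1 column names are illustrative assumptions):
from deltalake import DeltaTable\n\ndt = DeltaTable(\"delta/G1_1e9_1e2_0_0\")\ndf = dt.to_pandas(columns=[\"id1\", \"id2\", \"v1\"], filters=[(\"id1\", \"==\", \"id016\")])\n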
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.to_pyarrow_dataset","title":"to_pyarrow_dataset","text":"to_pyarrow_dataset(partitions: Optional[List[Tuple[str, str, Any]]] = None, filesystem: Optional[Union[str, pa_fs.FileSystem]] = None, parquet_read_options: Optional[ParquetReadOptions] = None) -> pyarrow.dataset.Dataset\n
Build a PyArrow Dataset using data from the DeltaTable.
Parameters:
Name Type Description Default partitions
Optional[List[Tuple[str, str, Any]]]
A list of partition filters, see help(DeltaTable.files_by_partitions) for filter syntax
None
filesystem
Optional[Union[str, FileSystem]]
A concrete implementation of the Pyarrow FileSystem or a fsspec-compatible interface. If None, the first file path will be used to determine the right FileSystem
None
parquet_read_options
Optional[ParquetReadOptions]
Optional read options for Parquet. Use this to handle INT96 to timestamp conversion for edge cases like 0001-01-01 or 9999-12-31
None
More info: https://arrow.apache.org/docs/python/generated/pyarrow.dataset.ParquetReadOptions.html
Returns:
Type Description Dataset
the PyArrow dataset
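Example A minimal sketch (the table path and column names are illustrative assumptions); the dataset is scanned lazily, so the filter and column projection can be pushed down:
import pyarrow.dataset as ds\nfrom deltalake import DeltaTable\n\ndt = DeltaTable(\"delta/G1_1e9_1e2_0_0\")\ndataset = dt.to_pyarrow_dataset()\ntbl = dataset.to_table(filter=ds.field(\"id1\") == \"id016\", columns=[\"id1\", \"v1\"])\n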
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.to_pyarrow_table","title":"to_pyarrow_table","text":"to_pyarrow_table(partitions: Optional[List[Tuple[str, str, Any]]] = None, columns: Optional[List[str]] = None, filesystem: Optional[Union[str, pa_fs.FileSystem]] = None, filters: Optional[FilterType] = None) -> pyarrow.Table\n
Build a PyArrow Table using data from the DeltaTable.
Parameters:
Name Type Description Default partitions
Optional[List[Tuple[str, str, Any]]]
A list of partition filters, see help(DeltaTable.files_by_partitions) for filter syntax
None
columns
Optional[List[str]]
The columns to project. This can be a list of column names to include (order and duplicates will be preserved)
None
filesystem
Optional[Union[str, FileSystem]]
A concrete implementation of the Pyarrow FileSystem or a fsspec-compatible interface. If None, the first file path will be used to determine the right FileSystem
None
filters
Optional[FilterType]
A disjunctive normal form (DNF) predicate for filtering rows. If you pass a filter you do not need to pass partitions
None
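Example A minimal sketch (the table path and column names are illustrative assumptions):
from deltalake import DeltaTable\n\ndt = DeltaTable(\"delta/G1_1e9_1e2_0_0\")\ntbl = dt.to_pyarrow_table(columns=[\"id1\", \"v1\"], filters=[(\"id1\", \"==\", \"id016\")])\n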
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.update","title":"update","text":"update(updates: Optional[Dict[str, str]] = None, new_values: Optional[Dict[str, Union[int, float, str, datetime, bool, List[Any]]]] = None, predicate: Optional[str] = None, writer_properties: Optional[WriterProperties] = None, error_on_type_mismatch: bool = True) -> Dict[str, Any]\n
UPDATE
records in the Delta Table that match an optional predicate. Either updates or new_values needs to be passed for it to execute.
Parameters:
Name Type Description Default updates
Optional[Dict[str, str]]
a mapping of column name to update SQL expression.
None
new_values
Optional[Dict[str, Union[int, float, str, datetime, bool, List[Any]]]]
a mapping of column name to python datatype.
None
predicate
Optional[str]
a logical expression.
None
writer_properties
Optional[WriterProperties]
Pass writer properties to the Rust parquet writer.
None
error_on_type_mismatch
bool
specify whether update should raise an error if the data types are mismatched. Defaults to True.
True
Returns:
Type Description Dict[str, Any]
the metrics from update
Example Update some row values with SQL predicate
This is equivalent to UPDATE table SET deleted = true WHERE id = '3'
from deltalake import write_deltalake, DeltaTable\nimport pandas as pd\ndf = pd.DataFrame(\n {\"id\": [\"1\", \"2\", \"3\"],\n \"deleted\": [False, False, False],\n \"price\": [10., 15., 20.]\n })\nwrite_deltalake(\"tmp\", df)\ndt = DeltaTable(\"tmp\")\ndt.update(predicate=\"id = '3'\", updates = {\"deleted\": 'True'})\n\n{'num_added_files': 1, 'num_removed_files': 1, 'num_updated_rows': 1, 'num_copied_rows': 2, 'execution_time_ms': ..., 'scan_time_ms': ...}\n
Update all row values
This is equivalent to UPDATE table SET deleted = true, id = concat(id, '_old')
.
dt.update(updates = {\"deleted\": 'True', \"id\": \"concat(id, '_old')\"})\n\n{'num_added_files': 1, 'num_removed_files': 1, 'num_updated_rows': 3, 'num_copied_rows': 0, 'execution_time_ms': ..., 'scan_time_ms': ...}\n
Use Python objects instead of SQL strings
Use the new_values
parameter instead of the updates
parameter. For example, this is equivalent to UPDATE table SET price = 150.10 WHERE id = '1'
dt.update(predicate=\"id = '1_old'\", new_values = {\"price\": 150.10})\n\n{'num_added_files': 1, 'num_removed_files': 1, 'num_updated_rows': 1, 'num_copied_rows': 2, 'execution_time_ms': ..., 'scan_time_ms': ...}\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.update_incremental","title":"update_incremental","text":"update_incremental() -> None\n
Updates the DeltaTable to the latest version by incrementally applying newer versions.
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.vacuum","title":"vacuum","text":"vacuum(retention_hours: Optional[int] = None, dry_run: bool = True, enforce_retention_duration: bool = True) -> List[str]\n
Run the Vacuum command on the Delta Table: list and delete files that are no longer referenced by the Delta table and are older than the retention threshold.
Parameters:
Name Type Description Default retention_hours
Optional[int]
the retention threshold in hours; if None, the value from configuration.deletedFileRetentionDuration
is used, or a default of 1 week otherwise.
None
dry_run
bool
when enabled, only list the files that would be deleted; otherwise, delete them.
True
enforce_retention_duration
bool
when disabled, accepts retention hours smaller than the value from configuration.deletedFileRetentionDuration
.
True
Returns:
Type Description List[str]
the list of files that are no longer referenced by the Delta Table and are older than the retention threshold.
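Example A minimal sketch (the table path \"tmp\" and the retention value are illustrative assumptions):
from deltalake import DeltaTable\n\ndt = DeltaTable(\"tmp\")\n# dry run first: list the files that would be deleted\ndt.vacuum(retention_hours=168, dry_run=True)\n# then actually delete them\ndt.vacuum(retention_hours=168, dry_run=False)\n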
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.version","title":"version","text":"version() -> int\n
Get the version of the DeltaTable.
Returns:
Type Description int
The current version of the DeltaTable
","boost":2},{"location":"api/delta_table/delta_table_alterer/","title":"TableAlterer","text":"","boost":10},{"location":"api/delta_table/delta_table_alterer/#deltalake.table.TableAlterer","title":"deltalake.table.TableAlterer","text":"TableAlterer(table: DeltaTable)\n
API for various table alteration commands.
","boost":10},{"location":"api/delta_table/delta_table_alterer/#deltalake.table.TableAlterer.add_constraint","title":"add_constraint","text":"add_constraint(constraints: Dict[str, str]) -> None\n
Add constraints to the table. Limited to a single constraint at a time.
Parameters:
Name Type Description Default constraints
Dict[str, str]
mapping of constraint name to SQL-expression to evaluate on write
required Example from deltalake import DeltaTable\ndt = DeltaTable(\"test_table_constraints\")\ndt.alter.add_constraint({\n \"value_gt_5\": \"value > 5\",\n})\n
Check configuration
dt.metadata().configuration\n{'delta.constraints.value_gt_5': 'value > 5'}\n
","boost":10},{"location":"api/delta_table/delta_table_merger/","title":"TableMerger","text":"","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger","title":"deltalake.table.TableMerger","text":"TableMerger(table: DeltaTable, source: pyarrow.RecordBatchReader, predicate: str, source_alias: Optional[str] = None, target_alias: Optional[str] = None, safe_cast: bool = True, writer_properties: Optional[WriterProperties] = None)\n
API for various table MERGE
commands.
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.execute","title":"execute","text":"execute() -> Dict[str, Any]\n
Executes MERGE
with the previously provided settings, in Rust, using the Apache DataFusion query engine.
Returns:
Name Type Description Dict
Dict[str, Any]
metrics
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.when_matched_delete","title":"when_matched_delete","text":"when_matched_delete(predicate: Optional[str] = None) -> TableMerger\n
Delete a matched row from the table only if the given predicate
(if specified) is true for the matched row. If not specified it deletes all matches.
Parameters:
Name Type Description Default predicate
(str | None, Optional)
SQL like predicate on when to delete.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
Example Delete on a predicate
from deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\n\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data)\ndt = DeltaTable(\"tmp\")\nnew_data = pa.table({\"x\": [2, 3], \"deleted\": [False, True]})\n\n(\n dt.merge(\n source=new_data,\n predicate='target.x = source.x',\n source_alias='source',\n target_alias='target')\n .when_matched_delete(\n predicate=\"source.deleted = true\")\n .execute()\n)\n{'num_source_rows': 2, 'num_target_rows_inserted': 0, 'num_target_rows_updated': 0, 'num_target_rows_deleted': 1, 'num_target_rows_copied': 2, 'num_output_rows': 2, 'num_target_files_added': 1, 'num_target_files_removed': 1, 'execution_time_ms': ..., 'scan_time_ms': ..., 'rewrite_time_ms': ...}\n\ndt.to_pandas().sort_values(\"x\", ignore_index=True)\n x y\n0 1 4\n1 2 5\n
Delete all records that were matched
dt = DeltaTable(\"tmp\")\n(\n dt.merge(\n source=new_data,\n predicate='target.x = source.x',\n source_alias='source',\n target_alias='target')\n .when_matched_delete()\n .execute()\n)\n{'num_source_rows': 2, 'num_target_rows_inserted': 0, 'num_target_rows_updated': 0, 'num_target_rows_deleted': 1, 'num_target_rows_copied': 1, 'num_output_rows': 1, 'num_target_files_added': 1, 'num_target_files_removed': 1, 'execution_time_ms': ..., 'scan_time_ms': ..., 'rewrite_time_ms': ...}\n\ndt.to_pandas()\n x y\n0 1 4\n
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.when_matched_update","title":"when_matched_update","text":"when_matched_update(updates: Dict[str, str], predicate: Optional[str] = None) -> TableMerger\n
Update a matched table row based on the rules defined by updates
. If a predicate
is specified, then it must evaluate to true for the row to be updated.
Parameters:
Name Type Description Default updates
Dict[str, str]
a mapping of column name to update SQL expression.
required predicate
Optional[str]
SQL like predicate on when to update.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
Example from deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\n\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data)\ndt = DeltaTable(\"tmp\")\nnew_data = pa.table({\"x\": [1], \"y\": [7]})\n\n(\n dt.merge(\n source=new_data,\n predicate=\"target.x = source.x\",\n source_alias=\"source\",\n target_alias=\"target\")\n .when_matched_update(updates={\"x\": \"source.x\", \"y\": \"source.y\"})\n .execute()\n)\n{'num_source_rows': 1, 'num_target_rows_inserted': 0, 'num_target_rows_updated': 1, 'num_target_rows_deleted': 0, 'num_target_rows_copied': 2, 'num_output_rows': 3, 'num_target_files_added': 1, 'num_target_files_removed': 1, 'execution_time_ms': ..., 'scan_time_ms': ..., 'rewrite_time_ms': ...}\n\ndt.to_pandas()\n x y\n0 1 7\n1 2 5\n2 3 6\n
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.when_matched_update_all","title":"when_matched_update_all","text":"when_matched_update_all(predicate: Optional[str] = None) -> TableMerger\n
Update all target fields with the corresponding source fields; source and target are required to have the same field names. If a predicate
is specified, then it must evaluate to true for the row to be updated.
Parameters:
Name Type Description Default predicate
Optional[str]
SQL like predicate on when to update all columns.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
Example from deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\n\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data)\ndt = DeltaTable(\"tmp\")\nnew_data = pa.table({\"x\": [1], \"y\": [7]})\n\n(\n dt.merge(\n source=new_data,\n predicate=\"target.x = source.x\",\n source_alias=\"source\",\n target_alias=\"target\")\n .when_matched_update_all()\n .execute()\n)\n{'num_source_rows': 1, 'num_target_rows_inserted': 0, 'num_target_rows_updated': 1, 'num_target_rows_deleted': 0, 'num_target_rows_copied': 2, 'num_output_rows': 3, 'num_target_files_added': 1, 'num_target_files_removed': 1, 'execution_time_ms': ..., 'scan_time_ms': ..., 'rewrite_time_ms': ...}\n\ndt.to_pandas()\n x y\n0 1 7\n1 2 5\n2 3 6\n
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.when_not_matched_by_source_delete","title":"when_not_matched_by_source_delete","text":"when_not_matched_by_source_delete(predicate: Optional[str] = None) -> TableMerger\n
Delete a target row that has no matches in the source from the table only if the given predicate
(if specified) is true for the target row.
Parameters:
Name Type Description Default predicate
Optional[str]
SQL like predicate on when to delete when not matched by source.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
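Example An illustrative sketch following the pattern of the other merge clauses (the table path \"tmp\" and the x/y columns are assumptions; the metrics output is omitted):
from deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\n\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data)\ndt = DeltaTable(\"tmp\")\nnew_data = pa.table({\"x\": [2, 3]})\n\n(\n    dt.merge(\n        source=new_data,\n        predicate='target.x = source.x',\n        source_alias='source',\n        target_alias='target')\n    .when_not_matched_by_source_delete()\n    .execute()\n)\n# the row with x = 1 has no match in the source, so it is deleted\n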
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.when_not_matched_by_source_update","title":"when_not_matched_by_source_update","text":"when_not_matched_by_source_update(updates: Dict[str, str], predicate: Optional[str] = None) -> TableMerger\n
Update a target row that has no matches in the source based on the rules defined by updates
. If a predicate
is specified, then it must evaluate to true for the row to be updated.
Parameters:
Name Type Description Default updates
Dict[str, str]
a mapping of column name to update SQL expression.
required predicate
Optional[str]
SQL like predicate on when to update.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
Example from deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\n\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data)\ndt = DeltaTable(\"tmp\")\nnew_data = pa.table({\"x\": [2, 3, 4]})\n\n(\n dt.merge(\n source=new_data,\n predicate='target.x = source.x',\n source_alias='source',\n target_alias='target')\n .when_not_matched_by_source_update(\n predicate = \"y > 3\",\n updates = {\"y\": \"0\"})\n .execute()\n)\n{'num_source_rows': 3, 'num_target_rows_inserted': 0, 'num_target_rows_updated': 1, 'num_target_rows_deleted': 0, 'num_target_rows_copied': 2, 'num_output_rows': 3, 'num_target_files_added': 1, 'num_target_files_removed': 1, 'execution_time_ms': ..., 'scan_time_ms': ..., 'rewrite_time_ms': ...}\n\ndt.to_pandas().sort_values(\"x\", ignore_index=True)\n x y\n0 1 0\n1 2 5\n2 3 6\n
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.when_not_matched_insert","title":"when_not_matched_insert","text":"when_not_matched_insert(updates: Dict[str, str], predicate: Optional[str] = None) -> TableMerger\n
Insert a new row to the target table based on the rules defined by updates
. If a predicate
is specified, then it must evaluate to true for the new row to be inserted.
Parameters:
Name Type Description Default updates
dict
a mapping of column name to insert SQL expression.
required predicate
(str | None, Optional)
SQL like predicate on when to insert.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
Example from deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\n\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data)\ndt = DeltaTable(\"tmp\")\nnew_data = pa.table({\"x\": [4], \"y\": [7]})\n\n(\n dt.merge(\n source=new_data,\n predicate=\"target.x = source.x\",\n source_alias=\"source\",\n target_alias=\"target\",)\n .when_not_matched_insert(\n updates={\n \"x\": \"source.x\",\n \"y\": \"source.y\",\n })\n .execute()\n)\n{'num_source_rows': 1, 'num_target_rows_inserted': 1, 'num_target_rows_updated': 0, 'num_target_rows_deleted': 0, 'num_target_rows_copied': 3, 'num_output_rows': 4, 'num_target_files_added': 1, 'num_target_files_removed': 1, 'execution_time_ms': ..., 'scan_time_ms': ..., 'rewrite_time_ms': ...}\n\ndt.to_pandas().sort_values(\"x\", ignore_index=True)\n x y\n0 1 4\n1 2 5\n2 3 6\n3 4 7\n
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.when_not_matched_insert_all","title":"when_not_matched_insert_all","text":"when_not_matched_insert_all(predicate: Optional[str] = None) -> TableMerger\n
Insert a new row into the target table, copying all source fields to the corresponding target fields. Source and target are required to have the same field names. If a predicate
is specified, then it must evaluate to true for the new row to be inserted.
Parameters:
Name Type Description Default predicate
Optional[str]
SQL like predicate on when to insert.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
Example from deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\n\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data)\ndt = DeltaTable(\"tmp\")\nnew_data = pa.table({\"x\": [4], \"y\": [7]})\n\n(\n dt.merge(\n source=new_data,\n predicate='target.x = source.x',\n source_alias='source',\n target_alias='target')\n .when_not_matched_insert_all()\n .execute()\n)\n{'num_source_rows': 1, 'num_target_rows_inserted': 1, 'num_target_rows_updated': 0, 'num_target_rows_deleted': 0, 'num_target_rows_copied': 3, 'num_output_rows': 4, 'num_target_files_added': 1, 'num_target_files_removed': 1, 'execution_time_ms': ..., 'scan_time_ms': ..., 'rewrite_time_ms': ...}\n\ndt.to_pandas().sort_values(\"x\", ignore_index=True)\n x y\n0 1 4\n1 2 5\n2 3 6\n3 4 7\n
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.with_writer_properties","title":"with_writer_properties","text":"with_writer_properties(data_page_size_limit: Optional[int] = None, dictionary_page_size_limit: Optional[int] = None, data_page_row_count_limit: Optional[int] = None, write_batch_size: Optional[int] = None, max_row_group_size: Optional[int] = None) -> TableMerger\n
Deprecated
Use .merge(writer_properties = WriterProperties())
instead
Pass writer properties to the Rust parquet writer, see options https://arrow.apache.org/rust/parquet/file/properties/struct.WriterProperties.html:
Parameters:
Name Type Description Default data_page_size_limit
Optional[int]
Limit DataPage size to this in bytes.
None
dictionary_page_size_limit
Optional[int]
Limit the size of each dictionary page to this amount in bytes.
None
data_page_row_count_limit
Optional[int]
Limit the number of rows in each DataPage.
None
write_batch_size
Optional[int]
Splits data internally into smaller batches of this size.
None
max_row_group_size
Optional[int]
Max number of rows in row group.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
","boost":2},{"location":"api/delta_table/delta_table_optimizer/","title":"TableOptimizer","text":"","boost":10},{"location":"api/delta_table/delta_table_optimizer/#deltalake.table.TableOptimizer","title":"deltalake.table.TableOptimizer","text":"TableOptimizer(table: DeltaTable)\n
API for various table optimization commands.
","boost":10},{"location":"api/delta_table/delta_table_optimizer/#deltalake.table.TableOptimizer.compact","title":"compact","text":"compact(partition_filters: Optional[FilterType] = None, target_size: Optional[int] = None, max_concurrent_tasks: Optional[int] = None, min_commit_interval: Optional[Union[int, timedelta]] = None, writer_properties: Optional[WriterProperties] = None) -> Dict[str, Any]\n
Compacts small files to reduce the total number of files in the table.
This operation is idempotent; if run twice on the same table (assuming it has not been updated) it will do nothing the second time.
If this operation happens concurrently with any operations other than append, it will fail.
Parameters:
Name Type Description Default partition_filters
Optional[FilterType]
the partition filters that will be used for getting the matched files
None
target_size
Optional[int]
desired file size after bin-packing files, in bytes. If not provided, will attempt to read the table configuration value delta.targetFileSize
. If that value isn't set, will use default value of 256MB.
None
max_concurrent_tasks
Optional[int]
the maximum number of concurrent tasks to use for file compaction. Defaults to number of CPUs. More concurrent tasks can make compaction faster, but will also use more memory.
None
min_commit_interval
Optional[Union[int, timedelta]]
minimum interval in seconds or as timedeltas before a new commit is created. Interval is useful for long running executions. Set to 0 or timedelta(0), if you want a commit per partition.
None
writer_properties
Optional[WriterProperties]
Pass writer properties to the Rust parquet writer.
None
Returns:
Type Description Dict[str, Any]
the metrics from optimize
Example Use a timedelta object to specify the seconds, minutes or hours of the interval.
from deltalake import DeltaTable, write_deltalake\nfrom datetime import timedelta\nimport pyarrow as pa\n\nwrite_deltalake(\"tmp\", pa.table({\"x\": [1], \"y\": [4]}))\nwrite_deltalake(\"tmp\", pa.table({\"x\": [2], \"y\": [5]}), mode=\"append\")\n\ndt = DeltaTable(\"tmp\")\ntime_delta = timedelta(minutes=10)\ndt.optimize.compact(min_commit_interval=time_delta)\n{'numFilesAdded': 1, 'numFilesRemoved': 2, 'filesAdded': ..., 'filesRemoved': ..., 'partitionsOptimized': 1, 'numBatches': 2, 'totalConsideredFiles': 2, 'totalFilesSkipped': 0, 'preserveInsertionOrder': True}\n
","boost":10},{"location":"api/delta_table/delta_table_optimizer/#deltalake.table.TableOptimizer.z_order","title":"z_order","text":"z_order(columns: Iterable[str], partition_filters: Optional[FilterType] = None, target_size: Optional[int] = None, max_concurrent_tasks: Optional[int] = None, max_spill_size: int = 20 * 1024 * 1024 * 1024, min_commit_interval: Optional[Union[int, timedelta]] = None, writer_properties: Optional[WriterProperties] = None) -> Dict[str, Any]\n
Reorders the data using a Z-order curve to improve data skipping.
This also performs compaction, so the same parameters as compact() apply.
Parameters:
Name Type Description Default columns
Iterable[str]
the columns to use for Z-ordering. There must be at least one column.
partition_filters: the partition filters that will be used for getting the matched files
required target_size
Optional[int]
desired file size after bin-packing files, in bytes. If not provided, will attempt to read the table configuration value delta.targetFileSize
. If that value isn't set, will use default value of 256MB.
None
max_concurrent_tasks
Optional[int]
the maximum number of concurrent tasks to use for file compaction. Defaults to number of CPUs. More concurrent tasks can make compaction faster, but will also use more memory.
None
max_spill_size
int
the maximum number of bytes to spill to disk. Defaults to 20GB.
20 * 1024 * 1024 * 1024
min_commit_interval
Optional[Union[int, timedelta]]
minimum interval in seconds or as timedeltas before a new commit is created. Interval is useful for long running executions. Set to 0 or timedelta(0), if you want a commit per partition.
None
writer_properties
Optional[WriterProperties]
Pass writer properties to the Rust parquet writer.
None
Returns:
Type Description Dict[str, Any]
the metrics from optimize
Example Use a timedelta object to specify the seconds, minutes or hours of the interval.
from deltalake import DeltaTable, write_deltalake\nfrom datetime import timedelta\nimport pyarrow as pa\n\nwrite_deltalake(\"tmp\", pa.table({\"x\": [1], \"y\": [4]}))\nwrite_deltalake(\"tmp\", pa.table({\"x\": [2], \"y\": [5]}), mode=\"append\")\n\ndt = DeltaTable(\"tmp\")\ntime_delta = timedelta(minutes=10)\ndt.optimize.z_order([\"x\"], min_commit_interval=time_delta)\n{'numFilesAdded': 1, 'numFilesRemoved': 2, 'filesAdded': ..., 'filesRemoved': ..., 'partitionsOptimized': 0, 'numBatches': 1, 'totalConsideredFiles': 2, 'totalFilesSkipped': 0, 'preserveInsertionOrder': True}\n
","boost":10},{"location":"api/delta_table/metadata/","title":"Metadata","text":"","boost":2},{"location":"api/delta_table/metadata/#deltalake.Metadata","title":"deltalake.Metadata dataclass
","text":"Metadata(table: RawDeltaTable)\n
Create a Metadata instance.
","boost":2},{"location":"api/delta_table/metadata/#deltalake.Metadata.configuration","title":"configuration property
","text":"configuration: Dict[str, str]\n
Return the DeltaTable properties.
","boost":2},{"location":"api/delta_table/metadata/#deltalake.Metadata.created_time","title":"created_time property
","text":"created_time: int\n
Return the time when this metadata action of the DeltaTable was created, in milliseconds since the Unix epoch.
","boost":2},{"location":"api/delta_table/metadata/#deltalake.Metadata.description","title":"description property
","text":"description: str\n
Return the user-provided description of the DeltaTable.
","boost":2},{"location":"api/delta_table/metadata/#deltalake.Metadata.id","title":"id property
","text":"id: int\n
Return the unique identifier of the DeltaTable.
","boost":2},{"location":"api/delta_table/metadata/#deltalake.Metadata.name","title":"name property
","text":"name: str\n
Return the user-provided identifier of the DeltaTable.
","boost":2},{"location":"api/delta_table/metadata/#deltalake.Metadata.partition_columns","title":"partition_columns property
","text":"partition_columns: List[str]\n
Return an array containing the names of the partitioned columns of the DeltaTable.
","boost":2},{"location":"how-delta-lake-works/architecture-of-delta-table/","title":"Architecture of a Delta Lake table","text":"A Delta table consists of Parquet files that contain data and a transaction log that stores metadata about the transactions.
Let's create a Delta table, perform some operations, and inspect the files that are created.
"},{"location":"how-delta-lake-works/architecture-of-delta-table/#delta-lake-transaction-examples","title":"Delta Lake transaction examples","text":"Start by creating a pandas DataFrame and writing it out to a Delta table.
import pandas as pd\nfrom deltalake import DeltaTable, write_deltalake\n\ndf = pd.DataFrame({\"num\": [1, 2, 3], \"letter\": [\"a\", \"b\", \"c\"]})\nwrite_deltalake(\"tmp/some-table\", df)\n
Now inspect the files created in storage:
tmp/some-table\n\u251c\u2500\u2500 0-62dffa23-bbe1-4496-8fb5-bff6724dc677-0.parquet\n\u2514\u2500\u2500 _delta_log\n \u2514\u2500\u2500 00000000000000000000.json\n
The Parquet file stores the data that was written. The _delta_log
directory stores metadata about the transactions. Let's inspect the _delta_log/00000000000000000000.json
file.
{\n \"protocol\": {\n \"minReaderVersion\": 1,\n \"minWriterVersion\": 1\n }\n}\n{\n \"metaData\": {\n \"id\": \"b96ea1a2-1830-4da2-8827-5334cc6104ed\",\n \"name\": null,\n \"description\": null,\n \"format\": {\n \"provider\": \"parquet\",\n \"options\": {}\n },\n \"schemaString\": \"{\\\"type\\\":\\\"struct\\\",\\\"fields\\\":[{\\\"name\\\":\\\"num\\\",\\\"type\\\":\\\"long\\\",\\\"nullable\\\":true,\\\"metadata\\\":{}},{\\\"name\\\":\\\"letter\\\",\\\"type\\\":\\\"string\\\",\\\"nullable\\\":true,\\\"metadata\\\":{}}]}\",\n \"partitionColumns\": [],\n \"createdTime\": 1701740315599,\n \"configuration\": {}\n }\n}\n{\n \"add\": {\n \"path\": \"0-62dffa23-bbe1-4496-8fb5-bff6724dc677-0.parquet\",\n \"size\": 2208,\n \"partitionValues\": {},\n \"modificationTime\": 1701740315597,\n \"dataChange\": true,\n \"stats\": \"{\\\"numRecords\\\": 3, \\\"minValues\\\": {\\\"num\\\": 1, \\\"letter\\\": \\\"a\\\"}, \\\"maxValues\\\": {\\\"num\\\": 3, \\\"letter\\\": \\\"c\\\"}, \\\"nullCount\\\": {\\\"num\\\": 0, \\\"letter\\\": 0}}\"\n }\n}\n{\n \"commitInfo\": {\n \"timestamp\": 1701740315602,\n \"operation\": \"CREATE TABLE\",\n \"operationParameters\": {\n \"location\": \"file:///Users/matthew.powers/Documents/code/delta/delta-examples/notebooks/python-deltalake/tmp/some-table\",\n \"metadata\": \"{\\\"configuration\\\":{},\\\"created_time\\\":1701740315599,\\\"description\\\":null,\\\"format\\\":{\\\"options\\\":{},\\\"provider\\\":\\\"parquet\\\"},\\\"id\\\":\\\"b96ea1a2-1830-4da2-8827-5334cc6104ed\\\",\\\"name\\\":null,\\\"partition_columns\\\":[],\\\"schema\\\":{\\\"fields\\\":[{\\\"metadata\\\":{},\\\"name\\\":\\\"num\\\",\\\"nullable\\\":true,\\\"type\\\":\\\"long\\\"},{\\\"metadata\\\":{},\\\"name\\\":\\\"letter\\\",\\\"nullable\\\":true,\\\"type\\\":\\\"string\\\"}],\\\"type\\\":\\\"struct\\\"}}\",\n \"protocol\": \"{\\\"minReaderVersion\\\":1,\\\"minWriterVersion\\\":1}\",\n \"mode\": \"ErrorIfExists\"\n },\n \"clientVersion\": \"delta-rs.0.17.0\"\n }\n}\n
The transaction log file contains the following information:
- the files added to the Delta table
- schema of the files
- column level metadata including the min/max value for each file
Create another pandas DataFrame and append it to the Delta table to see how this transaction is recorded.
df = pd.DataFrame({\"num\": [8, 9], \"letter\": [\"dd\", \"ee\"]})\nwrite_deltalake(f\"{cwd}/tmp/delta-table\", df, mode=\"append\")\n
Here are the files in storage:
tmp/some-table\n\u251c\u2500\u2500 0-62dffa23-bbe1-4496-8fb5-bff6724dc677-0.parquet\n\u251c\u2500\u2500 1-57abb6fb-2249-43ba-a7be-cf09bcc230de-0.parquet\n\u2514\u2500\u2500 _delta_log\n \u251c\u2500\u2500 00000000000000000000.json\n \u2514\u2500\u2500 00000000000000000001.json\n
Here are the contents of the _delta_log/00000000000000000001.json
file:
{\n \"add\": {\n \"path\": \"1-57abb6fb-2249-43ba-a7be-cf09bcc230de-0.parquet\",\n \"size\": 2204,\n \"partitionValues\": {},\n \"modificationTime\": 1701740386169,\n \"dataChange\": true,\n \"stats\": \"{\\\"numRecords\\\": 2, \\\"minValues\\\": {\\\"num\\\": 8, \\\"letter\\\": \\\"dd\\\"}, \\\"maxValues\\\": {\\\"num\\\": 9, \\\"letter\\\": \\\"ee\\\"}, \\\"nullCount\\\": {\\\"num\\\": 0, \\\"letter\\\": 0}}\"\n }\n}\n{\n \"commitInfo\": {\n \"timestamp\": 1701740386169,\n \"operation\": \"WRITE\",\n \"operationParameters\": {\n \"partitionBy\": \"[]\",\n \"mode\": \"Append\"\n },\n \"clientVersion\": \"delta-rs.0.17.0\"\n }\n}\n
The transaction log records that the second file has been persisted in the Delta table.
Now create a third pandas DataFrame and overwrite the Delta table with the new data.
df = pd.DataFrame({\"num\": [11, 22], \"letter\": [\"aa\", \"bb\"]})\nwrite_deltalake(f\"{cwd}/tmp/delta-table\", df, mode=\"append\")\n
Here are the files in storage:
tmp/some-table\n\u251c\u2500\u2500 0-62dffa23-bbe1-4496-8fb5-bff6724dc677-0.parquet\n\u251c\u2500\u2500 1-57abb6fb-2249-43ba-a7be-cf09bcc230de-0.parquet\n\u251c\u2500\u2500 2-95ef2108-480c-4b89-96f0-ff9185dab9ad-0.parquet\n\u2514\u2500\u2500 _delta_log\n \u251c\u2500\u2500 00000000000000000000.json\n \u251c\u2500\u2500 00000000000000000001.json\n \u2514\u2500\u2500 00000000000000000002.json\n
Here are the contents of the _delta_log/00000000000000000002.json
file:
{\n \"add\": {\n \"path\": \"2-95ef2108-480c-4b89-96f0-ff9185dab9ad-0.parquet\",\n \"size\": 2204,\n \"partitionValues\": {},\n \"modificationTime\": 1701740465102,\n \"dataChange\": true,\n \"stats\": \"{\\\"numRecords\\\": 2, \\\"minValues\\\": {\\\"num\\\": 11, \\\"letter\\\": \\\"aa\\\"}, \\\"maxValues\\\": {\\\"num\\\": 22, \\\"letter\\\": \\\"bb\\\"}, \\\"nullCount\\\": {\\\"num\\\": 0, \\\"letter\\\": 0}}\"\n }\n}\n{\n \"remove\": {\n \"path\": \"0-62dffa23-bbe1-4496-8fb5-bff6724dc677-0.parquet\",\n \"deletionTimestamp\": 1701740465102,\n \"dataChange\": true,\n \"extendedFileMetadata\": false,\n \"partitionValues\": {},\n \"size\": 2208\n }\n}\n{\n \"remove\": {\n \"path\": \"1-57abb6fb-2249-43ba-a7be-cf09bcc230de-0.parquet\",\n \"deletionTimestamp\": 1701740465102,\n \"dataChange\": true,\n \"extendedFileMetadata\": false,\n \"partitionValues\": {},\n \"size\": 2204\n }\n}\n{\n \"commitInfo\": {\n \"timestamp\": 1701740465102,\n \"operation\": \"WRITE\",\n \"operationParameters\": {\n \"mode\": \"Overwrite\",\n \"partitionBy\": \"[]\"\n },\n \"clientVersion\": \"delta-rs.0.17.0\"\n }\n}\n
This transaction adds a data file and marks the two existing data files for removal. Marking a file for removal in the transaction log is known as \"tombstoning the file\" or a \"logical delete\". This is different from a \"physical delete\", which actually removes the data file from storage.
"},{"location":"how-delta-lake-works/architecture-of-delta-table/#how-delta-table-operations-differ-from-data-lakes","title":"How Delta table operations differ from data lakes","text":"Data lakes consist of data files persisted in storage. They don't have a transaction log that retain metadata about the transactions.
Data lakes perform transactions differently than Delta tables.
When you perform an overwrite tranasction with a Delta table, you logically delete the exiting data without physically removing it.
Data lakes don't support logical deletes, so you have to physically delete the data from storage.
Logical data operations are safer because they can be rolled back if they don't complete successfully. Physically removing data from storage can be dangerous, especially if it's before a transaction is complete.
We're now ready to look into Delta Lake ACID transactions in more detail.
"},{"location":"integrations/delta-lake-arrow/","title":"Delta Lake Arrow Integrations","text":"Delta Lake tables can be exposed as Arrow tables and Arrow datasets, which allows for interoperability with a variety of query engines.
This page shows you how to convert Delta tables to Arrow data structures and teaches you the difference between Arrow tables and Arrow datasets. Tables are \"eager\" and datasets are \"lazy\", which has important performance implications. Keep reading to learn more!
"},{"location":"integrations/delta-lake-arrow/#delta-lake-to-arrow-dataset","title":"Delta Lake to Arrow Dataset","text":"Delta tables can easily be exposed as Arrow datasets. This makes it easy for any query engine that can read Arrow datasets to read a Delta table.
Let's take a look at the h2o groupby dataset that contains 9 columns of data. Here are three representative rows of data:
+-------+-------+--------------+-------+-------+--------+------+------+---------+\n| id1 | id2 | id3 | id4 | id5 | id6 | v1 | v2 | v3 |\n|-------+-------+--------------+-------+-------+--------+------+------+---------|\n| id016 | id046 | id0000109363 | 88 | 13 | 146094 | 4 | 6 | 18.8377 |\n| id039 | id087 | id0000466766 | 14 | 30 | 111330 | 4 | 14 | 46.7973 |\n| id047 | id098 | id0000307804 | 85 | 23 | 187639 | 3 | 5 | 47.5773 |\n+-------+-------+--------------+-------+-------+--------+------+------+---------+\n
Here's how to expose the Delta table as a PyArrow dataset and run a query with DuckDB:
import duckdb\nfrom deltalake import DeltaTable\n\ntable = DeltaTable(\"delta/G1_1e9_1e2_0_0\")\ndataset = table.to_pyarrow_dataset()\nquack = duckdb.arrow(dataset)\nquack.filter(\"id1 = 'id016' and v2 > 10\")\n
Here's the result:
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 id1 \u2502 id2 \u2502 id3 \u2502 id4 \u2502 id5 \u2502 id6 \u2502 v1 \u2502 v2 \u2502 v3 \u2502\n\u2502 varchar \u2502 varchar \u2502 varchar \u2502 int32 \u2502 int32 \u2502 int32 \u2502 int32 \u2502 int32 \u2502 double \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 id016 \u2502 id054 \u2502 id0002309114 \u2502 62 \u2502 95 \u2502 7180859 \u2502 4 \u2502 13 \u2502 7.750173 \u2502\n\u2502 id016 \u2502 id044 \u2502 id0003968533 \u2502 63 \u2502 98 \u2502 2356363 \u2502 4 \u2502 14 \u2502 3.942417 \u2502\n\u2502 id016 \u2502 id034 \u2502 id0001082839 \u2502 58 \u2502 73 \u2502 8039808 \u2502 5 \u2502 12 \u2502 76.820135 \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 ? rows (>9999 rows, 3 shown) 9 columns \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
Arrow datasets allow for the predicates to get pushed down to the query engine, so the query is executed quickly.
"},{"location":"integrations/delta-lake-arrow/#delta-lake-to-arrow-table","title":"Delta Lake to Arrow Table","text":"You can also run the same query with DuckDB on an Arrow table:
quack = duckdb.arrow(table.to_pyarrow_table())\nquack.filter(\"id1 = 'id016' and v2 > 10\")\n
This returns the same result, but it runs slower.
"},{"location":"integrations/delta-lake-arrow/#difference-between-arrow-dataset-and-arrow-table","title":"Difference between Arrow Dataset and Arrow Table","text":"Arrow Datasets are lazy and allow for full predicate pushdown unlike Arrow tables which are eagerly loaded into memory.
The previous DuckDB queries were run on a 1 billion row dataset that's roughly 50 GB when stored as an uncompressed CSV file. Here are the runtimes when the data is stored in a Delta table and the queries are executed on a 2021 Macbook M1 with 64 GB of RAM:
- Arrow table: 17.1 seconds
- Arrow dataset: 0.01 seconds
The query runs much faster on an Arrow dataset because the predicates can be pushed down to the query engine and lots of data can be skipped.
Arrow tables are eagerly materialized in memory and don't allow for the same amount of data skipping.
"},{"location":"integrations/delta-lake-arrow/#multiple-query-engines-can-query-arrow-datasets","title":"Multiple query engines can query Arrow Datasets","text":"Other query engines like DataFusion can also query Arrow datasets, see the following example:
from datafusion import SessionContext\n\nctx = SessionContext()\nctx.register_dataset(\"my_dataset\", table.to_pyarrow_dataset())\nctx.sql(\"select * from my_dataset where v2 > 5\")\n
Here's the result:
+-------+-------+--------------+-----+-----+--------+----+----+-----------+\n| id1 | id2 | id3 | id4 | id5 | id6 | v1 | v2 | v3 |\n+-------+-------+--------------+-----+-----+--------+----+----+-----------+\n| id082 | id049 | id0000022715 | 97 | 55 | 756924 | 2 | 11 | 74.161136 |\n| id053 | id052 | id0000113549 | 19 | 56 | 139048 | 1 | 10 | 95.178444 |\n| id090 | id043 | id0000637409 | 94 | 50 | 12448 | 3 | 12 | 60.21896 |\n+-------+-------+--------------+-----+-----+--------+----+----+-----------+\n
Any query engine that's capable of reading an Arrow table/dataset can read a Delta table.
"},{"location":"integrations/delta-lake-arrow/#conclusion","title":"Conclusion","text":"Delta tables can easily be exposed as Arrow tables/datasets.
Therefore any query engine that can read an Arrow table/dataset can also read a Delta table.
Arrow datasets allow for more predicates to be pushed down to the query engine, so they can deliver better performance than Arrow tables.
"},{"location":"integrations/delta-lake-datafusion/","title":"Using Delta Lake with DataFusion","text":"This page explains how to use Delta Lake with DataFusion.
Delta Lake offers DataFusion users better performance and more features compared to other formats like CSV or Parquet.
Delta Lake works well with the DataFusion Rust API and the DataFusion Python API. It's a great option for all DataFusion users.
Delta Lake also depends on DataFusion to implement SQL-related functionality under the hood. We will also discuss this dependency at the end of this guide in case you're interested in learning more about the symbiotic relationship between the two libraries.
"},{"location":"integrations/delta-lake-datafusion/#delta-lake-performance-benefits-for-datafusion-users","title":"Delta Lake performance benefits for DataFusion users","text":"Let's run some DataFusion queries on a Parquet file and a Delta table with the same data to learn more about the performance benefits of Delta Lake.
Suppose you have the following dataset with 1 billion rows and 9 columns. Here are the first three rows of data:
+-------+-------+--------------+-------+-------+--------+------+------+---------+\n| id1 | id2 | id3 | id4 | id5 | id6 | v1 | v2 | v3 |\n|-------+-------+--------------+-------+-------+--------+------+------+---------|\n| id016 | id046 | id0000109363 | 88 | 13 | 146094 | 4 | 6 | 18.8377 |\n| id039 | id087 | id0000466766 | 14 | 30 | 111330 | 4 | 14 | 46.7973 |\n| id047 | id098 | id0000307804 | 85 | 23 | 187639 | 3 | 5 | 47.5773 |\n+-------+-------+--------------+-------+-------+--------+------+------+---------+\n
Here's how to register a Delta Lake table as a PyArrow dataset:
from datafusion import SessionContext\nfrom deltalake import DeltaTable\n\nctx = SessionContext()\ntable = DeltaTable(\"G1_1e9_1e2_0_0\")\nctx.register_dataset(\"my_delta_table\", table.to_pyarrow_dataset())\n
Now query the table:
ctx.sql(\"select id1, sum(v1) as v1 from my_delta_table where id1='id096' group by id1\")\n
That query takes 2.8 seconds to execute.
Let's register the same dataset as a Parquet table, run the same query, and compare the runtime difference.
Register the Parquet table and run the query:
path = \"G1_1e9_1e2_0_0.parquet\"\nctx.register_parquet(\"my_parquet_table\", path)\nctx.sql(\"select id1, sum(v1) as v1 from my_parquet_table where id1='id096' group by id1\")\n
This query takes 5.3 seconds to run.
Parquet stores data in row groups and DataFusion can intelligently skip row groups that don't contain relevant data, so the query is faster than a file format like CSV which doesn't support row group skipping.
Delta Lake stores file-level metadata information in the transaction log, so it can skip entire files when queries are executed. Delta Lake can skip entire files and then skip row groups within the individual files. This makes Delta Lake even faster than Parquet files, especially for larger datasets spread across many files.
"},{"location":"integrations/delta-lake-datafusion/#delta-lake-features-for-datafusion-users","title":"Delta Lake features for DataFusion users","text":"Delta Lake also provides other features that are useful for DataFusion users like ACID transactions, concurrency protection, time travel, versioned data, and more.
"},{"location":"integrations/delta-lake-datafusion/#why-delta-lake-depends-on-datafusion","title":"Why Delta Lake depends on DataFusion","text":"Delta Lake depends on DataFusion to provide some end-user features.
DataFusion is useful in providing SQL-related Delta Lake features. Some examples:
- Update and merge are written in terms of SQL expressions.
- Invariants and constraints are written in terms of SQL expressions.
Anytime we have to evaluate SQL, we need some sort of SQL engine. We use DataFusion for that.
"},{"location":"integrations/delta-lake-datafusion/#conclusion","title":"Conclusion","text":"Delta Lake is a great file format for DataFusion users.
Delta Lake also uses DataFusion to provide some end-user features.
DataFusion and Delta Lake have a wonderful symbiotic relationship and play very nicely with each other.
See this guide for more information on Delta Lake and PyArrow and why PyArrow Datasets are often a better option than PyArrow tables.
"},{"location":"integrations/delta-lake-pandas/","title":"Using Delta Lake with pandas","text":"Delta Lake is a great storage system for pandas analyses. This page shows how it's easy to use Delta Lake with pandas, the unique features Delta Lake offers pandas users, and how Delta Lake can make your pandas analyses run faster.
Delta Lake is very easy to install for pandas analyses, just run pip install deltalake
.
Delta Lake allows for performance optimizations, so pandas queries can run much faster than the same query run on data stored in CSV or Parquet. See the following chart for the query runtime for a Delta table compared with CSV/Parquet.
Z Ordered Delta tables run this query much faster than when the data is stored in Parquet or CSV. Let's dive in deeper and see how Delta Lake makes pandas faster.
"},{"location":"integrations/delta-lake-pandas/#delta-lake-makes-pandas-queries-run-faster","title":"Delta Lake makes pandas queries run faster","text":"There are a few reasons Delta Lake can make pandas queries run faster:
- column pruning: only grabbing the columns relevant for a query
- file skipping: only reading files with data for the query
- row group skipping: only reading row groups with data for the query
- Z ordering data: colocating similar data in the same files, so file skipping is more effective
Reading less data (fewer columns and/or fewer rows) is how Delta Lake makes pandas queries run faster.
Parquet allows for column pruning and row group skipping, but doesn't support file-level skipping or Z Ordering. CSV doesn't support any of these performance optimizations.
Let's take a look at a sample dataset and run a query to see the performance enhancements offered by Delta Lake.
Suppose you have a 1 billion row dataset with 9 columns, here are the first three rows of the dataset:
+-------+-------+--------------+-------+-------+--------+------+------+---------+\n| id1 | id2 | id3 | id4 | id5 | id6 | v1 | v2 | v3 |\n|-------+-------+--------------+-------+-------+--------+------+------+---------|\n| id016 | id046 | id0000109363 | 88 | 13 | 146094 | 4 | 6 | 18.8377 |\n| id039 | id087 | id0000466766 | 14 | 30 | 111330 | 4 | 14 | 46.7973 |\n| id047 | id098 | id0000307804 | 85 | 23 | 187639 | 3 | 5 | 47.5773 |\n+-------+-------+--------------+-------+-------+--------+------+------+---------+\n
The dataset is roughly 50 GB when stored as an uncompressed CSV file. Let's run some queries on a 2021 Macbook M1 with 64 GB of RAM.
Start by running the query on an uncompressed CSV file:
(\n pd.read_csv(f\"{Path.home()}/data/G1_1e9_1e2_0_0.csv\", usecols=[\"id1\", \"id2\", \"v1\"])\n .query(\"id1 == 'id016'\")\n .groupby(\"id2\")\n .agg({\"v1\": \"sum\"})\n)\n
This query takes 234 seconds to execute. It runs out of memory if the usecols
parameter is not set.
Now let's convert the CSV dataset to Parquet and run the same query on the data stored in a Parquet file.
(\n pd.read_parquet(\n f\"{Path.home()}/data/G1_1e9_1e2_0_0.parquet\", columns=[\"id1\", \"id2\", \"v1\"]\n )\n .query(\"id1 == 'id016'\")\n .groupby(\"id2\")\n .agg({\"v1\": \"sum\"})\n)\n
This query takes 118 seconds to execute.
Parquet stores data in row groups and allows for skipping when the filters
predicates are set. Run the Parquet query again with row group skipping enabled:
(\n pd.read_parquet(\n f\"{Path.home()}/data/G1_1e9_1e2_0_0.parquet\",\n columns=[\"id1\", \"id2\", \"v1\"],\n filters=[(\"id1\", \"==\", \"id016\")],\n )\n .query(\"id1 == 'id016'\")\n .groupby(\"id2\")\n .agg({\"v1\": \"sum\"})\n)\n
This query runs in 19 seconds. Lots of row groups can be skipped for this particular query.
Now let's run the same query on a Delta table to see the out-of-the box performance:
(\n DeltaTable(f\"{Path.home()}/data/deltalake_baseline_G1_1e9_1e2_0_0\", version=0)\n .to_pandas(filters=[(\"id1\", \"==\", \"id016\")], columns=[\"id1\", \"id2\", \"v1\"])\n .query(\"id1 == 'id016'\")\n .groupby(\"id2\")\n .agg({\"v1\": \"sum\"})\n)\n
This query runs in 8 seconds, which is a significant performance enhancement.
Now let's Z Order the Delta table by id1
which will make the data skipping even better. Run the query again on the Z Ordered Delta table:
(\n DeltaTable(f\"{Path.home()}/data/deltalake_baseline_G1_1e9_1e2_0_0\", version=1)\n .to_pandas(filters=[(\"id1\", \"==\", \"id016\")], columns=[\"id1\", \"id2\", \"v1\"])\n .query(\"id1 == 'id016'\")\n .groupby(\"id2\")\n .agg({\"v1\": \"sum\"})\n)\n
The query now executes in 2.4 seconds.
Delta tables can make certain pandas queries run much faster.
"},{"location":"integrations/delta-lake-pandas/#delta-lake-lets-pandas-users-time-travel","title":"Delta Lake lets pandas users time travel","text":"Start by creating a Delta table:
from deltalake import write_deltalake, DeltaTable\n\ndf = pd.DataFrame({\"num\": [1, 2, 3], \"letter\": [\"a\", \"b\", \"c\"]})\nwrite_deltalake(\"tmp/some-table\", df)\n
Here are the contents of the Delta table (version 0 of the Delta table):
+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n+-------+----------+\n
Now append two rows to the Delta table:
df = pd.DataFrame({\"num\": [8, 9], \"letter\": [\"dd\", \"ee\"]})\nwrite_deltalake(\"tmp/some-table\", df, mode=\"append\")\n
Here are the contents after the append operation (version 1 of the Delta table):
+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n| 8 | dd |\n| 9 | ee |\n+-------+----------+\n
Now perform an overwrite transaction:
df = pd.DataFrame({\"num\": [11, 22], \"letter\": [\"aa\", \"bb\"]})\nwrite_deltalake(\"tmp/some-table\", df, mode=\"overwrite\")\n
Here are the contents after the overwrite operation (version 2 of the Delta table):
+-------+----------+\n| num | letter |\n|-------+----------|\n| 8 | dd |\n| 9 | ee |\n+-------+----------+\n
Read in the Delta table and it will grab the latest version by default:
DeltaTable(\"tmp/some-table\").to_pandas()\n\n+-------+----------+\n| num | letter |\n|-------+----------|\n| 11 | aa |\n| 22 | bb |\n+-------+----------+\n
You can easily time travel back to version 0 of the Delta table:
DeltaTable(\"tmp/some-table\", version=0).to_pandas()\n\n+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n+-------+----------+\n
You can also time travel to version 1 of the Delta table:
DeltaTable(\"tmp/some-table\", version=1).to_pandas()\n\n+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n| 8 | dd |\n| 9 | ee |\n+-------+----------+\n
Time travel is a powerful feature that pandas users cannot access with CSV or Parquet.
"},{"location":"integrations/delta-lake-pandas/#schema-enforcement","title":"Schema enforcement","text":"Delta tables only allow you to append DataFrame with matching schema by default. Suppose you have a DataFrame with num
and animal
columns, which is different from the Delta table that has num
and letter
columns.
Try to append this DataFrame with a mismatched schema to the existing table:
df = pd.DataFrame({\"num\": [5, 6], \"animal\": [\"cat\", \"dog\"]})\nwrite_deltalake(\"tmp/some-table\", df)\n
This transaction will be rejected and will return the following error message:
ValueError: Schema of data does not match table schema\nData schema:\nnum: int64\nanimal: string\n-- schema metadata --\npandas: '{\"index_columns\": [{\"kind\": \"range\", \"name\": null, \"start\": 0, \"' + 474\nTable Schema:\nnum: int64\nletter: string\n
Schema enforcement protects your table from getting corrupted by appending data with mismatched schema. Parquet and CSV don't offer schema enforcement for pandas users.
"},{"location":"integrations/delta-lake-pandas/#overwriting-schema-of-table","title":"Overwriting schema of table","text":"You can overwrite the table contents and schema by setting the overwrite_schema
option. Here's how to overwrite the table contents:
write_deltalake(\"tmp/some-table\", df, mode=\"overwrite\", overwrite_schema=True)\n
Here are the contents of the table after the values and schema have been overwritten:
+-------+----------+\n| num | animal |\n|-------+----------|\n| 5 | cat |\n| 6 | dog |\n+-------+----------+\n
"},{"location":"integrations/delta-lake-pandas/#in-memory-vs-in-storage-data-changes","title":"In-memory vs. in-storage data changes","text":"It's important to distinguish between data stored in-memory and data stored on disk when understanding the functionality offered by Delta Lake.
pandas loads data from storage (CSV, Parquet, or Delta Lake) into in-memory DataFrames.
pandas makes it easy to modify the data in memory, say update a column value. It's not easy to update a column value in storage systems like CSV or Parquet using pandas.
Delta Lake makes it easy for pandas users to update data in storage.
"},{"location":"integrations/delta-lake-pandas/#why-delta-lake-allows-for-faster-queries","title":"Why Delta Lake allows for faster queries","text":"Delta tables store data in many files and metadata about the files in the transaction log. Delta Lake allows for certain queries to skip entire files, which makes pandas queries run much faster.
"},{"location":"integrations/delta-lake-pandas/#more-resources","title":"More resources","text":"See this talk on why Delta Lake is the best file format for pandas analyses to learn more:
"},{"location":"integrations/delta-lake-pandas/#conclusion","title":"Conclusion","text":"Delta Lake provides many features that make it an excellent format for pandas analyses:
- performance optimizations make pandas queries run faster
- data management features make pandas analyses more reliable
- advanced features allow you to perform more complex pandas analyses
Python deltalake offers pandas users a better experience compared with CSV/Parquet.
"},{"location":"integrations/delta-lake-polars/","title":"Using Delta Lake with polars","text":"This page explains why Delta Lake is a great storage system for Polars analyses.
You will learn how to create Delta tables with Polars, how to query Delta tables with Polars, and the unique advantages Delta Lake offers the Polars community.
Here are some amazing benefits that Delta Lake provides Polars users:
- time travel
- ACID transactions for reliable writes
- better performance with file skipping
- enhanced file skipping via Z Ordering
- ability to rollback mistakes
- and many, many more
Let's start by showing how to use Polars with Delta Lake, explore how Delta Lake can make Polars queries run faster, and then look at all the cool features Delta Lake offers Polars users.
"},{"location":"integrations/delta-lake-polars/#creating-a-delta-lake-table-with-polars","title":"Creating a Delta Lake table with Polars","text":"Create a Polars DataFrame and write it out to a Delta table:
import polars as pl\n\ndf = pl.DataFrame({\"x\": [1, 2, 3]})\ndf.write_delta(\"tmp/bear_delta_lake\")\n
Inspect the contents of the Delta table:
print(pl.read_delta(\"tmp/bear_delta_lake\"))\n\n+-----+\n| x |\n| --- |\n| i64 |\n+=====+\n| 1 |\n| 2 |\n| 3 |\n+-----+\n
Now create another Polars DataFrame and append it to the existing Delta table:
df2 = pl.DataFrame({\"x\": [8, 9, 10]})\ndf2.write_delta(\"tmp/bear_delta_lake\", mode=\"append\")\n
Re-inspect the contents of the Delta table:
print(pl.read_delta(\"tmp/bear_delta_lake\"))\n\n+-----+\n| x |\n| --- |\n| i64 |\n+=====+\n| 1 |\n| 2 |\n| 3 |\n| 8 |\n| 9 |\n| 10 |\n+-----+\n
Now overwrite the existing Delta table:
df3 = pl.DataFrame({\"x\": [55, 66, 77]})\ndf3.write_delta(\"tmp/bear_delta_lake\", mode=\"overwrite\")\n
Inspect the Delta table:
print(pl.read_delta(\"tmp/bear_delta_lake\"))\n\n+-----+\n| x |\n| --- |\n| i64 |\n+=====+\n| 55 |\n| 66 |\n| 77 |\n+-----+\n
The Delta table now has three versions, as shown in the following diagram:
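If you'd like to confirm the versions programmatically, one option is to inspect the table with the deltalake package (a sketch; the Polars examples above only need pl.read_delta):
from deltalake import DeltaTable\n\ndt = DeltaTable(\"tmp/bear_delta_lake\")\nprint(dt.version())  # 2, because versions are zero-indexed\nprint(len(dt.history()))  # 3 commits: create, append, overwrite\n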
"},{"location":"integrations/delta-lake-polars/#time-travel-with-delta-lake-for-polars","title":"Time travel with Delta Lake for Polars","text":"Time travel back to version 0 of the Delta table:
print(pl.read_delta(\"tmp/bear_delta_lake\", version=0))\n\n+-----+\n| x |\n| --- |\n| i64 |\n+=====+\n| 1 |\n| 2 |\n| 3 |\n+-----+\n
Time travel back to version 1 of the Delta table:
print(pl.read_delta(\"tmp/bear_delta_lake\", version=1))\n\n+-----+\n| x |\n| --- |\n| i64 |\n+=====+\n| 1 |\n| 2 |\n| 3 |\n| 9 |\n| 8 |\n| 10 |\n+-----+\n
Read the Delta table without specifying a version and see how it reads the latest version by default:
print(pl.read_delta(\"tmp/bear_delta_lake\"))\n\n+-----+\n| x |\n| --- |\n| i64 |\n+=====+\n| 55 |\n| 66 |\n| 77 |\n+-----+\n
Let's dive into how to read Delta tables with Polars in more detail and compare the query runtime performance on larger datasets.
"},{"location":"integrations/delta-lake-polars/#reading-a-delta-lake-table-with-polars","title":"Reading a Delta Lake table with Polars","text":"Let's look at the h2o groupby dataset that has 1 billion rows and 9 columns. Here are the first three rows of the dataset:
+-------+-------+--------------+-------+-------+--------+------+------+---------+\n| id1 | id2 | id3 | id4 | id5 | id6 | v1 | v2 | v3 |\n|-------+-------+--------------+-------+-------+--------+------+------+---------|\n| id016 | id046 | id0000109363 | 88 | 13 | 146094 | 4 | 6 | 18.8377 |\n| id039 | id087 | id0000466766 | 14 | 30 | 111330 | 4 | 14 | 46.7973 |\n| id047 | id098 | id0000307804 | 85 | 23 | 187639 | 3 | 5 | 47.5773 |\n+-------+-------+--------------+-------+-------+--------+------+------+---------+\n
This dataset is 50GB when stored in an uncompressed CSV file. Let's run some queries on this dataset when it's stored in different file formats with Polars.
This section will show the runtime for a query when the data is stored in CSV, Parquet, and Delta Lake and explain why Delta tables are the fastest.
Start by running a query on an uncompressed CSV file with read_csv
:
pl.read_csv(\"~/data/G1_1e9_1e2_0_0.csv\").filter(pl.col(\"id1\") < \"id016\").group_by(\n [\"id1\", \"id2\"]\n).agg(pl.sum(\"v1\").alias(\"v1_sum\"))\n
This query errors out after running for several minutes because the machine runs out of memory. Let's try it again with scan_csv
.
pl.scan_csv(\"~/data/G1_1e9_1e2_0_0.csv\").filter(pl.col(\"id1\") < \"id016\").group_by(\n [\"id1\", \"id2\"]\n).agg(pl.sum(\"v1\").alias(\"v1_sum\")).collect()\n
This query runs in 56.2 seconds.
Now let's run the same query when the data is stored in a Parquet file:
pl.scan_parquet(\"~/data/G1_1e9_1e2_0_0.parquet\").filter(\n pl.col(\"id1\") < \"id016\"\n).group_by([\"id1\", \"id2\"]).agg(pl.sum(\"v1\").alias(\"v1_sum\")).collect()\n
This query runs in 8.3 seconds. It's much faster because Polars is optimized to skip row groups in Parquet files that don't contain data that's relevant for the query.
Then run the same query on the newly created Delta table:
pl.scan_delta(\"~/data/deltalake/G1_1e9_1e2_0_0\", version=1).filter(\n pl.col(\"id1\") < \"id016\"\n).group_by([\"id1\", \"id2\"]).agg(pl.sum(\"v1\").alias(\"v1_sum\")).collect()\n
This query runs in 7.2 seconds. Polars can run this query faster because it can inspect the Delta transaction log and skip entire files that don't contain relevant data before performing the ordinary Parquet row group skipping.
Finally run the query on the Delta table after it has been Z Ordered by id1
:
pl.scan_delta(\"~/data/deltalake/G1_1e9_1e2_0_0\", version=2).filter(\n pl.col(\"id1\") < \"id016\"\n).group_by([\"id1\", \"id2\"]).agg(pl.sum(\"v1\").alias(\"v1_sum\")).collect()\n
This query runs in 3.5 seconds. The query on the Z Ordered Delta table is even faster because similar data has been co-located in the same files. This allows for even greater data skipping.
Polars can leverage file skipping to query Delta tables very quickly.
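The Z Ordered copy of the table used in the last query can be produced with the deltalake optimizer; a sketch, assuming the table lives at the path used above:
import os\n\nfrom deltalake import DeltaTable\n\ndt = DeltaTable(os.path.expanduser(\"~/data/deltalake/G1_1e9_1e2_0_0\"))\ndt.optimize.z_order([\"id1\"])  # rewrites files so that similar id1 values are co-located\n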
"},{"location":"integrations/delta-lake-polars/#why-polars-is-fast-with-delta-lake","title":"Why Polars is fast with Delta Lake","text":"Delta tables consist of metadata in a transaction log and data stored in Parquet files.
When Polars queries a Delta table, it starts by consulting the transaction log to understand the metadata of each file in the Delta table. This allows for Polars to quickly identify which files should be skipped by the query.
CSV files don't contain any such metadata, so file skipping isn't an option. Polars can skip Parquet files based on metadata, but it needs to open up each file and read the metadata, which is slower than grabbing the file-level metadata directly from the transaction log.
Parquet doesn't allow users to easily Z Order the data and colocate similar data in the same row groups. The Z Order optimizations are only supported in Delta tables.
Delta Lake offers Polars users unique performance optimizations.
"},{"location":"integrations/delta-lake-polars/#other-delta-lake-features-relevant-for-polars-users","title":"Other Delta Lake features relevant for Polars users","text":" - ACID transactions for reliable writes
- better performance with file skipping
- enhanced file skipping via Z Ordering
- ability to roll back mistakes
"},{"location":"integrations/delta-lake-polars/#conclusion","title":"Conclusion","text":"This guide shows how Delta Lake is a great storage format for Polars analyses.
Delta Lake is easy to use, fast, and full of features that are great for Polars users.
"},{"location":"usage/","title":"Usage","text":"A DeltaTable represents the state of a delta table at a particular version. This includes which files are currently part of the table, the schema of the table, and other metadata such as creation time.
Python Rust DeltaTable
from deltalake import DeltaTable\n\ndt = DeltaTable(\"../rust/tests/data/delta-0.2.0\")\nprint(f\"Version: {dt.version()}\")\nprint(f\"Files: {dt.files()}\")\n
DeltaTable
let table = deltalake::open_table(\"../rust/tests/data/simple_table\").await.unwrap();\nprintln!(\"Version: {}\", table.version());\nprintln!(\"Files: {}\", table.get_files());\n
"},{"location":"usage/appending-overwriting-delta-lake-table/","title":"Appending to and overwriting a Delta Lake table","text":"This section explains how to append to an exising Delta table and how to overwrite a Delta table.
"},{"location":"usage/appending-overwriting-delta-lake-table/#delta-lake-append-transactions","title":"Delta Lake append transactions","text":"Suppose you have a Delta table with the following contents:
+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n+-------+----------+\n
Append two additional rows of data to the table:
import pandas as pd\nfrom deltalake import write_deltalake, DeltaTable\n\ndf = pd.DataFrame({\"num\": [8, 9], \"letter\": [\"dd\", \"ee\"]})\nwrite_deltalake(\"tmp/some-table\", df, mode=\"append\")\n
Here are the updated contents of the Delta table:
+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n| 8 | dd |\n| 9 | ee |\n+-------+----------+\n
Now let's see how to perform an overwrite transaction.
"},{"location":"usage/appending-overwriting-delta-lake-table/#delta-lake-overwrite-transactions","title":"Delta Lake overwrite transactions","text":"Now let's see how to overwrite the exisitng Delta table.
df = pd.DataFrame({\"num\": [11, 22], \"letter\": [\"aa\", \"bb\"]})\nwrite_deltalake(\"tmp/some-table\", df, mode=\"overwrite\")\n
Here are the contents of the Delta table after the overwrite operation:
+-------+----------+\n| num | letter |\n|-------+----------|\n| 11 | aa |\n| 22 | bb |\n+-------+----------+\n
Overwriting just performs a logical delete. It doesn't physically remove the previous data from storage. Time travel back to the previous version to confirm that the old version of the table is still accessible.
dt = DeltaTable(\"tmp/some-table\", version=1)\n\n+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n| 8 | dd |\n| 9 | ee |\n+-------+----------+\n
"},{"location":"usage/create-delta-lake-table/","title":"Creating a Delta Lake Table","text":"This section explains how to create a Delta Lake table.
You can easily write a DataFrame to a Delta table.
from deltalake import write_deltalake\nimport pandas as pd\n\ndf = pd.DataFrame({\"num\": [1, 2, 3], \"letter\": [\"a\", \"b\", \"c\"]})\nwrite_deltalake(\"tmp/some-table\", df)\n
Here are the contents of the Delta table in storage:
+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n+-------+----------+\n
"},{"location":"usage/deleting-rows-from-delta-lake-table/","title":"Deleting rows from a Delta Lake table","text":"This section explains how to delete rows from a Delta Lake table.
Suppose you have the following Delta table with four rows:
+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n| 4 | d |\n+-------+----------+\n
Here's how to delete all the rows where the num
is greater than 2:
dt = DeltaTable(\"tmp/my-table\")\ndt.delete(\"num > 2\")\n
Here are the contents of the Delta table after the delete operation has been performed:
+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n+-------+----------+\n
"},{"location":"usage/examining-table/","title":"Examining a Table","text":""},{"location":"usage/examining-table/#metadata","title":"Metadata","text":"The delta log maintains basic metadata about a table, including:
- A unique
id
- A
name
, if provided - A
description
, if provided - The list of
partitionColumns
. - The
created_time
of the table - A map of table
configuration
. This includes fields such as delta.appendOnly
, which if true
indicates the table is not meant to have data deleted from it.
Get metadata from a table with the DeltaTable.metadata() method:
>>> from deltalake import DeltaTable\n>>> dt = DeltaTable(\"../rust/tests/data/simple_table\")\n>>> dt.metadata()\nMetadata(id: 5fba94ed-9794-4965-ba6e-6ee3c0d22af9, name: None, description: None, partitionColumns: [], created_time: 1587968585495, configuration={})\n
"},{"location":"usage/examining-table/#schema","title":"Schema","text":"The schema for the table is also saved in the transaction log. It can either be retrieved in the Delta Lake form as Schema or as a PyArrow schema. The first allows you to introspect any column-level metadata stored in the schema, while the latter represents the schema the table will be loaded into.
Use DeltaTable.schema to retrieve the delta lake schema:
>>> from deltalake import DeltaTable\n>>> dt = DeltaTable(\"../rust/tests/data/simple_table\")\n>>> dt.schema()\nSchema([Field(id, PrimitiveType(\"long\"), nullable=True)])\n
These schemas have a JSON representation that can be retrieved with DeltaTable.schema().to_json(). To reconstruct a schema from JSON, use Schema.from_json().
>>> dt.schema().to_json()\n'{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"long\",\"nullable\":true,\"metadata\":{}}]}'\n
Use DeltaTable.schema.to_pyarrow() to retrieve the PyArrow schema:
>>> dt.schema().to_pyarrow()\nid: int64\n
"},{"location":"usage/examining-table/#history","title":"History","text":"Depending on what system wrote the table, the delta table may have provenance information describing what operations were performed on the table, when, and by whom. This information is retained for 30 days by default, unless otherwise specified by the table configuration delta.logRetentionDuration
.
Note
This information is not written by all writers, and different writers may use different schemas to encode the actions. For Spark's format, see: https://docs.delta.io/latest/delta-utility.html#history-schema
To view the available history, use DeltaTable.history
:
>>> from deltalake import DeltaTable\n>>> dt = DeltaTable(\"../rust/tests/data/simple_table\")\n>>> dt.history()\n[{'timestamp': 1587968626537, 'operation': 'DELETE', 'operationParameters': {'predicate': '[\"((`id` % CAST(2 AS BIGINT)) = CAST(0 AS BIGINT))\"]'}, 'readVersion': 3, 'isBlindAppend': False},\n {'timestamp': 1587968614187, 'operation': 'UPDATE', 'operationParameters': {'predicate': '((id#697L % cast(2 as bigint)) = cast(0 as bigint))'}, 'readVersion': 2, 'isBlindAppend': False},\n {'timestamp': 1587968604143, 'operation': 'WRITE', 'operationParameters': {'mode': 'Overwrite', 'partitionBy': '[]'}, 'readVersion': 1, 'isBlindAppend': False},\n {'timestamp': 1587968596254, 'operation': 'MERGE', 'operationParameters': {'predicate': '(oldData.`id` = newData.`id`)'}, 'readVersion': 0, 'isBlindAppend': False},\n {'timestamp': 1587968586154, 'operation': 'WRITE', 'operationParameters': {'mode': 'ErrorIfExists', 'partitionBy': '[]'}, 'isBlindAppend': True}]\n
"},{"location":"usage/examining-table/#current-add-actions","title":"Current Add Actions","text":"The active state for a delta table is determined by the Add actions, which provide the list of files that are part of the table and metadata about them, such as creation time, size, and statistics. You can get a data frame of the add actions data using DeltaTable.get_add_actions
:
>>> from deltalake import DeltaTable\n>>> dt = DeltaTable(\"../rust/tests/data/delta-0.8.0\")\n>>> dt.get_add_actions(flatten=True).to_pandas()\n path size_bytes modification_time data_change num_records null_count.value min.value max.value\n0 part-00000-c9b90f86-73e6-46c8-93ba-ff6bfaf892a... 440 2021-03-06 15:16:07 True 2 0 0 2\n1 part-00000-04ec9591-0b73-459e-8d18-ba5711d6cbe... 440 2021-03-06 15:16:16 True 2 0 2 4\n
This works even with past versions of the table:
>>> dt = DeltaTable(\"../rust/tests/data/delta-0.8.0\", version=0)\n>>> dt.get_add_actions(flatten=True).to_pandas()\n path size_bytes modification_time data_change num_records null_count.value min.value max.value\n0 part-00000-c9b90f86-73e6-46c8-93ba-ff6bfaf892a... 440 2021-03-06 15:16:07 True 2 0 0 2\n1 part-00001-911a94a2-43f6-4acb-8620-5e68c265498... 445 2021-03-06 15:16:07 True 3 0 2 4\n
"},{"location":"usage/installation/","title":"Installation","text":"The deltalake
project can be installed via pip for Python or Cargo for Rust.
"},{"location":"usage/installation/#install-delta-lake-for-python","title":"Install Delta Lake for Python","text":"With pip:
pip install deltalake\n
With Conda:
conda install -c conda-forge deltalake\n
"},{"location":"usage/installation/#install-delta-lake-for-rust","title":"Install Delta Lake for Rust","text":"With Cargo:
cargo add deltalake\n
"},{"location":"usage/installation/#run-delta-lake-and-pandas-in-a-jupyter-notebook","title":"Run Delta Lake and pandas in a Jupyter Notebook","text":"You can easily run Delta Lake and pandas in a Jupyter notebook.
Create an environment file with the dependencies as follows:
name: deltalake-minimal\nchannels:\n - conda-forge\n - defaults\ndependencies:\n - python=3.11\n - ipykernel\n - pandas\n - polars\n - jupyterlab\n - pip\n - pip:\n - deltalake\n
Create a virtual environment with the dependencies:
conda env create -f deltalake-minimal.yml\n
Open the Jupyter notebook and run commands as follows:
"},{"location":"usage/loading-table/","title":"Loading a Delta Table","text":"To load the current version, use the constructor:
>>> dt = DeltaTable(\"../rust/tests/data/delta-0.2.0\")\n
Depending on your storage backend, you could use the storage_options
parameter to provide some configuration. Configuration is defined for specific backends - s3 options, azure options, gcs options.
>>> storage_options = {\"AWS_ACCESS_KEY_ID\": \"THE_AWS_ACCESS_KEY_ID\", \"AWS_SECRET_ACCESS_KEY\":\"THE_AWS_SECRET_ACCESS_KEY\"}\n>>> dt = DeltaTable(\"../rust/tests/data/delta-0.2.0\", storage_options=storage_options)\n
The configuration can also be provided via the environment, and the basic service provider is derived from the URL being used. We try to support many of the well-known formats to identify basic service properties.
S3:
- s3://\\<bucket>/\\<path>
- s3a://\\<bucket>/\\<path>
Azure:
- az://\\<container>/\\<path>
- adl://\\<container>/\\<path>
- abfs://\\<container>/\\<path>
GCS:
- gs://\\<bucket>/\\<path>
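For example, a minimal sketch of configuring credentials through the environment for an S3-backed table (the credential values and bucket path below are placeholders):
import os\n\nfrom deltalake import DeltaTable\n\nos.environ[\"AWS_ACCESS_KEY_ID\"] = \"THE_AWS_ACCESS_KEY_ID\"\nos.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"THE_AWS_SECRET_ACCESS_KEY\"\n\ndt = DeltaTable(\"s3://my-bucket/path/to/table\")  # provider inferred from the s3:// scheme\n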
Alternatively, if you have a data catalog you can load it by reference to a database and table name. Currently only AWS Glue is supported.
For AWS Glue catalog, use AWS environment variables to authenticate.
>>> from deltalake import DeltaTable\n>>> from deltalake import DataCatalog\n>>> database_name = \"simple_database\"\n>>> table_name = \"simple_table\"\n>>> data_catalog = DataCatalog.AWS\n>>> dt = DeltaTable.from_data_catalog(data_catalog=data_catalog, database_name=database_name, table_name=table_name)\n>>> dt.to_pyarrow_table().to_pydict()\n{'id': [5, 7, 9, 5, 6, 7, 8, 9]}\n
"},{"location":"usage/loading-table/#custom-storage-backends","title":"Custom Storage Backends","text":"While delta always needs its internal storage backend to work and be properly configured, in order to manage the delta log, it may sometime be advantageous - and is common practice in the arrow world - to customize the storage interface used for reading the bulk data.
deltalake
will work with any storage compliant with pyarrow.fs.FileSystem
, however the root of the filesystem has to be adjusted to point at the root of the Delta table. We can achieve this by wrapping the custom filesystem into a pyarrow.fs.SubTreeFileSystem
.
import pyarrow.fs as fs\nfrom deltalake import DeltaTable\n\npath = \"<path/to/table>\"\nfilesystem = fs.SubTreeFileSystem(path, fs.LocalFileSystem())\n\ndt = DeltaTable(path)\nds = dt.to_pyarrow_dataset(filesystem=filesystem)\n
When using the pyarrow factory method for file systems, the normalized path is provided on creation. In case of S3 this would look something like:
import pyarrow.fs as fs\nfrom deltalake import DeltaTable\n\ntable_uri = \"s3://<bucket>/<path>\"\nraw_fs, normalized_path = fs.FileSystem.from_uri(table_uri)\nfilesystem = fs.SubTreeFileSystem(normalized_path, raw_fs)\n\ndt = DeltaTable(table_uri)\nds = dt.to_pyarrow_dataset(filesystem=filesystem)\n
"},{"location":"usage/loading-table/#time-travel","title":"Time Travel","text":"To load previous table states, you can provide the version number you wish to load:
>>> dt = DeltaTable(\"../rust/tests/data/simple_table\", version=2)\n
Once you\\'ve loaded a table, you can also change versions using either a version number or datetime string:
>>> dt.load_version(1)\n>>> dt.load_with_datetime(\"2021-11-04 00:05:23.283+00:00\")\n
Warning
Previous table versions may not exist if they have been vacuumed, in which case an exception will be thrown. See Vacuuming tables for more information.
"},{"location":"usage/managing-tables/","title":"Managing Delta Tables","text":""},{"location":"usage/managing-tables/#vacuuming-tables","title":"Vacuuming tables","text":"Vacuuming a table will delete any files that have been marked for deletion. This may make some past versions of a table invalid, so this can break time travel. However, it will save storage space. Vacuum will retain files in a certain window, by default one week, so time travel will still work in shorter ranges.
Delta tables usually don't delete old files automatically, so vacuuming regularly is considered good practice, unless the table is only appended to.
Use DeltaTable.vacuum
to perform the vacuum operation. Note that to prevent accidental deletion, the function performs a dry-run by default: it will only list the files to be deleted. Pass dry_run=False
to actually delete files.
>>> dt = DeltaTable(\"../rust/tests/data/simple_table\")\n>>> dt.vacuum()\n['../rust/tests/data/simple_table/part-00006-46f2ff20-eb5d-4dda-8498-7bfb2940713b-c000.snappy.parquet',\n '../rust/tests/data/simple_table/part-00190-8ac0ae67-fb1d-461d-a3d3-8dc112766ff5-c000.snappy.parquet',\n '../rust/tests/data/simple_table/part-00164-bf40481c-4afd-4c02-befa-90f056c2d77a-c000.snappy.parquet',\n ...]\n>>> dt.vacuum(dry_run=False) # Don't run this unless you are sure!\n
"},{"location":"usage/managing-tables/#optimizing-tables","title":"Optimizing tables","text":"Optimizing tables is not currently supported.
"},{"location":"usage/querying-delta-tables/","title":"Querying Delta Tables","text":"Delta tables can be queried in several ways. By loading as Arrow data or an Arrow dataset, they can be used by compatible engines such as Pandas and DuckDB. By passing on the list of files, they can be loaded into other engines such as Dask.
Delta tables are often larger than can fit into memory on a single computer, so this module provides ways to read only the parts of the data you need. Partition filters allow you to skip reading files that are part of irrelevant partitions. Only loading the columns required also saves memory. Finally, some methods allow reading tables batch-by-batch, allowing you to process the whole table while only having a portion loaded at any given time.
To load into Pandas or a PyArrow table use the DeltaTable.to_pandas
and DeltaTable.to_pyarrow_table
methods, respectively. Both of these support filtering partitions and selecting particular columns.
>>> from deltalake import DeltaTable\n>>> dt = DeltaTable(\"../rust/tests/data/delta-0.8.0-partitioned\")\n>>> dt.schema().to_pyarrow()\nvalue: string\nyear: string\nmonth: string\nday: string\n>>> dt.to_pandas(partitions=[(\"year\", \"=\", \"2021\")], columns=[\"value\"])\n value\n0 6\n1 7\n2 5\n3 4\n>>> dt.to_pyarrow_table(partitions=[(\"year\", \"=\", \"2021\")], columns=[\"value\"])\npyarrow.Table\nvalue: string\n
Converting to a PyArrow Dataset allows you to filter on columns other than partition columns and load the result as a stream of batches rather than a single table. Convert to a dataset using DeltaTable.to_pyarrow_dataset
. Filters applied to datasets will use the partition values and file statistics from the Delta transaction log and push down any other filters to the scanning operation.
>>> import pyarrow.dataset as ds\n>>> dataset = dt.to_pyarrow_dataset()\n>>> condition = (ds.field(\"year\") == \"2021\") & (ds.field(\"value\") > \"4\")\n>>> dataset.to_table(filter=condition, columns=[\"value\"]).to_pandas()\n value\n0 6\n1 7\n2 5\n>>> batch_iter = dataset.to_batches(filter=condition, columns=[\"value\"], batch_size=2)\n>>> for batch in batch_iter: print(batch.to_pandas())\n value\n0 6\n1 7\n value\n0 5\n
PyArrow datasets may also be passed to compatible query engines, such as DuckDB
>>> import duckdb\n>>> ex_data = duckdb.arrow(dataset)\n>>> ex_data.filter(\"year = 2021 and value > 4\").project(\"value\")\n---------------------\n-- Expression Tree --\n---------------------\nProjection [value]\n Filter [year=2021 AND value>4]\n arrow_scan(140409099470144, 4828104688, 1000000)\n\n---------------------\n-- Result Columns --\n---------------------\n- value (VARCHAR)\n\n---------------------\n-- Result Preview --\n---------------------\nvalue\nVARCHAR\n[ Rows: 3]\n6\n7\n5\n
Finally, you can always pass the list of file paths to an engine. For example, you can pass them to dask.dataframe.read_parquet
:
>>> import dask.dataframe as dd\n>>> df = dd.read_parquet(dt.file_uris())\n>>> df\nDask DataFrame Structure:\n value year month day\nnpartitions=6\n object category[known] category[known] category[known]\n ... ... ... ...\n... ... ... ... ...\n ... ... ... ...\n ... ... ... ...\nDask Name: read-parquet, 6 tasks\n>>> df.compute()\n value year month day\n0 1 2020 1 1\n0 2 2020 2 3\n0 3 2020 2 5\n0 4 2021 4 5\n0 5 2021 12 4\n0 6 2021 12 20\n1 7 2021 12 20\n
"},{"location":"usage/writing-delta-tables/","title":"Writing Delta Tables","text":"For overwrites and appends, use write_deltalake
. If the table does not already exist, it will be created. The data
parameter will accept a Pandas DataFrame, a PyArrow Table, or an iterator of PyArrow Record Batches.
>>> from deltalake import write_deltalake\n>>> df = pd.DataFrame({'x': [1, 2, 3]})\n>>> write_deltalake('path/to/table', df)\n
Note: write_deltalake
accepts a Pandas DataFrame, but will convert it to an Arrow table before writing. See caveats in pyarrow:python/pandas
.
By default, writes create a new table and error if it already exists. This is controlled by the mode
parameter, which mirrors the behavior of Spark's pyspark.sql.DataFrameWriter.saveAsTable
DataFrame method. To overwrite pass in mode='overwrite'
and to append pass in mode='append'
:
>>> write_deltalake('path/to/table', df, mode='overwrite')\n>>> write_deltalake('path/to/table', df, mode='append')\n
write_deltalake
will raise ValueError
if the schema of the data passed to it differs from the existing table's schema. If you wish to alter the schema as part of an overwrite pass in overwrite_schema=True
.
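A sketch of such a schema-changing overwrite, where df_new is assumed to be a DataFrame whose columns differ from the existing table:
>>> write_deltalake('path/to/table', df_new, mode='overwrite', overwrite_schema=True)\n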
"},{"location":"usage/writing-delta-tables/#overwriting-a-partition","title":"Overwriting a partition","text":"You can overwrite a specific partition by using mode=\"overwrite\"
together with partition_filters
. This will remove all files within the matching partition and insert your data as new files. This can only be done on one partition at a time. All of the input data must belong to that partition or else the method will raise an error.
>>> from deltalake import write_deltalake\n>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': ['a', 'a', 'b']})\n>>> write_deltalake('path/to/table', df, partition_by=['y'])\n\n>>> table = DeltaTable('path/to/table')\n>>> df2 = pd.DataFrame({'x': [100], 'y': ['b']})\n>>> write_deltalake(table, df2, partition_filters=[('y', '=', 'b')], mode=\"overwrite\")\n\n>>> table.to_pandas()\n x y\n0 1 a\n1 2 a\n2 100 b\n
This method could also be used to insert a new partition if one doesn't already exist, making this operation idempotent.
"},{"location":"usage/optimize/delta-lake-z-order/","title":"Delta Lake Z Order","text":"This section explains how to Z Order a Delta table.
Z Ordering colocates similar data in the same files, which allows for better file skipping and faster queries.
Suppose you have a table with first_name
, age
, and country
columns.
If you Z Order the data by the country
column, then individuals from the same country will be stored in the same files. When you subsequently query the data for individuals from a given country, it will execute faster because more data can be skipped.
Here's how to Z Order a Delta table:
dt = DeltaTable(\"tmp\")\ndt.optimize.z_order([country])\n
"},{"location":"usage/optimize/small-file-compaction-with-optimize/","title":"Delta Lake small file compaction with optimize","text":"This post shows you how to perform small file compaction with using the optimize
method. This was added to the DeltaTable
class in version 0.9.0. This command rearranges the small files into larger files which will reduce the number of files and speed up queries.
This is very helpful for workloads that append frequently. For example, if you have a table that is appended to every 10 minutes, after a year you will have 52,560 files in the table. If the table is partitioned by another dimension, you will have 52,560 files per partition; with just 100 unique values that's millions of files. By running optimize
periodically, you can reduce the number of files in the table to a more manageable number.
Typically, you will run optimize less frequently than you append data. If possible, you might run optimize once you know you have finished writing to a particular partition. For example, on a table partitioned by date, you might append data every 10 minutes, but only run optimize once a day at the end of the day. This will ensure you don't need to compact the same data twice.
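A sketch of that end-of-day pattern, compacting only the partition that has just finished (dt is assumed to be a DeltaTable for the table, and the date value is illustrative; partition_filters is demonstrated in more detail below):
dt.optimize(partition_filters=[(\"date\", \"=\", \"2021-01-04\")])\n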
This section will also teach you about how to use vacuum
to physically remove files from storage that are no longer needed. You\u2019ll often want to run vacuum after running optimize to remove the small files from storage once they\u2019ve been compacted into larger files.
Let\u2019s start with an example to explain these key concepts. All the code covered in this post is stored in this notebook in case you\u2019d like to follow along.
"},{"location":"usage/optimize/small-file-compaction-with-optimize/#create-a-delta-table-with-small-files","title":"Create a Delta table with small files","text":"Let\u2019s start by creating a Delta table with a lot of small files so we can demonstrate the usefulness of the optimize
command.
Start by writing a function that generates one thousand rows of random data given a timestamp.
import itertools\nfrom datetime import datetime, timedelta\n\nimport pyarrow as pa\nimport pyarrow.compute as pc\n\nfrom deltalake import DeltaTable, write_deltalake\n\ndef record_observations(date: datetime) -> pa.Table:\n \"\"\"Pulls data for a certain datetime\"\"\"\n nrows = 1000\n return pa.table(\n {\n \"date\": pa.array([date.date()] * nrows),\n \"timestamp\": pa.array([date] * nrows),\n \"value\": pc.random(nrows),\n }\n )\n
Let\u2019s run this function and observe the output:
record_observations(datetime(2021, 1, 1, 12)).to_pandas()\n\n date timestamp value\n0 2021-01-01 2021-01-01 12:00:00 0.3186397383362023\n1 2021-01-01 2021-01-01 12:00:00 0.04253766974259088\n2 2021-01-01 2021-01-01 12:00:00 0.9355682965171573\n\u2026\n999 2021-01-01 2021-01-01 12:00:00 0.23207037062879843\n
Let\u2019s write 100 hours worth of data to the Delta table.
# Every hour starting at midnight on 2021-01-01\nhours_iter = (datetime(2021, 1, 1) + timedelta(hours=i) for i in itertools.count())\n\n# Write 100 hours worth of data\nfor timestamp in itertools.islice(hours_iter, 100):\n write_deltalake(\n \"observation_data\",\n record_observations(timestamp),\n partition_by=[\"date\"],\n mode=\"append\",\n )\n
This data was appended to the Delta table in 100 separate transactions, so the table will contain 100 transaction log entries and 100 data files. You can see the number of files with the files()
method.
dt = DeltaTable(\"observation_data\")\nlen(dt.files()) # 100\n
Here\u2019s how the files are persisted in storage.
observation_data\n\u251c\u2500\u2500 _delta_log\n\u2502 \u251c\u2500\u2500 00000000000000000000.json\n\u2502 \u251c\u2500\u2500 \u2026\n\u2502 \u2514\u2500\u2500 00000000000000000099.json\n\u251c\u2500\u2500 date=2021-01-01\n\u2502 \u251c\u2500\u2500 0-cfe227c6-edd9-4369-a1b0-db4559a2e693-0.parquet\n\u2502 \u251c\u2500\u2500 \u2026\n\u2502 \u251c\u2500\u2500 23-a4ace29e-e73e-40a1-81d3-0f5dc13093de-0.parquet\n\u251c\u2500\u2500 date=2021-01-02\n\u2502 \u251c\u2500\u2500 24-9698b456-66eb-4075-8732-fe56d81edb60-0.parquet\n\u2502 \u251c\u2500\u2500 \u2026\n\u2502 \u2514\u2500\u2500 47-d3fce527-e018-4c02-8acd-a649f6f523d2-0.parquet\n\u251c\u2500\u2500 date=2021-01-03\n\u2502 \u251c\u2500\u2500 48-fd90a7fa-5a14-42ed-9f59-9fe48d87899d-0.parquet\n\u2502 \u251c\u2500\u2500 \u2026\n\u2502 \u2514\u2500\u2500 71-5f143ade-8ae2-4854-bdc5-61154175665f-0.parquet\n\u251c\u2500\u2500 date=2021-01-04\n\u2502 \u251c\u2500\u2500 72-477c10fe-dc09-4087-80f0-56006e4a7911-0.parquet\n\u2502 \u251c\u2500\u2500 \u2026\n\u2502 \u2514\u2500\u2500 95-1c92cbce-8af4-4fe4-9c11-832245cf4d40-0.parquet\n\u2514\u2500\u2500 date=2021-01-05\n \u251c\u2500\u2500 96-1b878ee5-25fd-431a-bc3e-6dcacc96b470-0.parquet\n \u251c\u2500\u2500 \u2026\n \u2514\u2500\u2500 99-9650ed63-c195-433d-a86b-9469088c14ba-0.parquet\n
Each of these Parquet files are tiny - they\u2019re only 10 KB. Let\u2019s see how to compact these tiny files into larger files, which is more efficient for data queries.
"},{"location":"usage/optimize/small-file-compaction-with-optimize/#compact-small-files-in-the-delta-table-with-optimize","title":"Compact small files in the Delta table with optimize","text":"Let\u2019s run the optimize command to compact the existing small files into larger files:
dt = DeltaTable(\"observation_data\")\n\ndt.optimize()\n
Here\u2019s the output of the command:
{'numFilesAdded': 5,\n 'numFilesRemoved': 100,\n 'filesAdded': {'min': 39000,\n 'max': 238282,\n 'avg': 198425.6,\n 'totalFiles': 5,\n 'totalSize': 992128},\n 'filesRemoved': {'min': 10244,\n 'max': 10244,\n 'avg': 10244.0,\n 'totalFiles': 100,\n 'totalSize': 1024400},\n 'partitionsOptimized': 5,\n 'numBatches': 1,\n 'totalConsideredFiles': 100,\n 'totalFilesSkipped': 0,\n 'preserveInsertionOrder': True}\n
The optimize operation has added 5 new files and marked 100 existing files for removal (this is also known as \u201ctombstoning\u201d files). It has compacted the 100 tiny files into 5 larger files.
Let\u2019s append some more data to the Delta table and see how we can selectively run optimize on the new data that\u2019s added.
"},{"location":"usage/optimize/small-file-compaction-with-optimize/#handling-incremental-updates-with-optimize","title":"Handling incremental updates with optimize","text":"Let\u2019s append another 24 hours of data to the Delta table:
for timestamp in itertools.islice(hours_iter, 24):\n write_deltalake(\n dt,\n record_observations(timestamp),\n partition_by=[\"date\"],\n mode=\"append\",\n )\n
We can use get_add_actions()
to introspect the table state. We can see that 2021-01-06
has only a few hours of data so far, so we don't want to optimize that yet. But 2021-01-05
has all 24 hours of data, so it's ready to be optimized.
dt.get_add_actions(flatten=True).to_pandas()[\n \"partition.date\"\n].value_counts().sort_index()\n\n2021-01-01 1\n2021-01-02 1\n2021-01-03 1\n2021-01-04 1\n2021-01-05 21\n2021-01-06 4\n
To optimize a single partition, you can pass in a partition_filters
argument specifying which partitions to optimize.
dt.optimize(partition_filters=[(\"date\", \"=\", \"2021-01-05\")])\n\n{'numFilesAdded': 1,\n 'numFilesRemoved': 21,\n 'filesAdded': {'min': 238282,\n 'max': 238282,\n 'avg': 238282.0,\n 'totalFiles': 1,\n 'totalSize': 238282},\n 'filesRemoved': {'min': 10244,\n 'max': 39000,\n 'avg': 11613.333333333334,\n 'totalFiles': 21,\n 'totalSize': 243880},\n 'partitionsOptimized': 1,\n 'numBatches': 1,\n 'totalConsideredFiles': 21,\n 'totalFilesSkipped': 0,\n 'preserveInsertionOrder': True}\n
This optimize operation tombstones 21 small data files and adds one file with all the existing data properly condensed. Let\u2019s take a look at a portion of the _delta_log/00000000000000000125.json
file, which is the transaction log entry that corresponds with this incremental optimize command.
{\n \"remove\": {\n \"path\": \"date=2021-01-05/part-00000-41178aab-2491-488f-943d-8f03867295ee-c000.snappy.parquet\",\n \"deletionTimestamp\": 1683465499480,\n \"dataChange\": false,\n \"extendedFileMetadata\": null,\n \"partitionValues\": {\n \"date\": \"2021-01-05\"\n },\n \"size\": 39000,\n \"tags\": null\n }\n}\n\n{\n \"remove\": {\n \"path\": \"date=2021-01-05/101-79ae6fc9-c0cc-49ec-bb94-9aba879ac949-0.parquet\",\n \"deletionTimestamp\": 1683465499481,\n \"dataChange\": false,\n \"extendedFileMetadata\": null,\n \"partitionValues\": {\n \"date\": \"2021-01-05\"\n },\n \"size\": 10244,\n \"tags\": null\n }\n}\n\n\u2026\n\n{\n \"add\": {\n \"path\": \"date=2021-01-05/part-00000-4b020a40-c836-4a11-851f-4691370c9f3a-c000.snappy.parquet\",\n \"size\": 238282,\n \"partitionValues\": {\n \"date\": \"2021-01-05\"\n },\n \"modificationTime\": 1683465499493,\n \"dataChange\": false,\n \"stats\": \"{\\\"numRecords\\\":24000,\\\"minValues\\\":{\\\"value\\\":0.00005581532256615507,\\\"timestamp\\\":\\\"2021-01-05T00:00:00.000Z\\\"},\\\"maxValues\\\":{\\\"timestamp\\\":\\\"2021-01-05T23:00:00.000Z\\\",\\\"value\\\":0.9999911402868216},\\\"nullCount\\\":{\\\"timestamp\\\":0,\\\"value\\\":0}}\",\n \"tags\": null\n }\n}\n
The transaction log indicates that many files have been tombstoned and one file is added, as expected.
The Delta Lake optimize command \u201cremoves\u201d data by marking the data files as removed in the transaction log. The optimize command doesn\u2019t physically delete the Parquet file from storage. Optimize performs a \u201clogical remove\u201d not a \u201cphysical remove\u201d.
Delta Lake uses logical operations so you can time travel back to earlier versions of your data. You can vacuum your Delta table to physically remove Parquet files from storage if you don\u2019t need to time travel and don\u2019t want to pay to store the tombstoned files.
"},{"location":"usage/optimize/small-file-compaction-with-optimize/#vacuuming-after-optimizing","title":"Vacuuming after optimizing","text":"The vacuum command deletes all files from storage that are marked for removal in the transaction log and older than the retention period which is 7 days by default.
It\u2019s normally a good idea to have a retention period of at least 7 days. For purposes of this example, we will set the retention period to zero, just so you can see how the files get removed from storage. Adjusting the retention period in this manner isn\u2019t recommended for production use cases.
Let\u2019s run the vacuum command:
dt.vacuum(retention_hours=0, enforce_retention_duration=False, dry_run=False)\n
The command returns a list of all the files that are removed from storage:
['date=2021-01-02/39-a98680f2-0e0e-4f26-a491-18b183f9eb05-0.parquet',\n 'date=2021-01-02/41-e96bc8bb-c571-484c-b534-e897424fb7da-0.parquet',\n \u2026\n 'date=2021-01-01/0-cfe227c6-edd9-4369-a1b0-db4559a2e693-0.parquet',\n 'date=2021-01-01/18-ded53418-172b-4e40-bf2e-7c8142e71bd1-0.parquet']\n
Let\u2019s look at the content of the Delta table now that all the really small files have been removed from storage:
observation_data\n\u251c\u2500\u2500 _delta_log\n\u2502 \u251c\u2500\u2500 00000000000000000000.json\n\u2502 \u251c\u2500\u2500 00000000000000000001.json\n\u2502 \u251c\u2500\u2500 \u2026\n\u2502 \u251c\u2500\u2500 00000000000000000124.json\n\u2502 \u2514\u2500\u2500 00000000000000000125.json\n\u251c\u2500\u2500 date=2021-01-01\n\u2502 \u2514\u2500\u2500 part-00000-31e3df5a-8bbe-425c-b85d-77794f922837-c000.snappy.parquet\n\u251c\u2500\u2500 date=2021-01-02\n\u2502 \u2514\u2500\u2500 part-00000-8af07878-b179-49ce-a900-d58595ffb60a-c000.snappy.parquet\n\u251c\u2500\u2500 date=2021-01-03\n\u2502 \u2514\u2500\u2500 part-00000-5e980864-b32f-4686-a58d-a75fae455c1e-c000.snappy.parquet\n\u251c\u2500\u2500 date=2021-01-04\n\u2502 \u2514\u2500\u2500 part-00000-1e82d23b-084d-47e3-9790-d68289c39837-c000.snappy.parquet\n\u251c\u2500\u2500 date=2021-01-05\n\u2502 \u2514\u2500\u2500 part-00000-4b020a40-c836-4a11-851f-4691370c9f3a-c000.snappy.parquet\n\u2514\u2500\u2500 date=2021-01-06\n \u251c\u2500\u2500 121-0ecb5d70-4a28-4cd4-b2d2-89ee2285eaaa-0.parquet\n \u251c\u2500\u2500 122-6b2d2758-9154-4392-b287-fe371ee507ec-0.parquet\n \u251c\u2500\u2500 123-551d318f-4968-441f-83fc-89f98cd15daf-0.parquet\n \u2514\u2500\u2500 124-287309d3-662e-449d-b4da-2e67b7cc0557-0.parquet\n
All the partitions only contain a single file now, except for the date=2021-01-06
partition that has not been compacted yet.
An entire partition won\u2019t necessarily get compacted to a single data file when optimize is run. Each partition has data files that are condensed to the target file size.
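If you want a different target file size, the TableOptimizer API documented in the API reference exposes it as a parameter; a sketch, with 128 MB chosen arbitrarily:
dt.optimize.compact(target_size=128 * 1024 * 1024)  # target file size in bytes\n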
"},{"location":"usage/optimize/small-file-compaction-with-optimize/#what-causes-the-small-file-problem","title":"What causes the small file problem?","text":"Delta tables can accumulate small files for a variety of reasons:
- User error: users can accidentally write files that are too small. Users should sometimes repartition in memory before writing to disk to avoid appending files that are too small.
- Frequent appends: systems that append more often tend to produce more small files. A pipeline that appends every minute will generally generate ten times as many small files as a system that appends every ten minutes.
- Appending to partitioned data lakes with high cardinality columns can also cause small files. If you append every hour to a table that\u2019s partitioned on a column with 1,000 distinct values, then every append could create 1,000 new files. Partitioning by date avoids this problem because the data isn\u2019t split up across partitions in this manner.
"},{"location":"usage/optimize/small-file-compaction-with-optimize/#conclusion","title":"Conclusion","text":"This page showed you how to create a Delta table with many small files, compact the small files into larger files with optimize, and remove the tombstoned files from storage with vacuum.
You also learned about how to incrementally optimize partitioned Delta tables, so you only compact newly added data.
An excessive number of small files slows down Delta table queries, so periodic compaction is important. Make sure to properly maintain your Delta tables, so performance does not degrade over time.
"}]}
\ No newline at end of file
+{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"The deltalake package","text":"This is the documentation for the native Rust/Python implementation of Delta Lake. It is based on the delta-rs Rust library and requires no Spark or JVM dependencies. For the PySpark implementation, see delta-spark instead.
This module provides the capability to read, write, and manage Delta Lake tables with Python or Rust without Spark or Java. It uses Apache Arrow under the hood, so is compatible with other Arrow-native or integrated libraries such as pandas, DuckDB, and Polars.
"},{"location":"#important-terminology","title":"Important terminology","text":" - \"Rust deltalake\" refers to the Rust API of delta-rs (no Spark dependency)
- \"Python deltalake\" refers to the Python API of delta-rs (no Spark dependency)
- \"Delta Spark\" refers to the Scala impementation of the Delta Lake transaction log protocol. This depends on Spark and Java.
"},{"location":"#why-implement-the-delta-lake-transaction-log-protocol-in-rust-and-scala","title":"Why implement the Delta Lake transaction log protocol in Rust and Scala?","text":"Delta Spark depends on Java and Spark, which is fine for many use cases, but not all Delta Lake users want to depend on these libraries. delta-rs allows using Delta Lake in Rust or other native projects when using a JVM is often not an option.
Python deltalake lets you query Delta tables without depending on Java/Scala.
Suppose you want to query a Delta table with pandas on your local machine. Python deltalake makes it easy to query the table with a simple pip install
command - no need to install Java.
"},{"location":"#contributing","title":"Contributing","text":"The Delta Lake community welcomes contributors from all developers, regardless of your experience or programming background.
You can write Rust code, Python code, documentation, submit bugs, or give talks to the community. We welcome all of these contributions.
Feel free to join our Slack and message us in the #delta-rs channel any time!
We value kind communication and building a productive, friendly environment for maximum collaboration and fun.
"},{"location":"#project-history","title":"Project history","text":"Check out this video by Denny Lee & QP Hou to learn about the genesis of the delta-rs project:
"},{"location":"api/catalog/","title":"Catalog","text":"","boost":2},{"location":"api/catalog/#deltalake.data_catalog.DataCatalog","title":"deltalake.data_catalog.DataCatalog","text":" Bases: Enum
List of the Data Catalogs
","boost":2},{"location":"api/catalog/#deltalake.data_catalog.DataCatalog.AWS","title":"AWS class-attribute
instance-attribute
","text":"AWS = 'glue'\n
Refers to the AWS Glue Data Catalog <https://docs.aws.amazon.com/glue/latest/dg/catalog-and-crawler.html>
_
","boost":2},{"location":"api/catalog/#deltalake.data_catalog.DataCatalog.UNITY","title":"UNITY class-attribute
instance-attribute
","text":"UNITY = 'unity'\n
Refers to the Databricks Unity Catalog <https://docs.databricks.com/data-governance/unity-catalog/index.html>
_
","boost":2},{"location":"api/delta_writer/","title":"Writer","text":"","boost":10},{"location":"api/delta_writer/#write-to-delta-tables","title":"Write to Delta Tables","text":"","boost":10},{"location":"api/delta_writer/#deltalake.write_deltalake","title":"deltalake.write_deltalake","text":"write_deltalake(table_or_uri: Union[str, Path, DeltaTable], data: Union[pd.DataFrame, ds.Dataset, pa.Table, pa.RecordBatch, Iterable[pa.RecordBatch], RecordBatchReader], *, schema: Optional[Union[pa.Schema, DeltaSchema]] = None, partition_by: Optional[Union[List[str], str]] = None, filesystem: Optional[pa_fs.FileSystem] = None, mode: Literal['error', 'append', 'overwrite', 'ignore'] = 'error', file_options: Optional[ds.ParquetFileWriteOptions] = None, max_partitions: Optional[int] = None, max_open_files: int = 1024, max_rows_per_file: int = 10 * 1024 * 1024, min_rows_per_group: int = 64 * 1024, max_rows_per_group: int = 128 * 1024, name: Optional[str] = None, description: Optional[str] = None, configuration: Optional[Mapping[str, Optional[str]]] = None, overwrite_schema: bool = False, storage_options: Optional[Dict[str, str]] = None, partition_filters: Optional[List[Tuple[str, str, Any]]] = None, predicate: Optional[str] = None, large_dtypes: bool = False, engine: Literal['pyarrow', 'rust'] = 'pyarrow', writer_properties: Optional[WriterProperties] = None, custom_metadata: Optional[Dict[str, str]] = None) -> None\n
Write to a Delta Lake table
If the table does not already exist, it will be created.
This function only supports writer protocol version 2 currently. When attempting to write to an existing table with a higher min_writer_version, this function will throw DeltaProtocolError.
Note that this function does NOT register this table in a data catalog.
A locking mechanism is needed to prevent unsafe concurrent writes to a delta lake directory when writing to S3. DynamoDB is the only available locking provider at the moment in delta-rs. To enable DynamoDB as the locking provider, you need to set the AWS_S3_LOCKING_PROVIDER
to 'dynamodb' as a storage_option or as an environment variable.
Additionally, you must create a DynamoDB table with the name 'delta_rs_lock_table' so that it can be automatically discovered by delta-rs. Alternatively, you can use a table name of your choice, but you must set the DYNAMO_LOCK_TABLE_NAME
variable to match your chosen table name. The required schema for the DynamoDB table is as follows:
- Key Schema: AttributeName=key, KeyType=HASH
- Attribute Definitions: AttributeName=key, AttributeType=S
Please note that this locking mechanism is not compatible with any other locking mechanisms, including the one used by Spark.
Parameters:
Name Type Description Default table_or_uri
Union[str, Path, DeltaTable]
URI of a table or a DeltaTable object.
required data
Union[DataFrame, Dataset, Table, RecordBatch, Iterable[RecordBatch], RecordBatchReader]
Data to write. If passing iterable, the schema must also be given.
required schema
Optional[Union[Schema, Schema]]
Optional schema to write.
None
partition_by
Optional[Union[List[str], str]]
List of columns to partition the table by. Only required when creating a new table.
None
filesystem
Optional[FileSystem]
Optional filesystem to pass to PyArrow. If not provided will be inferred from uri. The file system has to be rooted in the table root. Use the pyarrow.fs.SubTreeFileSystem, to adopt the root of pyarrow file systems.
None
mode
Literal['error', 'append', 'overwrite', 'ignore']
How to handle existing data. Default is to error if table already exists. If 'append', will add new data. If 'overwrite', will replace table with new data. If 'ignore', will not write anything if table already exists.
'error'
file_options
Optional[ParquetFileWriteOptions]
Optional write options for Parquet (ParquetFileWriteOptions). Can be provided with defaults using ParquetFileWriteOptions().make_write_options(). Please refer to https://github.com/apache/arrow/blob/master/python/pyarrow/_dataset_parquet.pyx#L492-L533 for the list of available options. Only used in pyarrow engine.
None
max_partitions
Optional[int]
the maximum number of partitions that will be used. Only used in pyarrow engine.
None
max_open_files
int
Limits the maximum number of files that can be left open while writing. If an attempt is made to open too many files then the least recently used file will be closed. If this setting is set too low you may end up fragmenting your data into many small files. Only used in pyarrow engine.
1024
max_rows_per_file
int
Maximum number of rows per file. If greater than 0 then this will limit how many rows are placed in any single file. Otherwise there will be no limit and one file will be created in each output directory unless files need to be closed to respect max_open_files min_rows_per_group: Minimum number of rows per group. When the value is set, the dataset writer will batch incoming data and only write the row groups to the disk when sufficient rows have accumulated. Only used in pyarrow engine.
10 * 1024 * 1024
max_rows_per_group
int
Maximum number of rows per group. If the value is set, then the dataset writer may split up large incoming batches into multiple row groups. If this value is set, then min_rows_per_group should also be set.
128 * 1024
name
Optional[str]
User-provided identifier for this table.
None
description
Optional[str]
User-provided description for this table.
None
configuration
Optional[Mapping[str, Optional[str]]]
A map containing configuration options for the metadata action.
None
overwrite_schema
bool
If True, allows updating the schema of the table.
False
storage_options
Optional[Dict[str, str]]
options passed to the native delta filesystem. Unused if 'filesystem' is defined.
None
predicate
Optional[str]
When using Overwrite
mode, replace data that matches a predicate. Only used in rust engine.
None
partition_filters
Optional[List[Tuple[str, str, Any]]]
the partition filters that will be used for partition overwrite. Only used in pyarrow engine.
None
large_dtypes
bool
If True, the data schema is kept in large_dtypes, has no effect on pandas dataframe input.
False
engine
Literal['pyarrow', 'rust']
writer engine to write the delta table. Rust
engine is still experimental but you may see up to 4x performance improvements over pyarrow.
'pyarrow'
writer_properties
Optional[WriterProperties]
Pass writer properties to the Rust parquet writer.
None
custom_metadata
Optional[Dict[str, str]]
Custom metadata to add to the commitInfo.
None
","boost":10},{"location":"api/delta_writer/#deltalake.WriterProperties","title":"deltalake.WriterProperties dataclass
","text":"WriterProperties(data_page_size_limit: Optional[int] = None, dictionary_page_size_limit: Optional[int] = None, data_page_row_count_limit: Optional[int] = None, write_batch_size: Optional[int] = None, max_row_group_size: Optional[int] = None, compression: Optional[Literal['UNCOMPRESSED', 'SNAPPY', 'GZIP', 'BROTLI', 'LZ4', 'ZSTD', 'LZ4_RAW']] = None, compression_level: Optional[int] = None)\n
A Writer Properties instance for the Rust parquet writer.
Create a Writer Properties instance for the Rust parquet writer:
Parameters:
Name Type Description Default data_page_size_limit
Optional[int]
Limit DataPage size to this in bytes.
None
dictionary_page_size_limit
Optional[int]
Limit the size of each DataPage to store dicts to this amount in bytes.
None
data_page_row_count_limit
Optional[int]
Limit the number of rows in each DataPage.
None
write_batch_size
Optional[int]
Splits internally to smaller batch size.
None
max_row_group_size
Optional[int]
Max number of rows in row group.
None
compression
Optional[Literal['UNCOMPRESSED', 'SNAPPY', 'GZIP', 'BROTLI', 'LZ4', 'ZSTD', 'LZ4_RAW']]
compression type.
None
compression_level
Optional[int]
If none and compression has a level, the default level will be used, only relevant for GZIP: levels (1-9), BROTLI: levels (1-11), ZSTD: levels (1-22),
None
","boost":10},{"location":"api/delta_writer/#convert-to-delta-tables","title":"Convert to Delta Tables","text":"","boost":10},{"location":"api/delta_writer/#deltalake.convert_to_deltalake","title":"deltalake.convert_to_deltalake","text":"convert_to_deltalake(uri: Union[str, Path], mode: Literal['error', 'ignore'] = 'error', partition_by: Optional[pa.Schema] = None, partition_strategy: Optional[Literal['hive']] = None, name: Optional[str] = None, description: Optional[str] = None, configuration: Optional[Mapping[str, Optional[str]]] = None, storage_options: Optional[Dict[str, str]] = None, custom_metadata: Optional[Dict[str, str]] = None) -> None\n
Convert
parquet tables to delta
tables.
Currently only HIVE partitioned tables are supported. Convert to delta
creates a transaction log commit with add actions, and additional properties provided such as configuration, name, and description.
Parameters:
Name Type Description Default uri
Union[str, Path]
URI of a table.
required partition_by
Optional[Schema]
Optional partitioning schema if table is partitioned.
None
partition_strategy
Optional[Literal['hive']]
Optional partition strategy to read and convert
None
mode
Literal['error', 'ignore']
How to handle existing data. Default is to error if table already exists. If 'ignore', will not convert anything if table already exists.
'error'
name
Optional[str]
User-provided identifier for this table.
None
description
Optional[str]
User-provided description for this table.
None
configuration
Optional[Mapping[str, Optional[str]]]
A map containing configuration options for the metadata action.
None
storage_options
Optional[Dict[str, str]]
options passed to the native delta filesystem. Unused if 'filesystem' is defined.
None
custom_metadata
Optional[Dict[str, str]]
custom metadata that will be added to the transaction commit
None
","boost":10},{"location":"api/exceptions/","title":"Exceptions","text":"","boost":2},{"location":"api/exceptions/#deltalake.exceptions.DeltaError","title":"deltalake.exceptions.DeltaError","text":" Bases: builtins.Exception
The base class for Delta-specific errors.
","boost":2},{"location":"api/exceptions/#deltalake.exceptions.DeltaProtocolError","title":"deltalake.exceptions.DeltaProtocolError","text":" Bases: _internal.DeltaError
Raised when a violation with the Delta protocol specs ocurred.
","boost":2},{"location":"api/exceptions/#deltalake.exceptions.TableNotFoundError","title":"deltalake.exceptions.TableNotFoundError","text":" Bases: _internal.DeltaError
Raised when a Delta table cannot be loaded from a location.
","boost":2},{"location":"api/exceptions/#deltalake.exceptions.CommitFailedError","title":"deltalake.exceptions.CommitFailedError","text":" Bases: _internal.DeltaError
Raised when a commit to a Delta table fails.
","boost":2},{"location":"api/schema/","title":"Schema","text":"","boost":2},{"location":"api/schema/#schema-and-field","title":"Schema and field","text":"Schemas, fields, and data types are provided in the deltalake.schema
submodule.
","boost":2},{"location":"api/schema/#deltalake.Schema","title":"deltalake.Schema","text":"Schema(fields: List[Field])\n
Bases: deltalake._internal.StructType
A Delta Lake schema
Create using a list of :class:Field
:
Schema([Field(\"x\", \"integer\"), Field(\"y\", \"string\")]) Schema([Field(x, PrimitiveType(\"integer\"), nullable=True), Field(y, PrimitiveType(\"string\"), nullable=True)])
Or create from a PyArrow schema:
import pyarrow as pa Schema.from_pyarrow(pa.schema({\"x\": pa.int32(), \"y\": pa.string()})) Schema([Field(x, PrimitiveType(\"integer\"), nullable=True), Field(y, PrimitiveType(\"string\"), nullable=True)])
","boost":2},{"location":"api/schema/#deltalake.Schema.invariants","title":"invariants","text":"invariants: List[Tuple[str, str]] = <attribute 'invariants' of 'deltalake._internal.Schema' objects>\n
","boost":2},{"location":"api/schema/#deltalake.Schema.from_json","title":"from_json staticmethod
","text":"from_json(schema_json) -> Schema\n
Create a new Schema from a JSON string.
Parameters:
Name Type Description Default json
str
a JSON string
required Example A schema has the same JSON format as a StructType.
Schema.from_json('''{\n \"type\": \"struct\",\n \"fields\": [{\"name\": \"x\", \"type\": \"integer\", \"nullable\": true, \"metadata\": {}}]\n }\n)'''\n# Returns Schema([Field(x, PrimitiveType(\"integer\"), nullable=True)])\n
","boost":2},{"location":"api/schema/#deltalake.Schema.from_pyarrow","title":"from_pyarrow staticmethod
","text":"from_pyarrow(data_type) -> Schema\n
Create a Schema from a PyArrow Schema type
Will raise TypeError
if the PyArrow type is not a primitive type.
Parameters:
Name Type Description Default type
Schema
A PyArrow Schema
required Returns:
Type Description Schema
a Schema
","boost":2},{"location":"api/schema/#deltalake.Schema.to_json","title":"to_json method descriptor
","text":"to_json() -> str\n
Get the JSON string representation of the Schema.
Returns:
Type Description str
a JSON string
Example A schema has the same JSON format as a StructType.
Schema([Field(\"x\", \"integer\")]).to_json()\n# Returns '{\"type\":\"struct\",\"fields\":[{\"name\":\"x\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}'\n
","boost":2},{"location":"api/schema/#deltalake.Schema.to_pyarrow","title":"to_pyarrow method descriptor
","text":"to_pyarrow(as_large_types: bool = False) -> pyarrow.Schema\n
Return equivalent PyArrow schema
Parameters:
Name Type Description Default as_large_types
bool
get schema with all variable size types (list, binary, string) as large variants (with int64 indices). This is for compatibility with systems like Polars that only support the large versions of Arrow types.
False
Returns:
Type Description Schema
a PyArrow Schema
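Example A minimal sketch of the Delta-to-PyArrow schema conversion (the schema contents are illustrative): from deltalake import Field, Schema\ndelta_schema = Schema([Field(\"x\", \"integer\"), Field(\"y\", \"string\")])\ndelta_schema.to_pyarrow()\n# Returns a pyarrow.Schema with an int32 and a string field\ndelta_schema.to_pyarrow(as_large_types=True)\n# Same schema, but with large variants such as large_string, for engines like Polars\n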
","boost":2},{"location":"api/schema/#deltalake.Field","title":"deltalake.Field","text":"Field(name: str, type: DataType, *, nullable: bool = True, metadata: Optional[Dict[str, Any]] = None)\n
","boost":2},{"location":"api/schema/#deltalake.Field.metadata","title":"metadata","text":"metadata: Dict[str, Any] = <attribute 'metadata' of 'deltalake._internal.Field' objects>\n
","boost":2},{"location":"api/schema/#deltalake.Field.name","title":"name","text":"name: str = <attribute 'name' of 'deltalake._internal.Field' objects>\n
","boost":2},{"location":"api/schema/#deltalake.Field.nullable","title":"nullable","text":"nullable: bool = <attribute 'nullable' of 'deltalake._internal.Field' objects>\n
","boost":2},{"location":"api/schema/#deltalake.Field.type","title":"type","text":"type: DataType = <attribute 'type' of 'deltalake._internal.Field' objects>\n
","boost":2},{"location":"api/schema/#deltalake.Field.from_json","title":"from_json staticmethod
","text":"from_json(field_json) -> Field\n
Create a Field from a JSON string.
Parameters:
Name Type Description Default json
str
the JSON string.
required Returns:
Type Description Field
Field
Example Field.from_json('''{\n \"name\": \"col\",\n \"type\": \"integer\",\n \"nullable\": true,\n \"metadata\": {}\n }'''\n)\n# Returns Field(col, PrimitiveType(\"integer\"), nullable=True)\n
","boost":2},{"location":"api/schema/#deltalake.Field.from_pyarrow","title":"from_pyarrow staticmethod
","text":"from_pyarrow(field: pyarrow.Field) -> Field\n
Create a Field from a PyArrow field. Note: this currently doesn't preserve field metadata.
Parameters:
Name Type Description Default field
Field
a PyArrow Field
required Returns:
Type Description Field
a Field
","boost":2},{"location":"api/schema/#deltalake.Field.to_json","title":"to_json method descriptor
","text":"to_json() -> str\n
Get the field as JSON string.
Returns:
Type Description str
a JSON string
Example Field(\"col\", \"integer\").to_json()\n# Returns '{\"name\":\"col\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}'\n
","boost":2},{"location":"api/schema/#deltalake.Field.to_pyarrow","title":"to_pyarrow method descriptor
","text":"to_pyarrow() -> pyarrow.Field\n
Convert to an equivalent PyArrow field. Note: this currently doesn't preserve field metadata.
Returns:
Type Description Field
a pyarrow Field
","boost":2},{"location":"api/schema/#data-types","title":"Data types","text":"","boost":2},{"location":"api/schema/#deltalake.schema.PrimitiveType","title":"deltalake.schema.PrimitiveType","text":"PrimitiveType(data_type: str)\n
","boost":2},{"location":"api/schema/#deltalake.schema.PrimitiveType.type","title":"type","text":"type: str = <attribute 'type' of 'deltalake._internal.PrimitiveType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.PrimitiveType.from_json","title":"from_json staticmethod
","text":"from_json(type_json) -> PrimitiveType\n
Create a PrimitiveType from a JSON string
The JSON representation for a primitive type is just a quoted string: PrimitiveType.from_json('\"integer\"')
Parameters:
Name Type Description Default json
str
a JSON string
required Returns:
Type Description PrimitiveType
a PrimitiveType type
","boost":2},{"location":"api/schema/#deltalake.schema.PrimitiveType.from_pyarrow","title":"from_pyarrow staticmethod
","text":"from_pyarrow(data_type) -> PrimitiveType\n
Create a PrimitiveType from a PyArrow datatype
Will raise TypeError
if the PyArrow type is not a primitive type.
Parameters:
Name Type Description Default type
DataType
A PyArrow DataType
required Returns:
Type Description PrimitiveType
a PrimitiveType
","boost":2},{"location":"api/schema/#deltalake.schema.PrimitiveType.to_pyarrow","title":"to_pyarrow method descriptor
","text":"to_pyarrow() -> pyarrow.DataType\n
Get the equivalent PyArrow type (pyarrow.DataType)
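Example A minimal sketch of converting between Delta and PyArrow primitive types: import pyarrow as pa\nfrom deltalake.schema import PrimitiveType\nPrimitiveType(\"integer\").to_pyarrow()\n# Returns the pyarrow int32 type\nPrimitiveType.from_pyarrow(pa.string())\n# Returns PrimitiveType(\"string\")\n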
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType","title":"deltalake.schema.ArrayType","text":"ArrayType(element_type: DataType, *, contains_null: bool = True)\n
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType.contains_null","title":"contains_null","text":"contains_null: bool = <attribute 'contains_null' of 'deltalake._internal.ArrayType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType.element_type","title":"element_type","text":"element_type: DataType = <attribute 'element_type' of 'deltalake._internal.ArrayType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType.type","title":"type","text":"type: Literal['array'] = <attribute 'type' of 'deltalake._internal.ArrayType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType.from_json","title":"from_json staticmethod
","text":"from_json(type_json) -> ArrayType\n
Create an ArrayType from a JSON string
Parameters:
Name Type Description Default json
str
a JSON string
required Returns:
Type Description ArrayType
an ArrayType
Example The JSON representation for an array type is an object with type
(set to \"array\"
), elementType
, and containsNull
.
ArrayType.from_json(\n '''{\n \"type\": \"array\",\n \"elementType\": \"integer\",\n \"containsNull\": false\n }'''\n)\n# Returns ArrayType(PrimitiveType(\"integer\"), contains_null=False)\n
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType.from_pyarrow","title":"from_pyarrow staticmethod
","text":"from_pyarrow(data_type) -> ArrayType\n
Create an ArrayType from a pyarrow.ListType.
Will raise TypeError
if a different PyArrow DataType is provided.
Parameters:
Name Type Description Default type
ListType
The PyArrow ListType
required Returns:
Type Description ArrayType
an ArrayType
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType.to_json","title":"to_json method descriptor
","text":"to_json() -> str\n
Get the JSON string representation of the type.
","boost":2},{"location":"api/schema/#deltalake.schema.ArrayType.to_pyarrow","title":"to_pyarrow method descriptor
","text":"to_pyarrow() -> pyarrow.ListType\n
Get the equivalent PyArrow type.
","boost":2},{"location":"api/schema/#deltalake.schema.MapType","title":"deltalake.schema.MapType","text":"MapType(key_type: DataType, value_type: DataType, *, value_contains_null: bool = True)\n
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.key_type","title":"key_type","text":"key_type: DataType = <attribute 'key_type' of 'deltalake._internal.MapType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.type","title":"type","text":"type: Literal['map'] = <attribute 'type' of 'deltalake._internal.MapType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.value_contains_null","title":"value_contains_null","text":"value_contains_null: bool = <attribute 'value_contains_null' of 'deltalake._internal.MapType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.value_type","title":"value_type","text":"value_type: DataType = <attribute 'value_type' of 'deltalake._internal.MapType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.from_json","title":"from_json staticmethod
","text":"from_json(type_json) -> MapType\n
Create a MapType from a JSON string
Parameters:
Name Type Description Default json
str
a JSON string
required Returns:
Type Description MapType
a MapType
Example The JSON representation for a map type is an object with type
(set to map
), keyType
, valueType
, and valueContainsNull
:
MapType.from_json(\n '''{\n \"type\": \"map\",\n \"keyType\": \"integer\",\n \"valueType\": \"string\",\n \"valueContainsNull\": true\n }'''\n)\n# Returns MapType(PrimitiveType(\"integer\"), PrimitiveType(\"string\"), value_contains_null=True)\n
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.from_pyarrow","title":"from_pyarrow staticmethod
","text":"from_pyarrow(data_type) -> MapType\n
Create a MapType from a PyArrow MapType.
Will raise TypeError
if passed a different type.
Parameters:
Name Type Description Default type
MapType
the PyArrow MapType
required Returns:
Type Description MapType
a MapType
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.to_json","title":"to_json method descriptor
","text":"to_json() -> str\n
Get JSON string representation of map type.
Returns:
Type Description str
a JSON string
","boost":2},{"location":"api/schema/#deltalake.schema.MapType.to_pyarrow","title":"to_pyarrow method descriptor
","text":"to_pyarrow() -> pyarrow.MapType\n
Get the equivalent PyArrow data type.
","boost":2},{"location":"api/schema/#deltalake.schema.StructType","title":"deltalake.schema.StructType","text":"StructType(fields: List[Field])\n
","boost":2},{"location":"api/schema/#deltalake.schema.StructType.fields","title":"fields","text":"fields: List[Field] = <attribute 'fields' of 'deltalake._internal.StructType' objects>\n
","boost":2},{"location":"api/schema/#deltalake.schema.StructType.type","title":"type","text":"type: Literal['struct'] = <attribute 'type' of 'deltalake._internal.StructType' objects>\n
The string \"struct\"
","boost":2},{"location":"api/schema/#deltalake.schema.StructType.from_json","title":"from_json staticmethod
","text":"from_json(type_json) -> StructType\n
Create a new StructType from a JSON string.
Parameters:
Name Type Description Default json
str
a JSON string
required Returns:
Type Description StructType
a StructType
Example StructType.from_json(\n '''{\n \"type\": \"struct\",\n \"fields\": [{\"name\": \"x\", \"type\": \"integer\", \"nullable\": true, \"metadata\": {}}]\n }'''\n)\n# Returns StructType([Field(x, PrimitiveType(\"integer\"), nullable=True)])\n
","boost":2},{"location":"api/schema/#deltalake.schema.StructType.from_pyarrow","title":"from_pyarrow staticmethod
","text":"from_pyarrow(data_type) -> StructType\n
Create a new StructType from a PyArrow struct type.
Will raise TypeError
if a different data type is provided.
Parameters:
Name Type Description Default type
StructType
a PyArrow struct type.
required Returns:
Type Description StructType
a StructType
","boost":2},{"location":"api/schema/#deltalake.schema.StructType.to_json","title":"to_json method descriptor
","text":"to_json() -> str\n
Get the JSON representation of the type.
Returns:
Type Description str
a JSON string
Example StructType([Field(\"x\", \"integer\")]).to_json()\n# Returns '{\"type\":\"struct\",\"fields\":[{\"name\":\"x\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}'\n
","boost":2},{"location":"api/schema/#deltalake.schema.StructType.to_pyarrow","title":"to_pyarrow method descriptor
","text":"to_pyarrow() -> pyarrow.StructType\n
Get the equivalent PyArrow StructType
Returns:
Type Description StructType
a PyArrow StructType
","boost":2},{"location":"api/storage/","title":"Storage","text":"The delta filesystem handler for the pyarrow engine writer.
","boost":2},{"location":"api/storage/#deltalake.fs.DeltaStorageHandler","title":"deltalake.fs.DeltaStorageHandler","text":"DeltaStorageHandler(root: str, options: dict[str, str] | None = None, known_sizes: dict[str, int] | None = None)\n
Bases: DeltaFileSystemHandler
, FileSystemHandler
DeltaStorageHandler is a concrete implementation of a PyArrow FileSystemHandler.
","boost":2},{"location":"api/storage/#deltalake.fs.DeltaStorageHandler.get_file_info_selector","title":"get_file_info_selector","text":"get_file_info_selector(selector: FileSelector) -> List[FileInfo]\n
Get info for the files defined by FileSelector.
Parameters:
Name Type Description Default selector
FileSelector
FileSelector object
required Returns:
Type Description List[FileInfo]
list of file info objects
","boost":2},{"location":"api/storage/#deltalake.fs.DeltaStorageHandler.open_input_file","title":"open_input_file","text":"open_input_file(path: str) -> pa.PythonFile\n
Open an input file for random access reading.
Parameters:
Name Type Description Default path
str
The source to open for reading.
required Returns:
Type Description PythonFile
NativeFile
","boost":2},{"location":"api/storage/#deltalake.fs.DeltaStorageHandler.open_input_stream","title":"open_input_stream","text":"open_input_stream(path: str) -> pa.PythonFile\n
Open an input stream for sequential reading.
Parameters:
Name Type Description Default path
str
The source to open for reading.
required Returns:
Type Description PythonFile
NativeFile
","boost":2},{"location":"api/storage/#deltalake.fs.DeltaStorageHandler.open_output_stream","title":"open_output_stream","text":"open_output_stream(path: str, metadata: Optional[Dict[str, str]] = None) -> pa.PythonFile\n
Open an output stream for sequential writing.
If the target already exists, existing data is truncated.
Parameters:
Name Type Description Default path
str
The source to open for writing.
required metadata
Optional[Dict[str, str]]
If not None, a mapping of string keys to string values.
None
Returns:
Type Description PythonFile
NativeFile
","boost":2},{"location":"api/delta_table/","title":"DeltaTable","text":"","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable","title":"deltalake.DeltaTable dataclass
","text":"DeltaTable(table_uri: Union[str, Path, os.PathLike[str]], version: Optional[int] = None, storage_options: Optional[Dict[str, str]] = None, without_files: bool = False, log_buffer_size: Optional[int] = None)\n
Represents a Delta Table
Create the Delta Table from a path with an optional version. Multiple StorageBackends are currently supported: AWS S3, Azure Data Lake Storage Gen2, Google Cloud Storage (GCS) and local URI. Depending on the storage backend used, you can provide option values using the storage_options
parameter.
Parameters:
Name Type Description Default table_uri
Union[str, Path, PathLike[str]]
the path of the DeltaTable
required version
Optional[int]
version of the DeltaTable
None
storage_options
Optional[Dict[str, str]]
a dictionary of the options to use for the storage backend
None
without_files
bool
If True, will load the table without tracking files. Some append-only applications might have no need to track files, so the DeltaTable can be loaded with a significant memory reduction.
False
log_buffer_size
Optional[int]
Number of files to buffer when reading the commit log. A positive integer. Setting a value greater than 1 results in concurrent calls to the storage api. This can decrease latency if there are many files in the log since the last checkpoint, but will also increase memory usage. Possible rate limits of the storage backend should also be considered for optimal performance. Defaults to 4 * number of cpus.
None
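Example A minimal sketch; the S3 path and the storage option values are placeholders for your own configuration: from deltalake import DeltaTable\ndt = DeltaTable(\n    \"s3://my-bucket/my_table\",\n    storage_options={\"AWS_REGION\": \"us-east-1\"},\n)\ndt.version()\n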
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.alter","title":"alter property
","text":"alter: TableAlterer\n
Namespace for all table alter related methods.
Returns:
Name Type Description TableAlterer
TableAlterer
TableAlterer Object
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.optimize","title":"optimize property
","text":"optimize: TableOptimizer\n
Namespace for all table optimize related methods.
Returns:
Name Type Description TableOptimizer
TableOptimizer
TableOptimizer Object
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.cleanup_metadata","title":"cleanup_metadata","text":"cleanup_metadata() -> None\n
Delete expired log files created before the current version of the table. The table log retention is based on the configuration.logRetentionDuration
value, 30 days by default.
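Example A minimal sketch, assuming a Delta table already exists at 'tmp': from deltalake import DeltaTable\ndt = DeltaTable(\"tmp\")\ndt.cleanup_metadata()\n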
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.create","title":"create classmethod
","text":"create(table_uri: Union[str, Path], schema: Union[pyarrow.Schema, DeltaSchema], mode: Literal['error', 'append', 'overwrite', 'ignore'] = 'error', partition_by: Optional[Union[List[str], str]] = None, name: Optional[str] = None, description: Optional[str] = None, configuration: Optional[Mapping[str, Optional[str]]] = None, storage_options: Optional[Dict[str, str]] = None, custom_metadata: Optional[Dict[str, str]] = None) -> DeltaTable\n
CREATE
or CREATE_OR_REPLACE
a delta table given a table_uri.
Parameters:
Name Type Description Default table_uri
Union[str, Path]
URI of a table
required schema
Union[Schema, Schema]
Table schema
required mode
Literal['error', 'append', 'overwrite', 'ignore']
How to handle existing data. Default is to error if the table already exists. If 'append', raises a not-supported error if the table exists. If 'overwrite', will CREATE_OR_REPLACE
table. If 'ignore', will not do anything if table already exists. Defaults to \"error\".
'error'
partition_by
Optional[Union[List[str], str]]
List of columns to partition the table by.
None
name
Optional[str]
User-provided identifier for this table.
None
description
Optional[str]
User-provided description for this table.
None
configuration
Optional[Mapping[str, Optional[str]]]
A map containing configuration options for the metadata action.
None
storage_options
Optional[Dict[str, str]]
options passed to the object store crate.
None
custom_metadata
Optional[Dict[str, str]]
custom metadata that will be added to the transaction commit.
None
Returns:
Name Type Description DeltaTable
DeltaTable
created delta table
Example import pyarrow as pa\n\nfrom deltalake import DeltaTable\n\ndt = DeltaTable.create(\n table_uri=\"my_local_table\",\n schema=pa.schema(\n [pa.field(\"foo\", pa.string()), pa.field(\"bar\", pa.string())]\n ),\n mode=\"error\",\n partition_by=\"bar\",\n)\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.delete","title":"delete","text":"delete(predicate: Optional[str] = None, writer_properties: Optional[WriterProperties] = None, custom_metadata: Optional[Dict[str, str]] = None) -> Dict[str, Any]\n
Delete records from a Delta Table that satisfy a predicate.
When a predicate is not provided then all records are deleted from the Delta Table. Otherwise a scan of the Delta table is performed to mark any files that contain records that satisfy the predicate. Once files are determined they are rewritten without the records.
Parameters:
Name Type Description Default predicate
Optional[str]
a SQL where clause. If not passed, will delete all rows.
None
writer_properties
Optional[WriterProperties]
Pass writer properties to the Rust parquet writer.
None
custom_metadata
Optional[Dict[str, str]]
custom metadata that will be added to the transaction commit.
None
Returns:
Type Description Dict[str, Any]
the metrics from delete.
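Example A minimal sketch, assuming a Delta table at 'tmp' with an id column: from deltalake import DeltaTable\ndt = DeltaTable(\"tmp\")\ndt.delete(predicate=\"id = '1'\")  # delete only the matching rows\ndt.delete()  # delete all rows\n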
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.file_uris","title":"file_uris","text":"file_uris(partition_filters: Optional[List[Tuple[str, str, Any]]] = None) -> List[str]\n
Get the list of files as absolute URIs, including the scheme (e.g. \"s3://\").
Local files will be just plain absolute paths, without a scheme. (That is, no 'file://' prefix.)
Use the partition_filters parameter to retrieve a subset of files that match the given filters.
Parameters:
Name Type Description Default partition_filters
Optional[List[Tuple[str, str, Any]]]
the partition filters that will be used for getting the matched files
None
Returns:
Type Description List[str]
list of the .parquet files with an absolute URI referenced for the current version of the DeltaTable
Predicates are expressed in disjunctive normal form (DNF), like [(\"x\", \"=\", \"a\"), ...]. DNF allows arbitrary boolean logical combinations of single partition predicates. The innermost tuples each describe a single partition predicate. The list of inner predicates is interpreted as a conjunction (AND), forming a more selective, multi-partition predicate. Each tuple has the format (key, op, value) and compares the key with the value. The supported ops are: =
, !=
, in
, and not in
. If the op is in or not in, the value must be a collection such as a list, a set or a tuple. The supported type for value is str. Use empty string ''
for Null partition value.
Example (\"x\", \"=\", \"a\")\n(\"x\", \"!=\", \"a\")\n(\"y\", \"in\", [\"a\", \"b\", \"c\"])\n(\"z\", \"not in\", [\"a\",\"b\"])\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.files","title":"files","text":"files(partition_filters: Optional[List[Tuple[str, str, Any]]] = None) -> List[str]\n
Get the .parquet files of the DeltaTable.
The paths are as they are saved in the delta log, which may either be relative to the table root or absolute URIs.
Parameters:
Name Type Description Default partition_filters
Optional[List[Tuple[str, str, Any]]]
the partition filters that will be used for getting the matched files
None
Returns:
Type Description List[str]
list of the .parquet files referenced for the current version of the DeltaTable
Predicates are expressed in disjunctive normal form (DNF), like [(\"x\", \"=\", \"a\"), ...]. DNF allows arbitrary boolean logical combinations of single partition predicates. The innermost tuples each describe a single partition predicate. The list of inner predicates is interpreted as a conjunction (AND), forming a more selective, multi-partition predicate. Each tuple has the format (key, op, value) and compares the key with the value. The supported ops are: =
, !=
, in
, and not in
. If the op is in or not in, the value must be a collection such as a list, a set or a tuple. The supported type for value is str. Use empty string ''
for Null partition value.
Example (\"x\", \"=\", \"a\")\n(\"x\", \"!=\", \"a\")\n(\"y\", \"in\", [\"a\", \"b\", \"c\"])\n(\"z\", \"not in\", [\"a\",\"b\"])\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.from_data_catalog","title":"from_data_catalog classmethod
","text":"from_data_catalog(data_catalog: DataCatalog, database_name: str, table_name: str, data_catalog_id: Optional[str] = None, version: Optional[int] = None, log_buffer_size: Optional[int] = None) -> DeltaTable\n
Create the Delta Table from a Data Catalog.
Parameters:
Name Type Description Default data_catalog
DataCatalog
the Catalog to use for getting the storage location of the Delta Table
required database_name
str
the database name inside the Data Catalog
required table_name
str
the table name inside the Data Catalog
required data_catalog_id
Optional[str]
the identifier of the Data Catalog
None
version
Optional[int]
version of the DeltaTable
None
log_buffer_size
Optional[int]
Number of files to buffer when reading the commit log. A positive integer. Setting a value greater than 1 results in concurrent calls to the storage api. This can decrease latency if there are many files in the log since the last checkpoint, but will also increase memory usage. Possible rate limits of the storage backend should also be considered for optimal performance. Defaults to 4 * number of cpus.
None
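Example A minimal sketch; the catalog, database and table names are placeholders and valid catalog credentials are assumed: from deltalake import DataCatalog, DeltaTable\ndt = DeltaTable.from_data_catalog(\n    data_catalog=DataCatalog.AWS,\n    database_name=\"my_database\",\n    table_name=\"my_table\",\n)\n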
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.get_add_actions","title":"get_add_actions","text":"get_add_actions(flatten: bool = False) -> pyarrow.RecordBatch\n
Return a dataframe with all current add actions.
Add actions represent the files that currently make up the table. This data is a low-level representation parsed from the transaction log.
Parameters:
Name Type Description Default flatten
bool
whether to flatten the schema. Partition value columns are given the prefix partition., statistics (null_count, min, and max) are given the prefixes null_count., min., and max., and tags are given the prefix tags.. Nested field names are concatenated with a period (.).
False
Returns:
Type Description RecordBatch
a PyArrow RecordBatch containing the add action data.
Example from pprint import pprint\nfrom deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data, partition_by=[\"x\"])\ndt = DeltaTable(\"tmp\")\ndf = dt.get_add_actions().to_pandas()\ndf[\"path\"].sort_values(ignore_index=True)\n0 x=1/0\n1 x=2/0\n2 x=3/0\n
df = dt.get_add_actions(flatten=True).to_pandas()\ndf[\"partition.x\"].sort_values(ignore_index=True)\n0 1\n1 2\n2 3\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.history","title":"history","text":"history(limit: Optional[int] = None) -> List[Dict[str, Any]]\n
Run the history command on the DeltaTable. The operations are returned in reverse chronological order.
Parameters:
Name Type Description Default limit
Optional[int]
the commit info limit to return
None
Returns:
Type Description List[Dict[str, Any]]
list of the commit infos registered in the transaction log
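Example A minimal sketch, assuming a Delta table already exists at 'tmp': from deltalake import DeltaTable\ndt = DeltaTable(\"tmp\")\ndt.history(limit=1)\n# Returns a list with the most recent commit info, e.g. [{'timestamp': ..., 'operation': 'WRITE', ...}]\n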
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.load_as_version","title":"load_as_version","text":"load_as_version(version: Union[int, str, datetime]) -> None\n
Load/time travel a DeltaTable to a specified version number, or to the version that was current at a given timestamp. If a string is passed, it should be an RFC 3339 / ISO 8601 date and time string.
Parameters:
Name Type Description Default version
Union[int, str, datetime]
the identifier of the version of the DeltaTable to load
required Example Use a version number
dt = DeltaTable(\"test_table\")\ndt.load_as_version(1)\n
Use a datetime object
dt.load_as_version(datetime(2023,1,1))\n
Use a datetime in string format
dt.load_as_version(\"2018-01-26T18:30:09Z\")\ndt.load_as_version(\"2018-12-19T16:39:57-08:00\")\ndt.load_as_version(\"2018-01-26T18:30:09.453+00:00\")\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.load_version","title":"load_version","text":"load_version(version: int) -> None\n
Load a DeltaTable with a specified version.
Deprecated
Load_version and load_with_datetime have been combined into DeltaTable.load_as_version
.
Parameters:
Name Type Description Default version
int
the identifier of the version of the DeltaTable to load
required","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.load_with_datetime","title":"load_with_datetime","text":"load_with_datetime(datetime_string: str) -> None\n
Time travel the Delta table to the latest version that was created at or before the provided datetime_string
argument. The datetime_string
argument should be an RFC 3339 and ISO 8601 date and time string.
Deprecated
Load_version and load_with_datetime have been combined into DeltaTable.load_as_version
.
Parameters:
Name Type Description Default datetime_string
str
the identifier of the datetime point of the DeltaTable to load
required Example \"2018-01-26T18:30:09Z\"\n\"2018-12-19T16:39:57-08:00\"\n\"2018-01-26T18:30:09.453+00:00\"\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.merge","title":"merge","text":"merge(source: Union[pyarrow.Table, pyarrow.RecordBatch, pyarrow.RecordBatchReader, ds.Dataset, pandas.DataFrame], predicate: str, source_alias: Optional[str] = None, target_alias: Optional[str] = None, error_on_type_mismatch: bool = True, writer_properties: Optional[WriterProperties] = None, large_dtypes: bool = True, custom_metadata: Optional[Dict[str, str]] = None) -> TableMerger\n
Pass the source data that you want to merge into the target Delta table, along with a predicate in SQL-like format. You can also specify what to do when the underlying data types do not match those of the target table.
Parameters:
Name Type Description Default source
Union[Table, RecordBatch, RecordBatchReader, Dataset, DataFrame]
source data
required predicate
str
SQL like predicate on how to merge
required source_alias
Optional[str]
Alias for the source table
None
target_alias
Optional[str]
Alias for the target table
None
error_on_type_mismatch
bool
specify whether merge should raise an error if data types are mismatched. Defaults to True.
True
writer_properties
Optional[WriterProperties]
Pass writer properties to the Rust parquet writer
None
large_dtypes
bool
If True, the data schema is kept in large_dtypes.
True
custom_metadata
Optional[Dict[str, str]]
custom metadata that will be added to the transaction commit.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.metadata","title":"metadata","text":"metadata() -> Metadata\n
Get the current metadata of the DeltaTable.
Returns:
Type Description Metadata
the current Metadata registered in the transaction log
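Example A minimal sketch, assuming a Delta table already exists at 'tmp': from deltalake import DeltaTable\nmeta = DeltaTable(\"tmp\").metadata()\nmeta.name, meta.partition_columns, meta.configuration\n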
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.protocol","title":"protocol","text":"protocol() -> ProtocolVersions\n
Get the reader and writer protocol versions of the DeltaTable.
Returns:
Type Description ProtocolVersions
the current ProtocolVersions registered in the transaction log
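Example A minimal sketch, assuming a Delta table already exists at 'tmp'; the attribute names follow the ProtocolVersions tuple: from deltalake import DeltaTable\nprotocol = DeltaTable(\"tmp\").protocol()\nprotocol.min_reader_version, protocol.min_writer_version\n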
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.repair","title":"repair","text":"repair(dry_run: bool = False, custom_metadata: Optional[Dict[str, str]] = None) -> Dict[str, Any]\n
Repair the Delta Table by auditing active files that do not exist in the underlying filesystem and removing them. This can be useful when there are accidental deletions or corrupted files.
Active files are ones that have an add action in the log, but no corresponding remove action. This operation creates a new FSCK transaction containing a remove action for each of the missing or corrupted files.
Parameters:
Name Type Description Default dry_run
bool
when enabled, only list the files; otherwise add remove actions to the transaction log. Defaults to False.
False
custom_metadata
Optional[Dict[str, str]]
custom metadata that will be added to the transaction commit.
None
Returns: The metrics from repair (FSCK) action.
Example from deltalake import DeltaTable\ndt = DeltaTable('TEST')\ndt.repair(dry_run=False)\n
Results in {'dry_run': False, 'files_removed': ['6-0d084325-6885-4847-b008-82c1cf30674c-0.parquet', '5-4fba1d3e-3e20-4de1-933d-a8e13ac59f53-0.parquet']}\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.restore","title":"restore","text":"restore(target: Union[int, datetime, str], *, ignore_missing_files: bool = False, protocol_downgrade_allowed: bool = False, custom_metadata: Optional[Dict[str, str]] = None) -> Dict[str, Any]\n
Run the Restore command on the Delta Table: restore table to a given version or datetime.
Parameters:
Name Type Description Default target
Union[int, datetime, str]
the version to restore to, which can be represented as an int, a date string, or a datetime.
required ignore_missing_files
bool
whether the operation should carry on when some data files are missing.
False
protocol_downgrade_allowed
bool
whether to allow the protocol version to be downgraded as part of the restore operation.
False
custom_metadata
Optional[Dict[str, str]]
custom metadata that will be added to the transaction commit.
None
Returns:
Type Description Dict[str, Any]
the metrics from restore.
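Example A minimal sketch, assuming a Delta table at 'tmp' with at least two versions: from deltalake import DeltaTable\ndt = DeltaTable(\"tmp\")\ndt.restore(1)  # restore to version 1\ndt.restore(\"2023-01-01T00:00:00Z\")  # or restore to a point in time\n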
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.schema","title":"schema","text":"schema() -> DeltaSchema\n
Get the current schema of the DeltaTable.
Returns:
Type Description Schema
the current Schema registered in the transaction log
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.to_pandas","title":"to_pandas","text":"to_pandas(partitions: Optional[List[Tuple[str, str, Any]]] = None, columns: Optional[List[str]] = None, filesystem: Optional[Union[str, pa_fs.FileSystem]] = None, filters: Optional[FilterType] = None) -> pandas.DataFrame\n
Build a pandas dataframe using data from the DeltaTable.
Parameters:
Name Type Description Default partitions
Optional[List[Tuple[str, str, Any]]]
A list of partition filters, see help(DeltaTable.files_by_partitions) for filter syntax
None
columns
Optional[List[str]]
The columns to project. This can be a list of column names to include (order and duplicates will be preserved)
None
filesystem
Optional[Union[str, FileSystem]]
A concrete implementation of the Pyarrow FileSystem or a fsspec-compatible interface. If None, the first file path will be used to determine the right FileSystem
None
filters
Optional[FilterType]
A disjunctive normal form (DNF) predicate for filtering rows. If you pass a filter you do not need to pass partitions
None
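Example A minimal sketch, assuming a Delta table at 'tmp' with columns x and y: from deltalake import DeltaTable\ndt = DeltaTable(\"tmp\")\ndf = dt.to_pandas(columns=[\"x\"], filters=[(\"y\", \"=\", 5)])\n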
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.to_pyarrow_dataset","title":"to_pyarrow_dataset","text":"to_pyarrow_dataset(partitions: Optional[List[Tuple[str, str, Any]]] = None, filesystem: Optional[Union[str, pa_fs.FileSystem]] = None, parquet_read_options: Optional[ParquetReadOptions] = None) -> pyarrow.dataset.Dataset\n
Build a PyArrow Dataset using data from the DeltaTable.
Parameters:
Name Type Description Default partitions
Optional[List[Tuple[str, str, Any]]]
A list of partition filters, see help(DeltaTable.files_by_partitions) for filter syntax
None
filesystem
Optional[Union[str, FileSystem]]
A concrete implementation of the Pyarrow FileSystem or a fsspec-compatible interface. If None, the first file path will be used to determine the right FileSystem
None
parquet_read_options
Optional[ParquetReadOptions]
Optional read options for Parquet. Use this to handle INT96 to timestamp conversion for edge cases like 0001-01-01 or 9999-12-31
None
More info: https://arrow.apache.org/docs/python/generated/pyarrow.dataset.ParquetReadOptions.html
Returns:
Type Description Dataset
the PyArrow dataset in PyArrow
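Example A minimal sketch, assuming a Delta table at 'tmp' with a numeric column x: import pyarrow.dataset as ds\nfrom deltalake import DeltaTable\ndataset = DeltaTable(\"tmp\").to_pyarrow_dataset()\ndataset.to_table(filter=ds.field(\"x\") > 1).to_pandas()\n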
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.to_pyarrow_table","title":"to_pyarrow_table","text":"to_pyarrow_table(partitions: Optional[List[Tuple[str, str, Any]]] = None, columns: Optional[List[str]] = None, filesystem: Optional[Union[str, pa_fs.FileSystem]] = None, filters: Optional[FilterType] = None) -> pyarrow.Table\n
Build a PyArrow Table using data from the DeltaTable.
Parameters:
Name Type Description Default partitions
Optional[List[Tuple[str, str, Any]]]
A list of partition filters, see help(DeltaTable.files_by_partitions) for filter syntax
None
columns
Optional[List[str]]
The columns to project. This can be a list of column names to include (order and duplicates will be preserved)
None
filesystem
Optional[Union[str, FileSystem]]
A concrete implementation of the Pyarrow FileSystem or a fsspec-compatible interface. If None, the first file path will be used to determine the right FileSystem
None
filters
Optional[FilterType]
A disjunctive normal form (DNF) predicate for filtering rows. If you pass a filter you do not need to pass partitions
None
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.update","title":"update","text":"update(updates: Optional[Dict[str, str]] = None, new_values: Optional[Dict[str, Union[int, float, str, datetime, bool, List[Any]]]] = None, predicate: Optional[str] = None, writer_properties: Optional[WriterProperties] = None, error_on_type_mismatch: bool = True, custom_metadata: Optional[Dict[str, str]] = None) -> Dict[str, Any]\n
UPDATE
records in the Delta Table that match an optional predicate. Either updates or new_values needs to be passed for the update to execute.
Parameters:
Name Type Description Default updates
Optional[Dict[str, str]]
a mapping of column name to update SQL expression.
None
new_values
Optional[Dict[str, Union[int, float, str, datetime, bool, List[Any]]]]
a mapping of column name to python datatype.
None
predicate
Optional[str]
a logical expression.
None
writer_properties
Optional[WriterProperties]
Pass writer properties to the Rust parquet writer.
None
error_on_type_mismatch
bool
specify whether update should raise an error if data types are mismatched. Defaults to True.
True
custom_metadata
Optional[Dict[str, str]]
custom metadata that will be added to the transaction commit.
None
Returns: the metrics from update
Example Update some row values with SQL predicate
This is equivalent to UPDATE table SET deleted = true WHERE id = '3'
from deltalake import write_deltalake, DeltaTable\nimport pandas as pd\ndf = pd.DataFrame(\n {\"id\": [\"1\", \"2\", \"3\"],\n \"deleted\": [False, False, False],\n \"price\": [10., 15., 20.]\n })\nwrite_deltalake(\"tmp\", df)\ndt = DeltaTable(\"tmp\")\ndt.update(predicate=\"id = '3'\", updates = {\"deleted\": 'True'})\n\n{'num_added_files': 1, 'num_removed_files': 1, 'num_updated_rows': 1, 'num_copied_rows': 2, 'execution_time_ms': ..., 'scan_time_ms': ...}\n
Update all row values
This is equivalent to UPDATE table SET deleted = true, id = concat(id, '_old')
.
dt.update(updates = {\"deleted\": 'True', \"id\": \"concat(id, '_old')\"})\n\n{'num_added_files': 1, 'num_removed_files': 1, 'num_updated_rows': 3, 'num_copied_rows': 0, 'execution_time_ms': ..., 'scan_time_ms': ...}\n
Use Python objects instead of SQL strings
Use the new_values
parameter instead of the updates
parameter. For example, this is equivalent to UPDATE table SET price = 150.10 WHERE id = '1'
dt.update(predicate=\"id = '1_old'\", new_values = {\"price\": 150.10})\n\n{'num_added_files': 1, 'num_removed_files': 1, 'num_updated_rows': 1, 'num_copied_rows': 2, 'execution_time_ms': ..., 'scan_time_ms': ...}\n
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.update_incremental","title":"update_incremental","text":"update_incremental() -> None\n
Updates the DeltaTable to the latest version by incrementally applying newer versions.
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.vacuum","title":"vacuum","text":"vacuum(retention_hours: Optional[int] = None, dry_run: bool = True, enforce_retention_duration: bool = True, custom_metadata: Optional[Dict[str, str]] = None) -> List[str]\n
Run the Vacuum command on the Delta Table: list and delete files that are no longer referenced by the Delta table and are older than the retention threshold.
Parameters:
Name Type Description Default retention_hours
Optional[int]
the retention threshold in hours, if none then the value from configuration.deletedFileRetentionDuration
is used or default of 1 week otherwise.
None
dry_run
bool
when enabled, only list the files; otherwise delete them
True
enforce_retention_duration
bool
when disabled, accepts retention hours smaller than the value from configuration.deletedFileRetentionDuration
.
True
custom_metadata
Optional[Dict[str, str]]
custom metadata that will be added to the transaction commit.
None
Returns: the list of files that are no longer referenced by the Delta Table and are older than the retention threshold.
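Example A minimal sketch, assuming a Delta table already exists at 'tmp': from deltalake import DeltaTable\ndt = DeltaTable(\"tmp\")\ndt.vacuum(retention_hours=168, dry_run=True)  # list candidate files only\ndt.vacuum(retention_hours=168, dry_run=False)  # actually delete them\n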
","boost":2},{"location":"api/delta_table/#deltalake.DeltaTable.version","title":"version","text":"version() -> int\n
Get the version of the DeltaTable.
Returns:
Type Description int
The current version of the DeltaTable
","boost":2},{"location":"api/delta_table/delta_table_alterer/","title":"TableAlterer","text":"","boost":10},{"location":"api/delta_table/delta_table_alterer/#deltalake.table.TableAlterer","title":"deltalake.table.TableAlterer","text":"TableAlterer(table: DeltaTable)\n
API for various table alteration commands.
","boost":10},{"location":"api/delta_table/delta_table_alterer/#deltalake.table.TableAlterer.add_constraint","title":"add_constraint","text":"add_constraint(constraints: Dict[str, str], custom_metadata: Optional[Dict[str, str]] = None) -> None\n
Add constraints to the table. Limited to a single constraint at a time.
Parameters:
Name Type Description Default constraints
Dict[str, str]
mapping of constraint name to SQL-expression to evaluate on write
required custom_metadata
Optional[Dict[str, str]]
custom metadata that will be added to the transaction commit.
None
Example:
from deltalake import DeltaTable\ndt = DeltaTable(\"test_table_constraints\")\ndt.alter.add_constraint({\n \"value_gt_5\": \"value > 5\",\n})\n
**Check configuration**\n```\ndt.metadata().configuration\n{'delta.constraints.value_gt_5': 'value > 5'}\n```\n
","boost":10},{"location":"api/delta_table/delta_table_merger/","title":"TableMerger","text":"","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger","title":"deltalake.table.TableMerger","text":"TableMerger(table: DeltaTable, source: pyarrow.RecordBatchReader, predicate: str, source_alias: Optional[str] = None, target_alias: Optional[str] = None, safe_cast: bool = True, writer_properties: Optional[WriterProperties] = None, custom_metadata: Optional[Dict[str, str]] = None)\n
API for various table MERGE
commands.
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.execute","title":"execute","text":"execute() -> Dict[str, Any]\n
Executes MERGE
with the previously provided settings in Rust with Apache Datafusion query engine.
Returns:
Name Type Description Dict
Dict[str, Any]
metrics
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.when_matched_delete","title":"when_matched_delete","text":"when_matched_delete(predicate: Optional[str] = None) -> TableMerger\n
Delete a matched row from the table only if the given predicate
(if specified) is true for the matched row. If not specified it deletes all matches.
Parameters:
Name Type Description Default predicate
(str | None, Optional)
SQL like predicate on when to delete.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
Example Delete on a predicate
from deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\n\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data)\ndt = DeltaTable(\"tmp\")\nnew_data = pa.table({\"x\": [2, 3], \"deleted\": [False, True]})\n\n(\n dt.merge(\n source=new_data,\n predicate='target.x = source.x',\n source_alias='source',\n target_alias='target')\n .when_matched_delete(\n predicate=\"source.deleted = true\")\n .execute()\n)\n{'num_source_rows': 2, 'num_target_rows_inserted': 0, 'num_target_rows_updated': 0, 'num_target_rows_deleted': 1, 'num_target_rows_copied': 2, 'num_output_rows': 2, 'num_target_files_added': 1, 'num_target_files_removed': 1, 'execution_time_ms': ..., 'scan_time_ms': ..., 'rewrite_time_ms': ...}\n\ndt.to_pandas().sort_values(\"x\", ignore_index=True)\n x y\n0 1 4\n1 2 5\n
Delete all records that were matched
dt = DeltaTable(\"tmp\")\n(\n dt.merge(\n source=new_data,\n predicate='target.x = source.x',\n source_alias='source',\n target_alias='target')\n .when_matched_delete()\n .execute()\n)\n{'num_source_rows': 2, 'num_target_rows_inserted': 0, 'num_target_rows_updated': 0, 'num_target_rows_deleted': 1, 'num_target_rows_copied': 1, 'num_output_rows': 1, 'num_target_files_added': 1, 'num_target_files_removed': 1, 'execution_time_ms': ..., 'scan_time_ms': ..., 'rewrite_time_ms': ...}\n\ndt.to_pandas()\n x y\n0 1 4\n
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.when_matched_update","title":"when_matched_update","text":"when_matched_update(updates: Dict[str, str], predicate: Optional[str] = None) -> TableMerger\n
Update a matched table row based on the rules defined by updates
. If a predicate
is specified, then it must evaluate to true for the row to be updated.
Parameters:
Name Type Description Default updates
Dict[str, str]
a mapping of column name to update SQL expression.
required predicate
Optional[str]
SQL like predicate on when to update.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
Example from deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\n\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data)\ndt = DeltaTable(\"tmp\")\nnew_data = pa.table({\"x\": [1], \"y\": [7]})\n\n(\n dt.merge(\n source=new_data,\n predicate=\"target.x = source.x\",\n source_alias=\"source\",\n target_alias=\"target\")\n .when_matched_update(updates={\"x\": \"source.x\", \"y\": \"source.y\"})\n .execute()\n)\n{'num_source_rows': 1, 'num_target_rows_inserted': 0, 'num_target_rows_updated': 1, 'num_target_rows_deleted': 0, 'num_target_rows_copied': 2, 'num_output_rows': 3, 'num_target_files_added': 1, 'num_target_files_removed': 1, 'execution_time_ms': ..., 'scan_time_ms': ..., 'rewrite_time_ms': ...}\n\ndt.to_pandas()\n x y\n0 1 7\n1 2 5\n2 3 6\n
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.when_matched_update_all","title":"when_matched_update_all","text":"when_matched_update_all(predicate: Optional[str] = None) -> TableMerger\n
Update all target fields with the corresponding source fields; source and target are required to have the same field names. If a predicate
is specified, then it must evaluate to true for the row to be updated.
Parameters:
Name Type Description Default predicate
Optional[str]
SQL like predicate on when to update all columns.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
Example from deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\n\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data)\ndt = DeltaTable(\"tmp\")\nnew_data = pa.table({\"x\": [1], \"y\": [7]})\n\n(\n dt.merge(\n source=new_data,\n predicate=\"target.x = source.x\",\n source_alias=\"source\",\n target_alias=\"target\")\n .when_matched_update_all()\n .execute()\n)\n{'num_source_rows': 1, 'num_target_rows_inserted': 0, 'num_target_rows_updated': 1, 'num_target_rows_deleted': 0, 'num_target_rows_copied': 2, 'num_output_rows': 3, 'num_target_files_added': 1, 'num_target_files_removed': 1, 'execution_time_ms': ..., 'scan_time_ms': ..., 'rewrite_time_ms': ...}\n\ndt.to_pandas()\n x y\n0 1 7\n1 2 5\n2 3 6\n
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.when_not_matched_by_source_delete","title":"when_not_matched_by_source_delete","text":"when_not_matched_by_source_delete(predicate: Optional[str] = None) -> TableMerger\n
Delete a target row that has no matches in the source from the table only if the given predicate
(if specified) is true for the target row.
Parameters:
Name Type Description Default predicate
Optional[str]
SQL like predicate on when to delete when not matched by source.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
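Example A minimal sketch in the spirit of the other merge examples; the table path and data are illustrative: from deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\n\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data)\ndt = DeltaTable(\"tmp\")\nnew_data = pa.table({\"x\": [2, 3]})\n\n(\n    dt.merge(\n        source=new_data,\n        predicate=\"target.x = source.x\",\n        source_alias=\"source\",\n        target_alias=\"target\")\n    .when_not_matched_by_source_delete()\n    .execute()\n)\n# Target rows with no match in the source (here x = 1) are deleted\n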
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.when_not_matched_by_source_update","title":"when_not_matched_by_source_update","text":"when_not_matched_by_source_update(updates: Dict[str, str], predicate: Optional[str] = None) -> TableMerger\n
Update a target row that has no matches in the source based on the rules defined by updates
. If a predicate
is specified, then it must evaluate to true for the row to be updated.
Parameters:
Name Type Description Default updates
Dict[str, str]
a mapping of column name to update SQL expression.
required predicate
Optional[str]
SQL like predicate on when to update.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
Example from deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\n\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data)\ndt = DeltaTable(\"tmp\")\nnew_data = pa.table({\"x\": [2, 3, 4]})\n\n(\n dt.merge(\n source=new_data,\n predicate='target.x = source.x',\n source_alias='source',\n target_alias='target')\n .when_not_matched_by_source_update(\n predicate = \"y > 3\",\n updates = {\"y\": \"0\"})\n .execute()\n)\n{'num_source_rows': 3, 'num_target_rows_inserted': 0, 'num_target_rows_updated': 1, 'num_target_rows_deleted': 0, 'num_target_rows_copied': 2, 'num_output_rows': 3, 'num_target_files_added': 1, 'num_target_files_removed': 1, 'execution_time_ms': ..., 'scan_time_ms': ..., 'rewrite_time_ms': ...}\n\ndt.to_pandas().sort_values(\"x\", ignore_index=True)\n x y\n0 1 0\n1 2 5\n2 3 6\n
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.when_not_matched_insert","title":"when_not_matched_insert","text":"when_not_matched_insert(updates: Dict[str, str], predicate: Optional[str] = None) -> TableMerger\n
Insert a new row into the target table based on the rules defined by updates
. If a predicate
is specified, then it must evaluate to true for the new row to be inserted.
Parameters:
Name Type Description Default updates
dict
a mapping of column name to insert SQL expression.
required predicate
(str | None, Optional)
SQL like predicate on when to insert.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
Example from deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\n\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data)\ndt = DeltaTable(\"tmp\")\nnew_data = pa.table({\"x\": [4], \"y\": [7]})\n\n(\n dt.merge(\n source=new_data,\n predicate=\"target.x = source.x\",\n source_alias=\"source\",\n target_alias=\"target\",)\n .when_not_matched_insert(\n updates={\n \"x\": \"source.x\",\n \"y\": \"source.y\",\n })\n .execute()\n)\n{'num_source_rows': 1, 'num_target_rows_inserted': 1, 'num_target_rows_updated': 0, 'num_target_rows_deleted': 0, 'num_target_rows_copied': 3, 'num_output_rows': 4, 'num_target_files_added': 1, 'num_target_files_removed': 1, 'execution_time_ms': ..., 'scan_time_ms': ..., 'rewrite_time_ms': ...}\n\ndt.to_pandas().sort_values(\"x\", ignore_index=True)\n x y\n0 1 4\n1 2 5\n2 3 6\n3 4 7\n
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.when_not_matched_insert_all","title":"when_not_matched_insert_all","text":"when_not_matched_insert_all(predicate: Optional[str] = None) -> TableMerger\n
Insert a new row into the target table, copying all source fields to the target fields. Source and target are required to have the same field names. If a predicate
is specified, then it must evaluate to true for the new row to be inserted.
Parameters:
Name Type Description Default predicate
Optional[str]
SQL like predicate on when to insert.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
Example from deltalake import DeltaTable, write_deltalake\nimport pyarrow as pa\n\ndata = pa.table({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\nwrite_deltalake(\"tmp\", data)\ndt = DeltaTable(\"tmp\")\nnew_data = pa.table({\"x\": [4], \"y\": [7]})\n\n(\n dt.merge(\n source=new_data,\n predicate='target.x = source.x',\n source_alias='source',\n target_alias='target')\n .when_not_matched_insert_all()\n .execute()\n)\n{'num_source_rows': 1, 'num_target_rows_inserted': 1, 'num_target_rows_updated': 0, 'num_target_rows_deleted': 0, 'num_target_rows_copied': 3, 'num_output_rows': 4, 'num_target_files_added': 1, 'num_target_files_removed': 1, 'execution_time_ms': ..., 'scan_time_ms': ..., 'rewrite_time_ms': ...}\n\ndt.to_pandas().sort_values(\"x\", ignore_index=True)\n x y\n0 1 4\n1 2 5\n2 3 6\n3 4 7\n
","boost":2},{"location":"api/delta_table/delta_table_merger/#deltalake.table.TableMerger.with_writer_properties","title":"with_writer_properties","text":"with_writer_properties(data_page_size_limit: Optional[int] = None, dictionary_page_size_limit: Optional[int] = None, data_page_row_count_limit: Optional[int] = None, write_batch_size: Optional[int] = None, max_row_group_size: Optional[int] = None) -> TableMerger\n
Deprecated
Use .merge(writer_properties = WriterProperties())
instead
Pass writer properties to the Rust parquet writer, see options https://arrow.apache.org/rust/parquet/file/properties/struct.WriterProperties.html:
Parameters:
Name Type Description Default data_page_size_limit
Optional[int]
Limit DataPage size to this in bytes.
None
dictionary_page_size_limit
Optional[int]
Limit the size of each DataPage to store dicts to this amount in bytes.
None
data_page_row_count_limit
Optional[int]
Limit the number of rows in each DataPage.
None
write_batch_size
Optional[int]
Splits internally to smaller batch size.
None
max_row_group_size
Optional[int]
Max number of rows in row group.
None
Returns:
Name Type Description TableMerger
TableMerger
TableMerger Object
","boost":2},{"location":"api/delta_table/delta_table_optimizer/","title":"TableOptimizer","text":"","boost":10},{"location":"api/delta_table/delta_table_optimizer/#deltalake.table.TableOptimizer","title":"deltalake.table.TableOptimizer","text":"TableOptimizer(table: DeltaTable)\n
API for various table optimization commands.
","boost":10},{"location":"api/delta_table/delta_table_optimizer/#deltalake.table.TableOptimizer.compact","title":"compact","text":"compact(partition_filters: Optional[FilterType] = None, target_size: Optional[int] = None, max_concurrent_tasks: Optional[int] = None, min_commit_interval: Optional[Union[int, timedelta]] = None, writer_properties: Optional[WriterProperties] = None, custom_metadata: Optional[Dict[str, str]] = None) -> Dict[str, Any]\n
Compacts small files to reduce the total number of files in the table.
This operation is idempotent; if run twice on the same table (assuming it has not been updated) it will do nothing the second time.
If this operation happens concurrently with any operations other than append, it will fail.
Parameters:
Name Type Description Default partition_filters
Optional[FilterType]
the partition filters that will be used for getting the matched files
None
target_size
Optional[int]
desired file size after bin-packing files, in bytes. If not provided, will attempt to read the table configuration value delta.targetFileSize
. If that value isn't set, will use default value of 256MB.
None
max_concurrent_tasks
Optional[int]
the maximum number of concurrent tasks to use for file compaction. Defaults to number of CPUs. More concurrent tasks can make compaction faster, but will also use more memory.
None
min_commit_interval
Optional[Union[int, timedelta]]
minimum interval in seconds, or as a timedelta, before a new commit is created. This is useful for long-running executions. Set to 0 or timedelta(0) if you want a commit per partition.
None
writer_properties
Optional[WriterProperties]
Pass writer properties to the Rust parquet writer.
None
custom_metadata
Optional[Dict[str, str]]
custom metadata that will be added to the transaction commit.
None
Returns:
Type Description Dict[str, Any]
the metrics from optimize
Example Use a timedelta object to specify the seconds, minutes or hours of the interval.
from deltalake import DeltaTable, write_deltalake\nfrom datetime import timedelta\nimport pyarrow as pa\n\nwrite_deltalake(\"tmp\", pa.table({\"x\": [1], \"y\": [4]}))\nwrite_deltalake(\"tmp\", pa.table({\"x\": [2], \"y\": [5]}), mode=\"append\")\n\ndt = DeltaTable(\"tmp\")\ntime_delta = timedelta(minutes=10)\ndt.optimize.compact(min_commit_interval=time_delta)\n{'numFilesAdded': 1, 'numFilesRemoved': 2, 'filesAdded': ..., 'filesRemoved': ..., 'partitionsOptimized': 1, 'numBatches': 2, 'totalConsideredFiles': 2, 'totalFilesSkipped': 0, 'preserveInsertionOrder': True}\n
","boost":10},{"location":"api/delta_table/delta_table_optimizer/#deltalake.table.TableOptimizer.z_order","title":"z_order","text":"z_order(columns: Iterable[str], partition_filters: Optional[FilterType] = None, target_size: Optional[int] = None, max_concurrent_tasks: Optional[int] = None, max_spill_size: int = 20 * 1024 * 1024 * 1024, min_commit_interval: Optional[Union[int, timedelta]] = None, writer_properties: Optional[WriterProperties] = None, custom_metadata: Optional[Dict[str, str]] = None) -> Dict[str, Any]\n
Reorders the data using a Z-order curve to improve data skipping.
This also performs compaction, so the same parameters as compact() apply.
Parameters:
Name Type Description Default columns
Iterable[str]
the columns to use for Z-ordering. There must be at least one column. partition_filters: the partition filters that will be used for getting the matched files
required target_size
Optional[int]
desired file size after bin-packing files, in bytes. If not provided, will attempt to read the table configuration value delta.targetFileSize
. If that value isn't set, will use default value of 256MB.
None
max_concurrent_tasks
Optional[int]
the maximum number of concurrent tasks to use for file compaction. Defaults to number of CPUs. More concurrent tasks can make compaction faster, but will also use more memory.
None
max_spill_size
int
the maximum number of bytes to spill to disk. Defaults to 20GB.
20 * 1024 * 1024 * 1024
min_commit_interval
Optional[Union[int, timedelta]]
minimum interval in seconds, or as a timedelta, before a new commit is created. This is useful for long-running executions. Set to 0 or timedelta(0) if you want a commit per partition.
None
writer_properties
Optional[WriterProperties]
Pass writer properties to the Rust parquet writer.
None
custom_metadata
Optional[Dict[str, str]]
custom metadata that will be added to the transaction commit.
None
Returns:
Type Description Dict[str, Any]
the metrics from optimize
Example Use a timedelta object to specify the seconds, minutes or hours of the interval.
from deltalake import DeltaTable, write_deltalake\nfrom datetime import timedelta\nimport pyarrow as pa\n\nwrite_deltalake(\"tmp\", pa.table({\"x\": [1], \"y\": [4]}))\nwrite_deltalake(\"tmp\", pa.table({\"x\": [2], \"y\": [5]}), mode=\"append\")\n\ndt = DeltaTable(\"tmp\")\ntime_delta = timedelta(minutes=10)\ndt.optimize.z_order([\"x\"], min_commit_interval=time_delta)\n{'numFilesAdded': 1, 'numFilesRemoved': 2, 'filesAdded': ..., 'filesRemoved': ..., 'partitionsOptimized': 0, 'numBatches': 1, 'totalConsideredFiles': 2, 'totalFilesSkipped': 0, 'preserveInsertionOrder': True}\n
","boost":10},{"location":"api/delta_table/metadata/","title":"Metadata","text":"","boost":2},{"location":"api/delta_table/metadata/#deltalake.Metadata","title":"deltalake.Metadata dataclass
","text":"Metadata(table: RawDeltaTable)\n
Create a Metadata instance.
","boost":2},{"location":"api/delta_table/metadata/#deltalake.Metadata.configuration","title":"configuration property
","text":"configuration: Dict[str, str]\n
Return the DeltaTable properties.
","boost":2},{"location":"api/delta_table/metadata/#deltalake.Metadata.created_time","title":"created_time property
","text":"created_time: int\n
Return the time when this metadata action was created, in milliseconds since the Unix epoch.
","boost":2},{"location":"api/delta_table/metadata/#deltalake.Metadata.description","title":"description property
","text":"description: str\n
Return the user-provided description of the DeltaTable.
","boost":2},{"location":"api/delta_table/metadata/#deltalake.Metadata.id","title":"id property
","text":"id: int\n
Return the unique identifier of the DeltaTable.
","boost":2},{"location":"api/delta_table/metadata/#deltalake.Metadata.name","title":"name property
","text":"name: str\n
Return the user-provided identifier of the DeltaTable.
","boost":2},{"location":"api/delta_table/metadata/#deltalake.Metadata.partition_columns","title":"partition_columns property
","text":"partition_columns: List[str]\n
Return an array containing the names of the partitioned columns of the DeltaTable.
","boost":2},{"location":"how-delta-lake-works/architecture-of-delta-table/","title":"Architecture of a Delta Lake table","text":"A Delta table consists of Parquet files that contain data and a transaction log that stores metadata about the transactions.
Let's create a Delta table, perform some operations, and inspect the files that are created.
"},{"location":"how-delta-lake-works/architecture-of-delta-table/#delta-lake-transaction-examples","title":"Delta Lake transaction examples","text":"Start by creating a pandas DataFrame and writing it out to a Delta table.
import pandas as pd\nfrom deltalake import DeltaTable, write_deltalake\n\ndf = pd.DataFrame({\"num\": [1, 2, 3], \"letter\": [\"a\", \"b\", \"c\"]})\nwrite_deltalake(\"tmp/some-table\", df)\n
Now inspect the files created in storage:
tmp/some-table\n\u251c\u2500\u2500 0-62dffa23-bbe1-4496-8fb5-bff6724dc677-0.parquet\n\u2514\u2500\u2500 _delta_log\n \u2514\u2500\u2500 00000000000000000000.json\n
The Parquet file stores the data that was written. The _delta_log
directory stores metadata about the transactions. Let's inspect the _delta_log/00000000000000000000.json
file.
{\n \"protocol\": {\n \"minReaderVersion\": 1,\n \"minWriterVersion\": 1\n }\n}\n{\n \"metaData\": {\n \"id\": \"b96ea1a2-1830-4da2-8827-5334cc6104ed\",\n \"name\": null,\n \"description\": null,\n \"format\": {\n \"provider\": \"parquet\",\n \"options\": {}\n },\n \"schemaString\": \"{\\\"type\\\":\\\"struct\\\",\\\"fields\\\":[{\\\"name\\\":\\\"num\\\",\\\"type\\\":\\\"long\\\",\\\"nullable\\\":true,\\\"metadata\\\":{}},{\\\"name\\\":\\\"letter\\\",\\\"type\\\":\\\"string\\\",\\\"nullable\\\":true,\\\"metadata\\\":{}}]}\",\n \"partitionColumns\": [],\n \"createdTime\": 1701740315599,\n \"configuration\": {}\n }\n}\n{\n \"add\": {\n \"path\": \"0-62dffa23-bbe1-4496-8fb5-bff6724dc677-0.parquet\",\n \"size\": 2208,\n \"partitionValues\": {},\n \"modificationTime\": 1701740315597,\n \"dataChange\": true,\n \"stats\": \"{\\\"numRecords\\\": 3, \\\"minValues\\\": {\\\"num\\\": 1, \\\"letter\\\": \\\"a\\\"}, \\\"maxValues\\\": {\\\"num\\\": 3, \\\"letter\\\": \\\"c\\\"}, \\\"nullCount\\\": {\\\"num\\\": 0, \\\"letter\\\": 0}}\"\n }\n}\n{\n \"commitInfo\": {\n \"timestamp\": 1701740315602,\n \"operation\": \"CREATE TABLE\",\n \"operationParameters\": {\n \"location\": \"file:///Users/matthew.powers/Documents/code/delta/delta-examples/notebooks/python-deltalake/tmp/some-table\",\n \"metadata\": \"{\\\"configuration\\\":{},\\\"created_time\\\":1701740315599,\\\"description\\\":null,\\\"format\\\":{\\\"options\\\":{},\\\"provider\\\":\\\"parquet\\\"},\\\"id\\\":\\\"b96ea1a2-1830-4da2-8827-5334cc6104ed\\\",\\\"name\\\":null,\\\"partition_columns\\\":[],\\\"schema\\\":{\\\"fields\\\":[{\\\"metadata\\\":{},\\\"name\\\":\\\"num\\\",\\\"nullable\\\":true,\\\"type\\\":\\\"long\\\"},{\\\"metadata\\\":{},\\\"name\\\":\\\"letter\\\",\\\"nullable\\\":true,\\\"type\\\":\\\"string\\\"}],\\\"type\\\":\\\"struct\\\"}}\",\n \"protocol\": \"{\\\"minReaderVersion\\\":1,\\\"minWriterVersion\\\":1}\",\n \"mode\": \"ErrorIfExists\"\n },\n \"clientVersion\": \"delta-rs.0.17.0\"\n }\n}\n
The transaction log file contains the following information (see the sketch after this list for how to read it programmatically):
- the files added to the Delta table
- schema of the files
- column level metadata including the min/max value for each file
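Here is a minimal sketch of reading that file-level metadata programmatically with DeltaTable.get_add_actions (shown in more detail in the Examining a Table section; the table path reuses the example above):
from deltalake import DeltaTable\n\ndt = DeltaTable(\"tmp/some-table\")\n# one row per data file: path, size, and per-column min/max statistics from the log\nprint(dt.get_add_actions(flatten=True).to_pandas())\n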
Create another pandas DataFrame and append it to the Delta table to see how this transaction is recorded.
df = pd.DataFrame({\"num\": [8, 9], \"letter\": [\"dd\", \"ee\"]})\nwrite_deltalake(f\"{cwd}/tmp/delta-table\", df, mode=\"append\")\n
Here are the files in storage:
tmp/some-table\n\u251c\u2500\u2500 0-62dffa23-bbe1-4496-8fb5-bff6724dc677-0.parquet\n\u251c\u2500\u2500 1-57abb6fb-2249-43ba-a7be-cf09bcc230de-0.parquet\n\u2514\u2500\u2500 _delta_log\n \u251c\u2500\u2500 00000000000000000000.json\n \u2514\u2500\u2500 00000000000000000001.json\n
Here are the contents of the _delta_log/00000000000000000001.json
file:
{\n \"add\": {\n \"path\": \"1-57abb6fb-2249-43ba-a7be-cf09bcc230de-0.parquet\",\n \"size\": 2204,\n \"partitionValues\": {},\n \"modificationTime\": 1701740386169,\n \"dataChange\": true,\n \"stats\": \"{\\\"numRecords\\\": 2, \\\"minValues\\\": {\\\"num\\\": 8, \\\"letter\\\": \\\"dd\\\"}, \\\"maxValues\\\": {\\\"num\\\": 9, \\\"letter\\\": \\\"ee\\\"}, \\\"nullCount\\\": {\\\"num\\\": 0, \\\"letter\\\": 0}}\"\n }\n}\n{\n \"commitInfo\": {\n \"timestamp\": 1701740386169,\n \"operation\": \"WRITE\",\n \"operationParameters\": {\n \"partitionBy\": \"[]\",\n \"mode\": \"Append\"\n },\n \"clientVersion\": \"delta-rs.0.17.0\"\n }\n}\n
The transaction log records that the second file has been persisted in the Delta table.
Now create a third pandas DataFrame and overwrite the Delta table with the new data.
df = pd.DataFrame({\"num\": [11, 22], \"letter\": [\"aa\", \"bb\"]})\nwrite_deltalake(f\"{cwd}/tmp/delta-table\", df, mode=\"append\")\n
Here are the files in storage:
tmp/some-table\n\u251c\u2500\u2500 0-62dffa23-bbe1-4496-8fb5-bff6724dc677-0.parquet\n\u251c\u2500\u2500 1-57abb6fb-2249-43ba-a7be-cf09bcc230de-0.parquet\n\u251c\u2500\u2500 2-95ef2108-480c-4b89-96f0-ff9185dab9ad-0.parquet\n\u2514\u2500\u2500 _delta_log\n \u251c\u2500\u2500 00000000000000000000.json\n \u251c\u2500\u2500 00000000000000000001.json\n \u2514\u2500\u2500 00000000000000000002.json\n
Here are the contents of the _delta_log/00000000000000000002.json
file:
{\n \"add\": {\n \"path\": \"2-95ef2108-480c-4b89-96f0-ff9185dab9ad-0.parquet\",\n \"size\": 2204,\n \"partitionValues\": {},\n \"modificationTime\": 1701740465102,\n \"dataChange\": true,\n \"stats\": \"{\\\"numRecords\\\": 2, \\\"minValues\\\": {\\\"num\\\": 11, \\\"letter\\\": \\\"aa\\\"}, \\\"maxValues\\\": {\\\"num\\\": 22, \\\"letter\\\": \\\"bb\\\"}, \\\"nullCount\\\": {\\\"num\\\": 0, \\\"letter\\\": 0}}\"\n }\n}\n{\n \"remove\": {\n \"path\": \"0-62dffa23-bbe1-4496-8fb5-bff6724dc677-0.parquet\",\n \"deletionTimestamp\": 1701740465102,\n \"dataChange\": true,\n \"extendedFileMetadata\": false,\n \"partitionValues\": {},\n \"size\": 2208\n }\n}\n{\n \"remove\": {\n \"path\": \"1-57abb6fb-2249-43ba-a7be-cf09bcc230de-0.parquet\",\n \"deletionTimestamp\": 1701740465102,\n \"dataChange\": true,\n \"extendedFileMetadata\": false,\n \"partitionValues\": {},\n \"size\": 2204\n }\n}\n{\n \"commitInfo\": {\n \"timestamp\": 1701740465102,\n \"operation\": \"WRITE\",\n \"operationParameters\": {\n \"mode\": \"Overwrite\",\n \"partitionBy\": \"[]\"\n },\n \"clientVersion\": \"delta-rs.0.17.0\"\n }\n}\n
This transaction adds a data file and marks the two existing data files for removal. Marking a file for removal in the transaction log is known as \"tombstoning the file\" or a \"logical delete\". This is different from a \"physical delete\", which actually removes the data file from storage.
"},{"location":"how-delta-lake-works/architecture-of-delta-table/#how-delta-table-operations-differ-from-data-lakes","title":"How Delta table operations differ from data lakes","text":"Data lakes consist of data files persisted in storage. They don't have a transaction log that retain metadata about the transactions.
Data lakes perform transactions differently than Delta tables.
When you perform an overwrite transaction with a Delta table, you logically delete the existing data without physically removing it.
Data lakes don't support logical deletes, so you have to physically delete the data from storage.
Logical data operations are safer because they can be rolled back if they don't complete successfully. Physically removing data from storage can be dangerous, especially if it happens before a transaction is complete.
We're now ready to look into Delta Lake ACID transactions in more detail.
"},{"location":"integrations/delta-lake-arrow/","title":"Delta Lake Arrow Integrations","text":"Delta Lake tables can be exposed as Arrow tables and Arrow datasets, which allows for interoperability with a variety of query engines.
This page shows you how to convert Delta tables to Arrow data structures and teaches you the difference between Arrow tables and Arrow datasets. Tables are \"eager\" and datasets are \"lazy\", which has important performance implications. Keep reading to learn more!
"},{"location":"integrations/delta-lake-arrow/#delta-lake-to-arrow-dataset","title":"Delta Lake to Arrow Dataset","text":"Delta tables can easily be exposed as Arrow datasets. This makes it easy for any query engine that can read Arrow datasets to read a Delta table.
Let's take a look at the h2o groupby dataset that contains 9 columns of data. Here are three representative rows of data:
+-------+-------+--------------+-------+-------+--------+------+------+---------+\n| id1 | id2 | id3 | id4 | id5 | id6 | v1 | v2 | v3 |\n|-------+-------+--------------+-------+-------+--------+------+------+---------|\n| id016 | id046 | id0000109363 | 88 | 13 | 146094 | 4 | 6 | 18.8377 |\n| id039 | id087 | id0000466766 | 14 | 30 | 111330 | 4 | 14 | 46.7973 |\n| id047 | id098 | id0000307804 | 85 | 23 | 187639 | 3 | 5 | 47.5773 |\n+-------+-------+--------------+-------+-------+--------+------+------+---------+\n
Here's how to expose the Delta table as a PyArrow dataset and run a query with DuckDB:
import duckdb\nfrom deltalake import DeltaTable\n\ntable = DeltaTable(\"delta/G1_1e9_1e2_0_0\")\ndataset = table.to_pyarrow_dataset()\nquack = duckdb.arrow(dataset)\nquack.filter(\"id1 = 'id016' and v2 > 10\")\n
Here's the result:
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 id1 \u2502 id2 \u2502 id3 \u2502 id4 \u2502 id5 \u2502 id6 \u2502 v1 \u2502 v2 \u2502 v3 \u2502\n\u2502 varchar \u2502 varchar \u2502 varchar \u2502 int32 \u2502 int32 \u2502 int32 \u2502 int32 \u2502 int32 \u2502 double \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 id016 \u2502 id054 \u2502 id0002309114 \u2502 62 \u2502 95 \u2502 7180859 \u2502 4 \u2502 13 \u2502 7.750173 \u2502\n\u2502 id016 \u2502 id044 \u2502 id0003968533 \u2502 63 \u2502 98 \u2502 2356363 \u2502 4 \u2502 14 \u2502 3.942417 \u2502\n\u2502 id016 \u2502 id034 \u2502 id0001082839 \u2502 58 \u2502 73 \u2502 8039808 \u2502 5 \u2502 12 \u2502 76.820135 \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 ? rows (>9999 rows, 3 shown) 9 columns \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
Arrow datasets allow for the predicates to get pushed down to the query engine, so the query is executed quickly.
"},{"location":"integrations/delta-lake-arrow/#delta-lake-to-arrow-table","title":"Delta Lake to Arrow Table","text":"You can also run the same query with DuckDB on an Arrow table:
quack = duckdb.arrow(table.to_pyarrow_table())\nquack.filter(\"id1 = 'id016' and v2 > 10\")\n
This returns the same result, but it runs slower.
"},{"location":"integrations/delta-lake-arrow/#difference-between-arrow-dataset-and-arrow-table","title":"Difference between Arrow Dataset and Arrow Table","text":"Arrow Datasets are lazy and allow for full predicate pushdown unlike Arrow tables which are eagerly loaded into memory.
The previous DuckDB queries were run on a 1 billion row dataset that's roughly 50 GB when stored as an uncompressed CSV file. Here are the runtimes when the data is stored in a Delta table and the queries are executed on a 2021 Macbook M1 with 64 GB of RAM:
- Arrow table: 17.1 seconds
- Arrow dataset: 0.01 seconds
The query runs much faster on an Arrow dataset because the predicates can be pushed down to the query engine and lots of data can be skipped.
Arrow tables are eagerly materialized in memory and don't allow for the same amount of data skipping.
"},{"location":"integrations/delta-lake-arrow/#multiple-query-engines-can-query-arrow-datasets","title":"Multiple query engines can query Arrow Datasets","text":"Other query engines like DataFusion can also query Arrow datasets, see the following example:
from datafusion import SessionContext\n\nctx = SessionContext()\nctx.register_dataset(\"my_dataset\", table.to_pyarrow_dataset())\nctx.sql(\"select * from my_dataset where v2 > 5\")\n
Here's the result:
+-------+-------+--------------+-----+-----+--------+----+----+-----------+\n| id1 | id2 | id3 | id4 | id5 | id6 | v1 | v2 | v3 |\n+-------+-------+--------------+-----+-----+--------+----+----+-----------+\n| id082 | id049 | id0000022715 | 97 | 55 | 756924 | 2 | 11 | 74.161136 |\n| id053 | id052 | id0000113549 | 19 | 56 | 139048 | 1 | 10 | 95.178444 |\n| id090 | id043 | id0000637409 | 94 | 50 | 12448 | 3 | 12 | 60.21896 |\n+-------+-------+--------------+-----+-----+--------+----+----+-----------+\n
Any query engine that's capable of reading an Arrow table/dataset can read a Delta table.
"},{"location":"integrations/delta-lake-arrow/#conclusion","title":"Conclusion","text":"Delta tables can easily be exposed as Arrow tables/datasets.
Therefore any query engine that can read an Arrow table/dataset can also read a Delta table.
Arrow datasets allow for more predicates to be pushed down to the query engine, so they can deliver better performance than Arrow tables.
"},{"location":"integrations/delta-lake-datafusion/","title":"Using Delta Lake with DataFusion","text":"This page explains how to use Delta Lake with DataFusion.
Delta Lake offers DataFusion users better performance and more features compared to other formats like CSV or Parquet.
Delta Lake works well with the DataFusion Rust API and the DataFusion Python API. It's a great option for all DataFusion users.
Delta Lake also depends on DataFusion to implement SQL-related functionality under the hood. We will also discuss this dependency at the end of this guide in case you're interested in learning more about the symbiotic relationship between the two libraries.
"},{"location":"integrations/delta-lake-datafusion/#delta-lake-performance-benefits-for-datafusion-users","title":"Delta Lake performance benefits for DataFusion users","text":"Let's run some DataFusion queries on a Parquet file and a Delta table with the same data to learn more about the performance benefits of Delta Lake.
Suppose you have the following dataset with 1 billion rows and 9 columns. Here are the first three rows of data:
+-------+-------+--------------+-------+-------+--------+------+------+---------+\n| id1 | id2 | id3 | id4 | id5 | id6 | v1 | v2 | v3 |\n|-------+-------+--------------+-------+-------+--------+------+------+---------|\n| id016 | id046 | id0000109363 | 88 | 13 | 146094 | 4 | 6 | 18.8377 |\n| id039 | id087 | id0000466766 | 14 | 30 | 111330 | 4 | 14 | 46.7973 |\n| id047 | id098 | id0000307804 | 85 | 23 | 187639 | 3 | 5 | 47.5773 |\n+-------+-------+--------------+-------+-------+--------+------+------+---------+\n
Here's how to register a Delta Lake table as a PyArrow dataset:
from datafusion import SessionContext\nfrom deltalake import DeltaTable\n\nctx = SessionContext()\ntable = DeltaTable(\"G1_1e9_1e2_0_0\")\nctx.register_dataset(\"my_delta_table\", table.to_pyarrow_dataset())\n
Now query the table:
ctx.sql(\"select id1, sum(v1) as v1 from my_delta_table where id1='id096' group by id1\")\n
That query takes 2.8 seconds to execute.
Let's register the same dataset as a Parquet table, run the same query, and compare the runtime difference.
Register the Parquet table and run the query:
path = \"G1_1e9_1e2_0_0.parquet\"\nctx.register_parquet(\"my_parquet_table\", path)\nctx.sql(\"select id1, sum(v1) as v1 from my_parquet_table where id1='id096' group by id1\")\n
This query takes 5.3 seconds to run.
Parquet stores data in row groups, and DataFusion can intelligently skip row groups that don't contain relevant data, so the query is faster than with a file format like CSV, which doesn't support row group skipping.
Delta Lake stores file-level metadata information in the transaction log, so it can skip entire files when queries are executed. Delta Lake can skip entire files and then skip row groups within the individual files. This makes Delta Lake even faster than Parquet files, especially for larger datasets spread across many files.
"},{"location":"integrations/delta-lake-datafusion/#delta-lake-features-for-datafusion-users","title":"Delta Lake features for DataFusion users","text":"Delta Lake also provides other features that are useful for DataFusion users like ACID transactions, concurrency protection, time travel, versioned data, and more.
"},{"location":"integrations/delta-lake-datafusion/#why-delta-lake-depends-on-datafusion","title":"Why Delta Lake depends on DataFusion","text":"Delta Lake depends on DataFusion to provide some end-user features.
DataFusion is useful in providing SQL-related Delta Lake features. Some examples:
- Update and merge are written in terms of SQL expressions.
- Invariants and constraints are written in terms of SQL expressions.
Anytime we have to evaluate SQL, we need some sort of SQL engine. We use DataFusion for that.
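For example, the predicate strings accepted by operations such as delete are SQL expressions evaluated by the embedded engine. A minimal sketch (the table path and column name are placeholders):
from deltalake import DeltaTable\n\ndt = DeltaTable(\"tmp/some-table\")\n# the string below is a SQL expression used to select the rows to delete\ndt.delete(\"num > 2\")\n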
"},{"location":"integrations/delta-lake-datafusion/#conclusion","title":"Conclusion","text":"Delta Lake is a great file format for DataFusion users.
Delta Lake also uses DataFusion to provide some end-user features.
DataFusion and Delta Lake have a wonderful symbiotic relationship and play very nicely with each other.
See this guide for more information on Delta Lake and PyArrow and why PyArrow Datasets are often a better option than PyArrow tables.
"},{"location":"integrations/delta-lake-pandas/","title":"Using Delta Lake with pandas","text":"Delta Lake is a great storage system for pandas analyses. This page shows how it's easy to use Delta Lake with pandas, the unique features Delta Lake offers pandas users, and how Delta Lake can make your pandas analyses run faster.
Delta Lake is very easy to install for pandas analyses; just run pip install deltalake
.
Delta Lake allows for performance optimizations, so pandas queries can run much faster than the same query run on data stored in CSV or Parquet. See the following chart for the query runtime on a Delta table compared with CSV/Parquet.
Z Ordered Delta tables run this query much faster than when the data is stored in Parquet or CSV. Let's dive in deeper and see how Delta Lake makes pandas faster.
"},{"location":"integrations/delta-lake-pandas/#delta-lake-makes-pandas-queries-run-faster","title":"Delta Lake makes pandas queries run faster","text":"There are a few reasons Delta Lake can make pandas queries run faster:
- column pruning: only grabbing the columns relevant for a query
- file skipping: only reading files with data for the query
- row group skipping: only reading row groups with data for the query
- Z ordering data: colocating similar data in the same files, so file skipping is more effective
Reading less data (fewer columns and/or fewer rows) is how Delta Lake makes pandas queries run faster.
Parquet allows for column pruning and row group skipping, but doesn't support file-level skipping or Z Ordering. CSV doesn't support any of these performance optimizations.
Let's take a look at a sample dataset and run a query to see the performance enhancements offered by Delta Lake.
Suppose you have a 1 billion row dataset with 9 columns, here are the first three rows of the dataset:
+-------+-------+--------------+-------+-------+--------+------+------+---------+\n| id1 | id2 | id3 | id4 | id5 | id6 | v1 | v2 | v3 |\n|-------+-------+--------------+-------+-------+--------+------+------+---------|\n| id016 | id046 | id0000109363 | 88 | 13 | 146094 | 4 | 6 | 18.8377 |\n| id039 | id087 | id0000466766 | 14 | 30 | 111330 | 4 | 14 | 46.7973 |\n| id047 | id098 | id0000307804 | 85 | 23 | 187639 | 3 | 5 | 47.5773 |\n+-------+-------+--------------+-------+-------+--------+------+------+---------+\n
The dataset is roughly 50 GB when stored as an uncompressed CSV file. Let's run some queries on a 2021 Macbook M1 with 64 GB of RAM.
Start by running the query on an uncompressed CSV file:
(\n pd.read_csv(f\"{Path.home()}/data/G1_1e9_1e2_0_0.csv\", usecols=[\"id1\", \"id2\", \"v1\"])\n .query(\"id1 == 'id016'\")\n .groupby(\"id2\")\n .agg({\"v1\": \"sum\"})\n)\n
This query takes 234 seconds to execute. It runs out of memory if the usecols
parameter is not set.
Now let's convert the CSV dataset to Parquet and run the same query on the data stored in a Parquet file.
(\n pd.read_parquet(\n f\"{Path.home()}/data/G1_1e9_1e2_0_0.parquet\", columns=[\"id1\", \"id2\", \"v1\"]\n )\n .query(\"id1 == 'id016'\")\n .groupby(\"id2\")\n .agg({\"v1\": \"sum\"})\n)\n
This query takes 118 seconds to execute.
Parquet stores data in row groups and allows for skipping when the filters
parameter is set with predicates. Run the Parquet query again with row group skipping enabled:
(\n pd.read_parquet(\n f\"{Path.home()}/data/G1_1e9_1e2_0_0.parquet\",\n columns=[\"id1\", \"id2\", \"v1\"],\n filters=[(\"id1\", \"==\", \"id016\")],\n )\n .query(\"id1 == 'id016'\")\n .groupby(\"id2\")\n .agg({\"v1\": \"sum\"})\n)\n
This query runs in 19 seconds. Lots of row groups can be skipped for this particular query.
Now let's run the same query on a Delta table to see the out-of-the box performance:
(\n DeltaTable(f\"{Path.home()}/data/deltalake_baseline_G1_1e9_1e2_0_0\", version=0)\n .to_pandas(filters=[(\"id1\", \"==\", \"id016\")], columns=[\"id1\", \"id2\", \"v1\"])\n .query(\"id1 == 'id016'\")\n .groupby(\"id2\")\n .agg({\"v1\": \"sum\"})\n)\n
This query runs in 8 seconds, which is a significant performance enhancement.
Now let's Z Order the Delta table by id1
which will make the data skipping even better. A minimal sketch of the Z Order step is shown below; the exact table path is assumed from the earlier examples.
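from pathlib import Path\nfrom deltalake import DeltaTable\n\n# rewrite the files so rows with similar id1 values are colocated\ndt = DeltaTable(f\"{Path.home()}/data/deltalake_baseline_G1_1e9_1e2_0_0\")\ndt.optimize.z_order([\"id1\"])\n
Now run the query again on the Z Ordered Delta table: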
(\n DeltaTable(f\"{Path.home()}/data/deltalake_baseline_G1_1e9_1e2_0_0\", version=1)\n .to_pandas(filters=[(\"id1\", \"==\", \"id016\")], columns=[\"id1\", \"id2\", \"v1\"])\n .query(\"id1 == 'id016'\")\n .groupby(\"id2\")\n .agg({\"v1\": \"sum\"})\n)\n
The query now executes in 2.4 seconds.
Delta tables can make certain pandas queries run much faster.
"},{"location":"integrations/delta-lake-pandas/#delta-lake-lets-pandas-users-time-travel","title":"Delta Lake lets pandas users time travel","text":"Start by creating a Delta table:
import pandas as pd\nfrom deltalake import write_deltalake, DeltaTable\n\ndf = pd.DataFrame({\"num\": [1, 2, 3], \"letter\": [\"a\", \"b\", \"c\"]})\nwrite_deltalake(\"tmp/some-table\", df)\n
Here are the contents of the Delta table (version 0 of the Delta table):
+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n+-------+----------+\n
Now append two rows to the Delta table:
df = pd.DataFrame({\"num\": [8, 9], \"letter\": [\"dd\", \"ee\"]})\nwrite_deltalake(\"tmp/some-table\", df, mode=\"append\")\n
Here are the contents after the append operation (version 1 of the Delta table):
+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n| 8 | dd |\n| 9 | ee |\n+-------+----------+\n
Now perform an overwrite transaction:
df = pd.DataFrame({\"num\": [11, 22], \"letter\": [\"aa\", \"bb\"]})\nwrite_deltalake(\"tmp/some-table\", df, mode=\"overwrite\")\n
Here are the contents after the overwrite operation (version 2 of the Delta table):
+-------+----------+\n| num | letter |\n|-------+----------|\n| 8 | dd |\n| 9 | ee |\n+-------+----------+\n
Read in the Delta table and it will grab the latest version by default:
DeltaTable(\"tmp/some-table\").to_pandas()\n\n+-------+----------+\n| num | letter |\n|-------+----------|\n| 11 | aa |\n| 22 | bb |\n+-------+----------+\n
You can easily time travel back to version 0 of the Delta table:
DeltaTable(\"tmp/some-table\", version=0).to_pandas()\n\n+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n+-------+----------+\n
You can also time travel to version 1 of the Delta table:
DeltaTable(\"tmp/some-table\", version=1).to_pandas()\n\n+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n| 8 | dd |\n| 9 | ee |\n+-------+----------+\n
Time travel is a powerful feature that pandas users cannot access with CSV or Parquet.
"},{"location":"integrations/delta-lake-pandas/#schema-enforcement","title":"Schema enforcement","text":"Delta tables only allow you to append DataFrame with matching schema by default. Suppose you have a DataFrame with num
and animal
columns, which differs from the Delta table, which has num
and letter
columns.
Try to append this DataFrame with a mismatched schema to the existing table:
df = pd.DataFrame({\"num\": [5, 6], \"animal\": [\"cat\", \"dog\"]})\nwrite_deltalake(\"tmp/some-table\", df)\n
This transaction will be rejected and will return the following error message:
ValueError: Schema of data does not match table schema\nData schema:\nnum: int64\nanimal: string\n-- schema metadata --\npandas: '{\"index_columns\": [{\"kind\": \"range\", \"name\": null, \"start\": 0, \"' + 474\nTable Schema:\nnum: int64\nletter: string\n
Schema enforcement protects your table from getting corrupted by appending data with mismatched schema. Parquet and CSV don't offer schema enforcement for pandas users.
"},{"location":"integrations/delta-lake-pandas/#overwriting-schema-of-table","title":"Overwriting schema of table","text":"You can overwrite the table contents and schema by setting the overwrite_schema
option. Here's how to overwrite the table contents:
write_deltalake(\"tmp/some-table\", df, mode=\"overwrite\", overwrite_schema=True)\n
Here are the contents of the table after the values and schema have been overwritten:
+-------+----------+\n| num | animal |\n|-------+----------|\n| 5 | cat |\n| 6 | dog |\n+-------+----------+\n
"},{"location":"integrations/delta-lake-pandas/#in-memory-vs-in-storage-data-changes","title":"In-memory vs. in-storage data changes","text":"It's important to distinguish between data stored in-memory and data stored on disk when understanding the functionality offered by Delta Lake.
pandas loads data from storage (CSV, Parquet, or Delta Lake) into in-memory DataFrames.
pandas makes it easy to modify data in memory, say, updating a column value. It's not easy to update a column value in storage systems like CSV or Parquet using pandas.
Delta Lake makes it easy for pandas users to update data in storage.
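As a minimal sketch (reusing the table from this page; the predicate is only illustrative), a delete issued through deltalake changes the data in storage directly, and pandas sees the change on the next read:
from deltalake import DeltaTable\n\ndt = DeltaTable(\"tmp/some-table\")\ndt.delete(\"num > 5\")  # rewrites the affected files in storage\nprint(DeltaTable(\"tmp/some-table\").to_pandas())  # the next pandas read reflects the change\n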
"},{"location":"integrations/delta-lake-pandas/#why-delta-lake-allows-for-faster-queries","title":"Why Delta Lake allows for faster queries","text":"Delta tables store data in many files and metadata about the files in the transaction log. Delta Lake allows for certain queries to skip entire files, which makes pandas queries run much faster.
"},{"location":"integrations/delta-lake-pandas/#more-resources","title":"More resources","text":"See this talk on why Delta Lake is the best file format for pandas analyses to learn more:
"},{"location":"integrations/delta-lake-pandas/#conclusion","title":"Conclusion","text":"Delta Lake provides many features that make it an excellent format for pandas analyses:
- performance optimizations make pandas queries run faster
- data management features make pandas analyses more reliable
- advanced features allow you to perform more complex pandas analyses
Python deltalake offers pandas users a better experience compared with CSV/Parquet.
"},{"location":"integrations/delta-lake-polars/","title":"Using Delta Lake with polars","text":"This page explains why Delta Lake is a great storage system for Polars analyses.
You will learn how to create Delta tables with Polars, how to query Delta tables with Polars, and the unique advantages Delta Lake offers the Polars community.
Here are some amazing benefits that Delta Lake provides Polars users:
- time travel
- ACID transactions for reliable writes
- better performance with file skipping
- enhanced file skipping via Z Ordering
- ability to rollback mistakes
- and many, many more
Let's start by showing how to use Polars with Delta Lake, explore how Delta Lake can make Polars queries run faster, and then look at all the cool features Delta Lake offers Polars users.
"},{"location":"integrations/delta-lake-polars/#creating-a-delta-lake-table-with-polars","title":"Creating a Delta Lake table with Polars","text":"Create a Polars DataFrame and write it out to a Delta table:
import polars as pl\n\ndf = pl.DataFrame({\"x\": [1, 2, 3]})\ndf.write_delta(\"tmp/bear_delta_lake\")\n
Inspect the contents of the Delta table:
print(pl.read_delta(\"tmp/bear_delta_lake\"))\n\n+-----+\n| x |\n| --- |\n| i64 |\n+=====+\n| 1 |\n| 2 |\n| 3 |\n+-----+\n
Now create another Polars DataFrame and append it to the existing Delta table:
df2 = pl.DataFrame({\"x\": [8, 9, 10]})\ndf2.write_delta(\"tmp/bear_delta_lake\", mode=\"append\")\n
Re-inspect the contents of the Delta table:
print(pl.read_delta(\"tmp/bear_delta_lake\"))\n\n+-----+\n| x |\n| --- |\n| i64 |\n+=====+\n| 1 |\n| 2 |\n| 3 |\n| 8 |\n| 9 |\n| 10 |\n+-----+\n
Now overwrite the existing Delta table:
df3 = pl.DataFrame({\"x\": [55, 66, 77]})\ndf3.write_delta(\"tmp/bear_delta_lake\", mode=\"overwrite\")\n
Inspect the Delta table:
print(pl.read_delta(\"tmp/bear_delta_lake\"))\n\n+-----+\n| x |\n| --- |\n| i64 |\n+=====+\n| 55 |\n| 66 |\n| 77 |\n+-----+\n
The Delta table now has three versions, as shown in the following diagram:
"},{"location":"integrations/delta-lake-polars/#time-travel-with-delta-lake-for-polars","title":"Time travel with Delta Lake for Polars","text":"Time travel back to version 0 of the Delta table:
print(pl.read_delta(\"tmp/bear_delta_lake\", version=0))\n\n+-----+\n| x |\n| --- |\n| i64 |\n+=====+\n| 1 |\n| 2 |\n| 3 |\n+-----+\n
Time travel back to version 1 of the Delta table:
print(pl.read_delta(\"tmp/bear_delta_lake\", version=1))\n\n+-----+\n| x |\n| --- |\n| i64 |\n+=====+\n| 1 |\n| 2 |\n| 3 |\n| 9 |\n| 8 |\n| 10 |\n+-----+\n
Read the Delta table without specifying a version and see how it reads the latest version by default:
print(pl.read_delta(\"tmp/bear_delta_lake\"))\n\n+-----+\n| x |\n| --- |\n| i64 |\n+=====+\n| 55 |\n| 66 |\n| 77 |\n+-----+\n
Let's dive into how to read Delta tables with Polars in more detail and compare the query runtime performance on larger datasets.
"},{"location":"integrations/delta-lake-polars/#reading-a-delta-lake-table-with-polars","title":"Reading a Delta Lake table with Polars","text":"Let's look at the h2o groupby dataset that has 1 billion rows and 9 columns. Here are the first three rows of the dataset:
+-------+-------+--------------+-------+-------+--------+------+------+---------+\n| id1 | id2 | id3 | id4 | id5 | id6 | v1 | v2 | v3 |\n|-------+-------+--------------+-------+-------+--------+------+------+---------|\n| id016 | id046 | id0000109363 | 88 | 13 | 146094 | 4 | 6 | 18.8377 |\n| id039 | id087 | id0000466766 | 14 | 30 | 111330 | 4 | 14 | 46.7973 |\n| id047 | id098 | id0000307804 | 85 | 23 | 187639 | 3 | 5 | 47.5773 |\n+-------+-------+--------------+-------+-------+--------+------+------+---------+\n
This dataset is 50GB when stored in an uncompressed CSV file. Let's run some queries on this dataset when it's stored in different file formats with Polars.
This section will show the runtime for a query when the data is stored in CSV, Parquet, and Delta Lake and explain why Delta tables are the fastest.
Start by running a query on an uncompressed CSV file with read_csv
:
pl.read_csv(\"~/data/G1_1e9_1e2_0_0.csv\").filter(pl.col(\"id1\") < \"id016\").group_by(\n [\"id1\", \"id2\"]\n).agg(pl.sum(\"v1\").alias(\"v1_sum\")).collect()\n
This query errors out after running for several minutes. The machine runs out of memory. Let's try it again with scan_csv
.
pl.scan_csv(\"~/data/G1_1e9_1e2_0_0.csv\").filter(pl.col(\"id1\") < \"id016\").group_by(\n [\"id1\", \"id2\"]\n).agg(pl.sum(\"v1\").alias(\"v1_sum\")).collect()\n
This query runs in 56.2 seconds.
Now let's run the same query when the data is stored in a Parquet file:
pl.scan_parquet(\"~/data/G1_1e9_1e2_0_0.parquet\").filter(\n pl.col(\"id1\") < \"id016\"\n).group_by([\"id1\", \"id2\"]).agg(pl.sum(\"v1\").alias(\"v1_sum\")).collect()\n
This query runs in 8.3 seconds. It's much faster because Polars is optimized to skip row groups in Parquet files that don't contain data that's relevant for the query.
Then run the query on the newly created Delta table:
pl.scan_delta(\"~/data/deltalake/G1_1e9_1e2_0_0\", version=1).filter(\n pl.col(\"id1\") < \"id016\"\n).group_by([\"id1\", \"id2\"]).agg(pl.sum(\"v1\").alias(\"v1_sum\")).collect()\n
This query runs in 7.2 seconds. Polars can run this query faster because it can inspect the Delta transaction log and skip entire files that don't contain relevant data before performing the ordinary Parquet row group skipping.
Finally run the query on the Delta table after it has been Z Ordered by id1
:
pl.scan_delta(\"~/data/deltalake/G1_1e9_1e2_0_0\", version=2).filter(\n pl.col(\"id1\") < \"id016\"\n).group_by([\"id1\", \"id2\"]).agg(pl.sum(\"v1\").alias(\"v1_sum\")).collect()\n
This query runs in 3.5 seconds. The query on the Z Ordered Delta table is even faster because similar data has been co-located in the same files. This allows for even greater data skipping.
Polars can leverage file skipping to query Delta tables very quickly.
"},{"location":"integrations/delta-lake-polars/#why-polars-is-fast-with-delta-lake","title":"Why Polars is fast with Delta Lake","text":"Delta tables consist of metadata in a transaction log and data stored in Parquet files.
When Polars queries a Delta table, it starts by consulting the transaction log to understand the metadata of each file in the Delta table. This allows for Polars to quickly identify which files should be skipped by the query.
CSV files don't contain any such metadata, so file skipping isn't an option. Polars can skip Parquet files based on metadata, but it needs to open up each file and read the metadata, which is slower than grabbing the file-level metadata directly from the transaction log.
Parquet doesn't allow users to easily Z Order the data and colocate similar data in the same row groups. The Z Order optimizations are only supported in Delta tables.
Delta Lake offers Polars users unique performance optimizations.
"},{"location":"integrations/delta-lake-polars/#other-delta-lake-features-relevant-for-polars-users","title":"Other Delta Lake features relevant for Polars users","text":" - ACID transactions for reliable writes
- better performance with file skipping
- enhanced file skipping via Z Ordering
- ability to rollback mistakes
"},{"location":"integrations/delta-lake-polars/#conclusion","title":"Conclusion","text":"This guide shows how Delta Lake is a great storage format for Polars analyses.
Delta Lake is easy to use, fast, and full of features that are great for Polars users.
"},{"location":"usage/","title":"Usage","text":"A DeltaTable represents the state of a delta table at a particular version. This includes which files are currently part of the table, the schema of the table, and other metadata such as creation time.
Python Rust DeltaTable
from deltalake import DeltaTable\n\ndt = DeltaTable(\"../rust/tests/data/delta-0.2.0\")\nprint(f\"Version: {dt.version()}\")\nprint(f\"Files: {dt.files()}\")\n
DeltaTable
let table = deltalake::open_table(\"../rust/tests/data/simple_table\").await.unwrap();\nprintln!(\"Version: {}\", table.version());\nprintln!(\"Files: {}\", table.get_files());\n
"},{"location":"usage/appending-overwriting-delta-lake-table/","title":"Appending to and overwriting a Delta Lake table","text":"This section explains how to append to an exising Delta table and how to overwrite a Delta table.
"},{"location":"usage/appending-overwriting-delta-lake-table/#delta-lake-append-transactions","title":"Delta Lake append transactions","text":"Suppose you have a Delta table with the following contents:
+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n+-------+----------+\n
Append two additional rows of data to the table:
import pandas as pd\nfrom deltalake import write_deltalake, DeltaTable\n\ndf = pd.DataFrame({\"num\": [8, 9], \"letter\": [\"dd\", \"ee\"]})\nwrite_deltalake(\"tmp/some-table\", df, mode=\"append\")\n
Here are the updated contents of the Delta table:
+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n| 8 | dd |\n| 9 | ee |\n+-------+----------+\n
Now let's see how to perform an overwrite transaction.
"},{"location":"usage/appending-overwriting-delta-lake-table/#delta-lake-overwrite-transactions","title":"Delta Lake overwrite transactions","text":"Now let's see how to overwrite the exisitng Delta table.
df = pd.DataFrame({\"num\": [11, 22], \"letter\": [\"aa\", \"bb\"]})\nwrite_deltalake(\"tmp/some-table\", df, mode=\"overwrite\")\n
Here are the contents of the Delta table after the overwrite operation:
+-------+----------+\n| num | letter |\n|-------+----------|\n| 11 | aa |\n| 22 | bb |\n+-------+----------+\n
Overwriting just performs a logical delete. It doesn't physically remove the previous data from storage. Time travel back to the previous version to confirm that the old version of the table is still accessible.
dt = DeltaTable(\"tmp/some-table\", version=1)\n\n+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n| 8 | dd |\n| 9 | ee |\n+-------+----------+\n
"},{"location":"usage/create-delta-lake-table/","title":"Creating a Delta Lake Table","text":"This section explains how to create a Delta Lake table.
You can easily write a DataFrame to a Delta table.
from deltalake import write_deltalake\nimport pandas as pd\n\ndf = pd.DataFrame({\"num\": [1, 2, 3], \"letter\": [\"a\", \"b\", \"c\"]})\nwrite_deltalake(\"tmp/some-table\", df)\n
Here are the contents of the Delta table in storage:
+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n+-------+----------+\n
"},{"location":"usage/deleting-rows-from-delta-lake-table/","title":"Deleting rows from a Delta Lake table","text":"This section explains how to delete rows from a Delta Lake table.
Suppose you have the following Delta table with four rows:
+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n| 3 | c |\n| 4 | d |\n+-------+----------+\n
Here's how to delete all the rows where the num
is greater than 2:
dt = DeltaTable(\"tmp/my-table\")\ndt.delete(\"num > 2\")\n
Here are the contents of the Delta table after the delete operation has been performed:
+-------+----------+\n| num | letter |\n|-------+----------|\n| 1 | a |\n| 2 | b |\n+-------+----------+\n
"},{"location":"usage/examining-table/","title":"Examining a Table","text":""},{"location":"usage/examining-table/#metadata","title":"Metadata","text":"The delta log maintains basic metadata about a table, including:
- A unique
id
- A
name
, if provided - A
description
, if provided - The list of
partitionColumns
. - The
created_time
of the table - A map of table
configuration
. This includes fields such as delta.appendOnly
, which if true
indicates the table is not meant to have data deleted from it.
Get metadata from a table with the DeltaTable.metadata() method:
>>> from deltalake import DeltaTable\n>>> dt = DeltaTable(\"../rust/tests/data/simple_table\")\n>>> dt.metadata()\nMetadata(id: 5fba94ed-9794-4965-ba6e-6ee3c0d22af9, name: None, description: None, partitionColumns: [], created_time: 1587968585495, configuration={})\n
"},{"location":"usage/examining-table/#schema","title":"Schema","text":"The schema for the table is also saved in the transaction log. It can either be retrieved in the Delta Lake form as Schema or as a PyArrow schema. The first allows you to introspect any column-level metadata stored in the schema, while the latter represents the schema the table will be loaded into.
Use DeltaTable.schema to retrieve the delta lake schema:
>>> from deltalake import DeltaTable\n>>> dt = DeltaTable(\"../rust/tests/data/simple_table\")\n>>> dt.schema()\nSchema([Field(id, PrimitiveType(\"long\"), nullable=True)])\n
These schemas have a JSON representation that can be retrieved with DeltaTable.schema().to_json():
>>> dt.schema().to_json()\n'{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"long\",\"nullable\":true,\"metadata\":{}}]}'\n
Use DeltaTable.schema.to_pyarrow() to retrieve the PyArrow schema:
>>> dt.schema().to_pyarrow()\nid: int64\n
"},{"location":"usage/examining-table/#history","title":"History","text":"Depending on what system wrote the table, the delta table may have provenance information describing what operations were performed on the table, when, and by whom. This information is retained for 30 days by default, unless otherwise specified by the table configuration delta.logRetentionDuration
.
Note
This information is not written by all writers and different writers may use different schemas to encode the actions. For Spark\\'s format, see: https://docs.delta.io/latest/delta-utility.html#history-schema
To view the available history, use DeltaTable.history
:
>>> from deltalake import DeltaTable\n>>> dt = DeltaTable(\"../rust/tests/data/simple_table\")\n>>> dt.history()\n[{'timestamp': 1587968626537, 'operation': 'DELETE', 'operationParameters': {'predicate': '[\"((`id` % CAST(2 AS BIGINT)) = CAST(0 AS BIGINT))\"]'}, 'readVersion': 3, 'isBlindAppend': False},\n {'timestamp': 1587968614187, 'operation': 'UPDATE', 'operationParameters': {'predicate': '((id#697L % cast(2 as bigint)) = cast(0 as bigint))'}, 'readVersion': 2, 'isBlindAppend': False},\n {'timestamp': 1587968604143, 'operation': 'WRITE', 'operationParameters': {'mode': 'Overwrite', 'partitionBy': '[]'}, 'readVersion': 1, 'isBlindAppend': False},\n {'timestamp': 1587968596254, 'operation': 'MERGE', 'operationParameters': {'predicate': '(oldData.`id` = newData.`id`)'}, 'readVersion': 0, 'isBlindAppend': False},\n {'timestamp': 1587968586154, 'operation': 'WRITE', 'operationParameters': {'mode': 'ErrorIfExists', 'partitionBy': '[]'}, 'isBlindAppend': True}]\n
"},{"location":"usage/examining-table/#current-add-actions","title":"Current Add Actions","text":"The active state for a delta table is determined by the Add actions, which provide the list of files that are part of the table and metadata about them, such as creation time, size, and statistics. You can get a data frame of the add actions data using DeltaTable.get_add_actions
:
>>> from deltalake import DeltaTable\n>>> dt = DeltaTable(\"../rust/tests/data/delta-0.8.0\")\n>>> dt.get_add_actions(flatten=True).to_pandas()\n path size_bytes modification_time data_change num_records null_count.value min.value max.value\n0 part-00000-c9b90f86-73e6-46c8-93ba-ff6bfaf892a... 440 2021-03-06 15:16:07 True 2 0 0 2\n1 part-00000-04ec9591-0b73-459e-8d18-ba5711d6cbe... 440 2021-03-06 15:16:16 True 2 0 2 4\n
This works even with past versions of the table:
>>> dt = DeltaTable(\"../rust/tests/data/delta-0.8.0\", version=0)\n>>> dt.get_add_actions(flatten=True).to_pandas()\n path size_bytes modification_time data_change num_records null_count.value min.value max.value\n0 part-00000-c9b90f86-73e6-46c8-93ba-ff6bfaf892a... 440 2021-03-06 15:16:07 True 2 0 0 2\n1 part-00001-911a94a2-43f6-4acb-8620-5e68c265498... 445 2021-03-06 15:16:07 True 3 0 2 4\n
"},{"location":"usage/installation/","title":"Installation","text":"The deltalake
project can be installed via pip for Python or Cargo for Rust.
"},{"location":"usage/installation/#install-delta-lake-for-python","title":"Install Delta Lake for Python","text":"With pip:
pip install deltalake\n
With Conda:
conda install -c conda-forge deltalake\n
"},{"location":"usage/installation/#install-delta-lake-for-rust","title":"Install Delta Lake for Rust","text":"With Cargo:
cargo add deltalake\n
"},{"location":"usage/installation/#run-delta-lake-and-pandas-in-a-jupyter-notebook","title":"Run Delta Lake and pandas in a Jupyter Notebook","text":"You can easily run Delta Lake and pandas in a Jupyter notebook.
Create an environment file with the dependencies as follows:
name: deltalake-minimal\nchannels:\n - conda-forge\n - defaults\ndependencies:\n - python=3.11\n - ipykernel\n - pandas\n - polars\n - jupyterlab\n - pip\n - pip:\n - deltalake\n
Create a virtual environment with the dependencies:
conda env create -f deltalake-minimal.yml\n
Open the Jupyter notebook and run commands as follows:
"},{"location":"usage/loading-table/","title":"Loading a Delta Table","text":"To load the current version, use the constructor:
>>> dt = DeltaTable(\"../rust/tests/data/delta-0.2.0\")\n
Depending on your storage backend, you could use the storage_options
parameter to provide some configuration. Configuration is defined for specific backends - s3 options, azure options, gcs options.
>>> storage_options = {\"AWS_ACCESS_KEY_ID\": \"THE_AWS_ACCESS_KEY_ID\", \"AWS_SECRET_ACCESS_KEY\":\"THE_AWS_SECRET_ACCESS_KEY\"}\n>>> dt = DeltaTable(\"../rust/tests/data/delta-0.2.0\", storage_options=storage_options)\n
The configuration can also be provided via the environment, and the basic service provider is derived from the URL being used. We try to support many of the well-known formats to identify basic service properties.
S3:
- s3://\\<bucket>/\\<path>
- s3a://\\<bucket>/\\<path>
Azure:
- az://\\<container>/\\<path>
- adl://\\<container>/\\<path>
- abfs://\\<container>/\\<path>
GCS:
- gs://\\<bucket>/\\<path>
Alternatively, if you have a data catalog you can load it by reference to a database and table name. Currently only AWS Glue is supported.
For AWS Glue catalog, use AWS environment variables to authenticate.
>>> from deltalake import DeltaTable\n>>> from deltalake import DataCatalog\n>>> database_name = \"simple_database\"\n>>> table_name = \"simple_table\"\n>>> data_catalog = DataCatalog.AWS\n>>> dt = DeltaTable.from_data_catalog(data_catalog=data_catalog, database_name=database_name, table_name=table_name)\n>>> dt.to_pyarrow_table().to_pydict()\n{'id': [5, 7, 9, 5, 6, 7, 8, 9]}\n
"},{"location":"usage/loading-table/#custom-storage-backends","title":"Custom Storage Backends","text":"While delta always needs its internal storage backend to work and be properly configured, in order to manage the delta log, it may sometime be advantageous - and is common practice in the arrow world - to customize the storage interface used for reading the bulk data.
deltalake
will work with any storage compliant with pyarrow.fs.FileSystem
, however the root of the filesystem has to be adjusted to point at the root of the Delta table. We can achieve this by wrapping the custom filesystem into a pyarrow.fs.SubTreeFileSystem
.
import pyarrow.fs as fs\nfrom deltalake import DeltaTable\n\npath = \"<path/to/table>\"\nfilesystem = fs.SubTreeFileSystem(path, fs.LocalFileSystem())\n\ndt = DeltaTable(path)\nds = dt.to_pyarrow_dataset(filesystem=filesystem)\n
When using the pyarrow factory method for file systems, the normalized path is provided on creation. In case of S3 this would look something like:
import pyarrow.fs as fs\nfrom deltalake import DeltaTable\n\ntable_uri = \"s3://<bucket>/<path>\"\nraw_fs, normalized_path = fs.FileSystem.from_uri(table_uri)\nfilesystem = fs.SubTreeFileSystem(normalized_path, raw_fs)\n\ndt = DeltaTable(table_uri)\nds = dt.to_pyarrow_dataset(filesystem=filesystem)\n
"},{"location":"usage/loading-table/#time-travel","title":"Time Travel","text":"To load previous table states, you can provide the version number you wish to load:
>>> dt = DeltaTable(\"../rust/tests/data/simple_table\", version=2)\n
Once you\\'ve loaded a table, you can also change versions using either a version number or datetime string:
>>> dt.load_version(1)\n>>> dt.load_with_datetime(\"2021-11-04 00:05:23.283+00:00\")\n
Warning
Previous table versions may not exist if they have been vacuumed, in which case an exception will be thrown. See Vacuuming tables for more information.
"},{"location":"usage/managing-tables/","title":"Managing Delta Tables","text":""},{"location":"usage/managing-tables/#vacuuming-tables","title":"Vacuuming tables","text":"Vacuuming a table will delete any files that have been marked for deletion. This may make some past versions of a table invalid, so this can break time travel. However, it will save storage space. Vacuum will retain files in a certain window, by default one week, so time travel will still work in shorter ranges.
Delta tables usually don't delete old files automatically, so vacuuming regularly is considered good practice, unless the table is only appended to.
Use DeltaTable.vacuum
to perform the vacuum operation. Note that to prevent accidental deletion, the function performs a dry-run by default: it will only list the files to be deleted. Pass dry_run=False
to actually delete files.
>>> dt = DeltaTable(\"../rust/tests/data/simple_table\")\n>>> dt.vacuum()\n['../rust/tests/data/simple_table/part-00006-46f2ff20-eb5d-4dda-8498-7bfb2940713b-c000.snappy.parquet',\n '../rust/tests/data/simple_table/part-00190-8ac0ae67-fb1d-461d-a3d3-8dc112766ff5-c000.snappy.parquet',\n '../rust/tests/data/simple_table/part-00164-bf40481c-4afd-4c02-befa-90f056c2d77a-c000.snappy.parquet',\n ...]\n>>> dt.vacuum(dry_run=False) # Don't run this unless you are sure!\n
"},{"location":"usage/managing-tables/#optimizing-tables","title":"Optimizing tables","text":"Optimizing tables is not currently supported.
"},{"location":"usage/querying-delta-tables/","title":"Querying Delta Tables","text":"Delta tables can be queried in several ways. By loading as Arrow data or an Arrow dataset, they can be used by compatible engines such as Pandas and DuckDB. By passing on the list of files, they can be loaded into other engines such as Dask.
Delta tables are often larger than can fit into memory on a single computer, so this module provides ways to read only the parts of the data you need. Partition filters allow you to skip reading files that are part of irrelevant partitions. Only loading the columns required also saves memory. Finally, some methods allow reading tables batch-by-batch, allowing you to process the whole table while only having a portion loaded at any given time.
To load into Pandas or a PyArrow table use the DeltaTable.to_pandas
and DeltaTable.to_pyarrow_table
methods, respectively. Both of these support filtering partitions and selecting particular columns.
>>> from deltalake import DeltaTable\n>>> dt = DeltaTable(\"../rust/tests/data/delta-0.8.0-partitioned\")\n>>> dt.schema().to_pyarrow()\nvalue: string\nyear: string\nmonth: string\nday: string\n>>> dt.to_pandas(partitions=[(\"year\", \"=\", \"2021\")], columns=[\"value\"])\n value\n0 6\n1 7\n2 5\n3 4\n>>> dt.to_pyarrow_table(partitions=[(\"year\", \"=\", \"2021\")], columns=[\"value\"])\npyarrow.Table\nvalue: string\n
Converting to a PyArrow Dataset allows you to filter on columns other than partition columns and load the result as a stream of batches rather than a single table. Convert to a dataset using DeltaTable.to_pyarrow_dataset
. Filters applied to datasets will use the partition values and file statistics from the Delta transaction log and push down any other filters to the scanning operation.
>>> import pyarrow.dataset as ds\n>>> dataset = dt.to_pyarrow_dataset()\n>>> condition = (ds.field(\"year\") == \"2021\") & (ds.field(\"value\") > \"4\")\n>>> dataset.to_table(filter=condition, columns=[\"value\"]).to_pandas()\n value\n0 6\n1 7\n2 5\n>>> batch_iter = dataset.to_batches(filter=condition, columns=[\"value\"], batch_size=2)\n>>> for batch in batch_iter: print(batch.to_pandas())\n value\n0 6\n1 7\n value\n0 5\n
PyArrow datasets may also be passed to compatible query engines, such as DuckDB
>>> import duckdb\n>>> ex_data = duckdb.arrow(dataset)\n>>> ex_data.filter(\"year = 2021 and value > 4\").project(\"value\")\n---------------------\n-- Expression Tree --\n---------------------\nProjection [value]\n Filter [year=2021 AND value>4]\n arrow_scan(140409099470144, 4828104688, 1000000)\n\n---------------------\n-- Result Columns --\n---------------------\n- value (VARCHAR)\n\n---------------------\n-- Result Preview --\n---------------------\nvalue\nVARCHAR\n[ Rows: 3]\n6\n7\n5\n
Finally, you can always pass the list of file paths to an engine. For example, you can pass them to dask.dataframe.read_parquet
:
>>> import dask.dataframe as dd\n>>> df = dd.read_parquet(dt.file_uris())\n>>> df\nDask DataFrame Structure:\n value year month day\nnpartitions=6\n object category[known] category[known] category[known]\n ... ... ... ...\n... ... ... ... ...\n ... ... ... ...\n ... ... ... ...\nDask Name: read-parquet, 6 tasks\n>>> df.compute()\n value year month day\n0 1 2020 1 1\n0 2 2020 2 3\n0 3 2020 2 5\n0 4 2021 4 5\n0 5 2021 12 4\n0 6 2021 12 20\n1 7 2021 12 20\n
"},{"location":"usage/writing-delta-tables/","title":"Writing Delta Tables","text":"For overwrites and appends, use write_deltalake
. If the table does not already exist, it will be created. The data
parameter will accept a Pandas DataFrame, a PyArrow Table, or an iterator of PyArrow Record Batches.
>>> from deltalake import write_deltalake\n>>> df = pd.DataFrame({'x': [1, 2, 3]})\n>>> write_deltalake('path/to/table', df)\n
Note: write_deltalake
accepts a Pandas DataFrame, but will convert it to an Arrow table before writing. See caveats in pyarrow:python/pandas
.
By default, writes create a new table and error if it already exists. This is controlled by the mode
parameter, which mirrors the behavior of Spark's pyspark.sql.DataFrameWriter.saveAsTable
DataFrame method. To overwrite, pass in mode='overwrite'
and to append, pass in mode='append'
:
>>> write_deltalake('path/to/table', df, mode='overwrite')\n>>> write_deltalake('path/to/table', df, mode='append')\n
write_deltalake
will raise ValueError
if the schema of the data passed to it differs from the existing table's schema. If you wish to alter the schema as part of an overwrite, pass in overwrite_schema=True
.
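For example, here is a sketch of a schema-changing overwrite, where df_new is a hypothetical DataFrame with an extra column added on top of the earlier example:
>>> df_new = pd.DataFrame({'x': [1, 2, 3], 'y': ['a', 'b', 'c']})\n>>> write_deltalake('path/to/table', df_new, mode='overwrite', overwrite_schema=True)\n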
"},{"location":"usage/writing-delta-tables/#overwriting-a-partition","title":"Overwriting a partition","text":"You can overwrite a specific partition by using mode=\"overwrite\"
together with partition_filters
. This will remove all files within the matching partition and insert your data as new files. This can only be done on one partition at a time. All of the input data must belong to that partition or else the method will raise an error.
>>> from deltalake import write_deltalake\n>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': ['a', 'a', 'b']})\n>>> write_deltalake('path/to/table', df, partition_by=['y'])\n\n>>> table = DeltaTable('path/to/table')\n>>> df2 = pd.DataFrame({'x': [100], 'y': ['b']})\n>>> write_deltalake(table, df2, partition_filters=[('y', '=', 'b')], mode=\"overwrite\")\n\n>>> table.to_pandas()\n x y\n0 1 a\n1 2 a\n2 100 b\n
This method could also be used to insert a new partition if one doesn't already exist, making this operation idempotent.
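As a sketch that continues the example above, writing a partition value that does not exist yet behaves like an insert (df3 and the value 'c' are hypothetical):
>>> df3 = pd.DataFrame({'x': [200], 'y': ['c']})\n>>> write_deltalake(table, df3, partition_filters=[('y', '=', 'c')], mode=\"overwrite\")\n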
"},{"location":"usage/optimize/delta-lake-z-order/","title":"Delta Lake Z Order","text":"This section explains how to Z Order a Delta table.
Z Ordering colocates similar data in the same files, which allows for better file skipping and faster queries.
Suppose you have a table with first_name
, age
, and country
columns.
If you Z Order the data by the country
column, then individuals from the same country will be stored in the same files. When you subsequently query the data for individuals from a given country, it will execute faster because more data can be skipped.
Here's how to Z Order a Delta table:
dt = DeltaTable(\"tmp\")\ndt.optimize.z_order([country])\n
"},{"location":"usage/optimize/small-file-compaction-with-optimize/","title":"Delta Lake small file compaction with optimize","text":"This post shows you how to perform small file compaction with using the optimize
method. This was added to the DeltaTable
class in version 0.9.0. This command rearranges the small files into larger files, which reduces the number of files and speeds up queries.
This is very helpful for workloads that append frequently. For example, if you have a table that is appended to every 10 minutes, after a year you will have 52,560 files in the table. If the table is partitioned by another dimension, you will have 52,560 files per partition; with just 100 unique values that's millions of files. By running optimize
periodically, you can reduce the number of files in the table to a more manageable number.
Typically, you will run optimize less frequently than you append data. If possible, you might run optimize once you know you have finished writing to a particular partition. For example, on a table partitioned by date, you might append data every 10 minutes, but only run optimize once a day at the end of the day. This will ensure you don't need to compact the same data twice.
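As a sketch of such a daily maintenance job for a date-partitioned table (the table name and schedule are assumptions, and dt.optimize with a partition filter is used in the same form as later in this post):
# Compact only yesterday's partition at the end of the day\nfrom datetime import date, timedelta\nfrom deltalake import DeltaTable\n\nyesterday = (date.today() - timedelta(days=1)).isoformat()\ndt = DeltaTable(\"observation_data\")\ndt.optimize(partition_filters=[(\"date\", \"=\", yesterday)])\n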
This section will also teach you about how to use vacuum
to physically remove files from storage that are no longer needed. You\u2019ll often want vacuum after running optimize to remove the small files from storage once they\u2019ve been compacted into larger files.
Let\u2019s start with an example to explain these key concepts. All the code covered in this post is stored in this notebook in case you\u2019d like to follow along.
"},{"location":"usage/optimize/small-file-compaction-with-optimize/#create-a-delta-table-with-small-files","title":"Create a Delta table with small files","text":"Let\u2019s start by creating a Delta table with a lot of small files so we can demonstrate the usefulness of the optimize
command.
Start by writing a function that generates one thousand rows of random data given a timestamp.
def record_observations(date: datetime) -> pa.Table:\n \"\"\"Pulls data for a certain datetime\"\"\"\n nrows = 1000\n return pa.table(\n {\n \"date\": pa.array([date.date()] * nrows),\n \"timestamp\": pa.array([date] * nrows),\n \"value\": pc.random(nrows),\n }\n )\n
Let\u2019s run this function and observe the output:
record_observations(datetime(2021, 1, 1, 12)).to_pandas()\n\n date timestamp value\n0 2021-01-01 2021-01-01 12:00:00 0.3186397383362023\n1 2021-01-01 2021-01-01 12:00:00 0.04253766974259088\n2 2021-01-01 2021-01-01 12:00:00 0.9355682965171573\n\u2026\n999 2021-01-01 2021-01-01 12:00:00 0.23207037062879843\n
Let\u2019s write 100 hours worth of data to the Delta table.
# Every hour starting at midnight on 2021-01-01\nhours_iter = (datetime(2021, 1, 1) + timedelta(hours=i) for i in itertools.count())\n\n# Write 100 hours worth of data\nfor timestamp in itertools.islice(hours_iter, 100):\n write_deltalake(\n \"observation_data\",\n record_observations(timestamp),\n partition_by=[\"date\"],\n mode=\"append\",\n )\n
This data was appended to the Delta table in 100 separate transactions, so the table will contain 100 transaction log entries and 100 data files. You can see the number of files with the files()
method.
dt = DeltaTable(\"observation_data\")\nlen(dt.files()) # 100\n
Here\u2019s how the files are persisted in storage.
observation_data\n\u251c\u2500\u2500 _delta_log\n\u2502 \u251c\u2500\u2500 00000000000000000000.json\n\u2502 \u251c\u2500\u2500 \u2026\n\u2502 \u2514\u2500\u2500 00000000000000000099.json\n\u251c\u2500\u2500 date=2021-01-01\n\u2502 \u251c\u2500\u2500 0-cfe227c6-edd9-4369-a1b0-db4559a2e693-0.parquet\n\u2502 \u251c\u2500\u2500 \u2026\n\u2502 \u251c\u2500\u2500 23-a4ace29e-e73e-40a1-81d3-0f5dc13093de-0.parquet\n\u251c\u2500\u2500 date=2021-01-02\n\u2502 \u251c\u2500\u2500 24-9698b456-66eb-4075-8732-fe56d81edb60-0.parquet\n\u2502 \u251c\u2500\u2500 \u2026\n\u2502 \u2514\u2500\u2500 47-d3fce527-e018-4c02-8acd-a649f6f523d2-0.parquet\n\u251c\u2500\u2500 date=2021-01-03\n\u2502 \u251c\u2500\u2500 48-fd90a7fa-5a14-42ed-9f59-9fe48d87899d-0.parquet\n\u2502 \u251c\u2500\u2500 \u2026\n\u2502 \u2514\u2500\u2500 71-5f143ade-8ae2-4854-bdc5-61154175665f-0.parquet\n\u251c\u2500\u2500 date=2021-01-04\n\u2502 \u251c\u2500\u2500 72-477c10fe-dc09-4087-80f0-56006e4a7911-0.parquet\n\u2502 \u251c\u2500\u2500 \u2026\n\u2502 \u2514\u2500\u2500 95-1c92cbce-8af4-4fe4-9c11-832245cf4d40-0.parquet\n\u2514\u2500\u2500 date=2021-01-05\n \u251c\u2500\u2500 96-1b878ee5-25fd-431a-bc3e-6dcacc96b470-0.parquet\n \u251c\u2500\u2500 \u2026\n \u2514\u2500\u2500 99-9650ed63-c195-433d-a86b-9469088c14ba-0.parquet\n
Each of these Parquet files is tiny - only 10 KB. Let\u2019s see how to compact these tiny files into larger files, which is more efficient for data queries.
"},{"location":"usage/optimize/small-file-compaction-with-optimize/#compact-small-files-in-the-delta-table-with-optimize","title":"Compact small files in the Delta table with optimize","text":"Let\u2019s run the optimize command to compact the existing small files into larger files:
dt = DeltaTable(\"observation_data\")\n\ndt.optimize()\n
Here\u2019s the output of the command:
{'numFilesAdded': 5,\n 'numFilesRemoved': 100,\n 'filesAdded': {'min': 39000,\n 'max': 238282,\n 'avg': 198425.6,\n 'totalFiles': 5,\n 'totalSize': 992128},\n 'filesRemoved': {'min': 10244,\n 'max': 10244,\n 'avg': 10244.0,\n 'totalFiles': 100,\n 'totalSize': 1024400},\n 'partitionsOptimized': 5,\n 'numBatches': 1,\n 'totalConsideredFiles': 100,\n 'totalFilesSkipped': 0,\n 'preserveInsertionOrder': True}\n
The optimize operation has added 5 new files and marked 100 existing files for removal (this is also known as \u201ctombstoning\u201d files). It has compacted the 100 tiny files into 5 larger files.
Let\u2019s append some more data to the Delta table and see how we can selectively run optimize on the new data that\u2019s added.
"},{"location":"usage/optimize/small-file-compaction-with-optimize/#handling-incremental-updates-with-optimize","title":"Handling incremental updates with optimize","text":"Let\u2019s append another 24 hours of data to the Delta table:
for timestamp in itertools.islice(hours_iter, 24):\n write_deltalake(\n dt,\n record_observations(timestamp),\n partition_by=[\"date\"],\n mode=\"append\",\n )\n
We can use get_add_actions()
to introspect the table state. We can see that 2021-01-06
has only a few hours of data so far, so we don't want to optimize that yet. But 2021-01-05
has all 24 hours of data, so it's ready to be optimized.
dt.get_add_actions(flatten=True).to_pandas()[\n \"partition.date\"\n].value_counts().sort_index()\n\n2021-01-01 1\n2021-01-02 1\n2021-01-03 1\n2021-01-04 1\n2021-01-05 21\n2021-01-06 4\n
To optimize a single partition, you can pass in a partition_filters
argument specifying which partitions to optimize.
dt.optimize(partition_filters=[(\"date\", \"=\", \"2021-01-05\")])\n\n{'numFilesAdded': 1,\n 'numFilesRemoved': 21,\n 'filesAdded': {'min': 238282,\n 'max': 238282,\n 'avg': 238282.0,\n 'totalFiles': 1,\n 'totalSize': 238282},\n 'filesRemoved': {'min': 10244,\n 'max': 39000,\n 'avg': 11613.333333333334,\n 'totalFiles': 21,\n 'totalSize': 243880},\n 'partitionsOptimized': 1,\n 'numBatches': 1,\n 'totalConsideredFiles': 21,\n 'totalFilesSkipped': 0,\n 'preserveInsertionOrder': True}\n
This optimize operation tombstones 21 small data files and adds one file with all the existing data properly condensed. Let\u2019s take a look at a portion of the _delta_log/00000000000000000125.json
file, which is the transaction log entry that corresponds with this incremental optimize command.
{\n \"remove\": {\n \"path\": \"date=2021-01-05/part-00000-41178aab-2491-488f-943d-8f03867295ee-c000.snappy.parquet\",\n \"deletionTimestamp\": 1683465499480,\n \"dataChange\": false,\n \"extendedFileMetadata\": null,\n \"partitionValues\": {\n \"date\": \"2021-01-05\"\n },\n \"size\": 39000,\n \"tags\": null\n }\n}\n\n{\n \"remove\": {\n \"path\": \"date=2021-01-05/101-79ae6fc9-c0cc-49ec-bb94-9aba879ac949-0.parquet\",\n \"deletionTimestamp\": 1683465499481,\n \"dataChange\": false,\n \"extendedFileMetadata\": null,\n \"partitionValues\": {\n \"date\": \"2021-01-05\"\n },\n \"size\": 10244,\n \"tags\": null\n }\n}\n\n\u2026\n\n{\n \"add\": {\n \"path\": \"date=2021-01-05/part-00000-4b020a40-c836-4a11-851f-4691370c9f3a-c000.snappy.parquet\",\n \"size\": 238282,\n \"partitionValues\": {\n \"date\": \"2021-01-05\"\n },\n \"modificationTime\": 1683465499493,\n \"dataChange\": false,\n \"stats\": \"{\\\"numRecords\\\":24000,\\\"minValues\\\":{\\\"value\\\":0.00005581532256615507,\\\"timestamp\\\":\\\"2021-01-05T00:00:00.000Z\\\"},\\\"maxValues\\\":{\\\"timestamp\\\":\\\"2021-01-05T23:00:00.000Z\\\",\\\"value\\\":0.9999911402868216},\\\"nullCount\\\":{\\\"timestamp\\\":0,\\\"value\\\":0}}\",\n \"tags\": null\n }\n}\n
The transaction log indicates that many files have been tombstoned and one file added, as expected.
The Delta Lake optimize command \u201cremoves\u201d data by marking the data files as removed in the transaction log. The optimize command doesn\u2019t physically delete the Parquet file from storage. Optimize performs a \u201clogical remove\u201d not a \u201cphysical remove\u201d.
Delta Lake uses logical operations so you can time travel back to earlier versions of your data. You can vacuum your Delta table to physically remove Parquet files from storage if you don\u2019t need to time travel and don\u2019t want to pay to store the tombstoned files.
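As a sketch, assuming the tombstoned files have not been vacuumed yet, you could read the table as it was before the first optimize (version 99 here, since the 100 appends created versions 0 through 99):
dt_old = DeltaTable(\"observation_data\", version=99)\ndt_old.to_pandas() # reads the pre-optimize data files\n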
"},{"location":"usage/optimize/small-file-compaction-with-optimize/#vacuuming-after-optimizing","title":"Vacuuming after optimizing","text":"The vacuum command deletes all files from storage that are marked for removal in the transaction log and older than the retention period which is 7 days by default.
It\u2019s normally a good idea to have a retention period of at least 7 days. For purposes of this example, we will set the retention period to zero, just so you can see how the files get removed from storage. Adjusting the retention period in this manner isn\u2019t recommended for production use cases.
Let\u2019s run the vacuum command:
dt.vacuum(retention_hours=0, enforce_retention_duration=False, dry_run=False)\n
The command returns a list of all the files that are removed from storage:
['date=2021-01-02/39-a98680f2-0e0e-4f26-a491-18b183f9eb05-0.parquet',\n 'date=2021-01-02/41-e96bc8bb-c571-484c-b534-e897424fb7da-0.parquet',\n \u2026\n 'date=2021-01-01/0-cfe227c6-edd9-4369-a1b0-db4559a2e693-0.parquet',\n 'date=2021-01-01/18-ded53418-172b-4e40-bf2e-7c8142e71bd1-0.parquet']\n
Let\u2019s look at the content of the Delta table now that all the really small files have been removed from storage:
observation_data\n\u251c\u2500\u2500 _delta_log\n\u2502 \u251c\u2500\u2500 00000000000000000000.json\n\u2502 \u251c\u2500\u2500 00000000000000000001.json\n\u2502 \u251c\u2500\u2500 \u2026\n\u2502 \u251c\u2500\u2500 00000000000000000124.json\n\u2502 \u2514\u2500\u2500 00000000000000000125.json\n\u251c\u2500\u2500 date=2021-01-01\n\u2502 \u2514\u2500\u2500 part-00000-31e3df5a-8bbe-425c-b85d-77794f922837-c000.snappy.parquet\n\u251c\u2500\u2500 date=2021-01-02\n\u2502 \u2514\u2500\u2500 part-00000-8af07878-b179-49ce-a900-d58595ffb60a-c000.snappy.parquet\n\u251c\u2500\u2500 date=2021-01-03\n\u2502 \u2514\u2500\u2500 part-00000-5e980864-b32f-4686-a58d-a75fae455c1e-c000.snappy.parquet\n\u251c\u2500\u2500 date=2021-01-04\n\u2502 \u2514\u2500\u2500 part-00000-1e82d23b-084d-47e3-9790-d68289c39837-c000.snappy.parquet\n\u251c\u2500\u2500 date=2021-01-05\n\u2502 \u2514\u2500\u2500 part-00000-4b020a40-c836-4a11-851f-4691370c9f3a-c000.snappy.parquet\n\u2514\u2500\u2500 date=2021-01-06\n \u251c\u2500\u2500 121-0ecb5d70-4a28-4cd4-b2d2-89ee2285eaaa-0.parquet\n \u251c\u2500\u2500 122-6b2d2758-9154-4392-b287-fe371ee507ec-0.parquet\n \u251c\u2500\u2500 123-551d318f-4968-441f-83fc-89f98cd15daf-0.parquet\n \u2514\u2500\u2500 124-287309d3-662e-449d-b4da-2e67b7cc0557-0.parquet\n
All the partitions only contain a single file now, except for the date=2021-01-06
partition that has not been compacted yet.
An entire partition won\u2019t necessarily get compacted to a single data file when optimize is run. Each partition has data files that are condensed to the target file size.
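If you want to influence that size, you can pass a target size to optimize; the snippet below is a sketch that assumes the target_size parameter, and the 256 MB value is only an illustrative choice.
# Sketch: compact to an explicit target file size (in bytes)\ndt.optimize(target_size=256 * 1024 * 1024)\n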
"},{"location":"usage/optimize/small-file-compaction-with-optimize/#what-causes-the-small-file-problem","title":"What causes the small file problem?","text":"Delta tables can accumulate small files for a variety of reasons:
- User error: users can accidentally write files that are too small. Users should sometimes repartition in memory before writing to disk to avoid creating such small files.
- Frequent appends: systems that append frequently tend to accumulate many small files. A pipeline that appends every minute will generally generate roughly ten times as many small files as one that appends every ten minutes.
- Appending to partitioned data lakes with high cardinality columns can also cause small files. If you append every hour to a table that\u2019s partitioned on a column with 1,000 distinct values, then every append could create 1,000 new files. Partitioning by date avoids this problem because the data isn\u2019t split up across partitions in this manner.
"},{"location":"usage/optimize/small-file-compaction-with-optimize/#conclusion","title":"Conclusion","text":"This page showed you how to create a Delta table with many small files, compact the small files into larger files with optimize, and remove the tombstoned files from storage with vacuum.
You also learned about how to incrementally optimize partitioned Delta tables, so you only compact newly added data.
An excessive number of small files slows down Delta table queries, so periodic compaction is important. Make sure to properly maintain your Delta tables, so performance does not degrade over time.
"}]}
\ No newline at end of file
diff --git a/sitemap.xml b/sitemap.xml
index 9866df3898..766cf39af9 100644
--- a/sitemap.xml
+++ b/sitemap.xml
@@ -2,142 +2,142 @@
https://github.com/delta-io/delta-rs/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/api/catalog/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/api/delta_writer/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/api/exceptions/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/api/schema/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/api/storage/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/api/delta_table/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/api/delta_table/delta_table_alterer/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/api/delta_table/delta_table_merger/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/api/delta_table/delta_table_optimizer/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/api/delta_table/metadata/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/how-delta-lake-works/architecture-of-delta-table/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/integrations/delta-lake-arrow/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/integrations/delta-lake-datafusion/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/integrations/delta-lake-pandas/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/integrations/delta-lake-polars/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/usage/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/usage/appending-overwriting-delta-lake-table/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/usage/create-delta-lake-table/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/usage/deleting-rows-from-delta-lake-table/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/usage/examining-table/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/usage/installation/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/usage/loading-table/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/usage/managing-tables/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/usage/querying-delta-tables/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/usage/writing-delta-tables/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/usage/optimize/delta-lake-z-order/
- 2024-01-02
+ 2024-01-07
daily
https://github.com/delta-io/delta-rs/usage/optimize/small-file-compaction-with-optimize/
- 2024-01-02
+ 2024-01-07
daily
\ No newline at end of file
diff --git a/sitemap.xml.gz b/sitemap.xml.gz
index 392d40f257..58e9c099b6 100644
Binary files a/sitemap.xml.gz and b/sitemap.xml.gz differ