diff --git a/docs/_build/API_REFERENCE_LINKS.yml b/docs/_build/API_REFERENCE_LINKS.yml
index 4edeee90c58..a0c31f1cc8e 100644
--- a/docs/_build/API_REFERENCE_LINKS.yml
+++ b/docs/_build/API_REFERENCE_LINKS.yml
@@ -12,6 +12,7 @@ python:
   write_csv: https://pola-rs.github.io/polars/py-polars/html/reference/api/polars.DataFrame.write_csv.html
   read_json: https://pola-rs.github.io/polars/py-polars/html/reference/api/polars.read_json.html
   write_json: https://pola-rs.github.io/polars/py-polars/html/reference/api/polars.DataFrame.write_json.html
+  read_ipc: https://pola-rs.github.io/polars/py-polars/html/reference/api/polars.read_ipc.html
   min: https://pola-rs.github.io/polars/py-polars/html/reference/series/api/polars.Series.min.html
   max: https://pola-rs.github.io/polars/py-polars/html/reference/series/api/polars.Series.max.html
   value_counts: https://pola-rs.github.io/polars/py-polars/html/reference/expressions/api/polars.Expr.value_counts.html
@@ -223,6 +224,10 @@ rust:
     name: scan_parquet
     link: https://pola-rs.github.io/polars/docs/rust/dev/polars/prelude/struct.LazyFrame.html#method.scan_parquet
     feature_flags: ['parquet']
+  read_ipc:
+    name: IpcReader
+    link: https://pola-rs.github.io/polars/docs/rust/dev/polars_io/prelude/struct.IpcReader.html
+    feature_flags: ['ipc']
   min: https://pola-rs.github.io/polars/docs/rust/dev/polars/series/struct.Series.html#method.min
   max: https://pola-rs.github.io/polars/docs/rust/dev/polars/series/struct.Series.html#method.max
   struct:
diff --git a/docs/user-guide/io/cloud-storage.md b/docs/user-guide/io/cloud-storage.md
index ca2aa9dac04..69e01750c6f 100644
--- a/docs/user-guide/io/cloud-storage.md
+++ b/docs/user-guide/io/cloud-storage.md
@@ -31,7 +31,7 @@ Polars can scan a Parquet file in lazy mode from cloud storage. We may need to p
 
 This query creates a `LazyFrame` without downloading the file. In the `LazyFrame` we have access to file metadata such as the schema. Polars uses the `object_store.rs` library internally to manage the interface with the cloud storage providers and so no extra dependencies are required in Python to scan a cloud Parquet file.
 
-If we create a lazy query with [predicate and projection pushdowns](../../lazy/optimizations/) the query optimiser will apply them before the file is downloaded. This can significantly reduce the amount of data that needs to be downloaded. The query evaluation is triggered by calling `collect`.
+If we create a lazy query with [predicate and projection pushdowns](../lazy/optimizations.md) the query optimiser will apply them before the file is downloaded. This can significantly reduce the amount of data that needs to be downloaded. The query evaluation is triggered by calling `collect`.
 
 {{code_block('user-guide/io/cloud-storage','scan_parquet_query',[])}}
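
As an illustrative sketch of the pattern the edited paragraph describes (a lazy scan from cloud storage with predicate and projection pushdowns, evaluated by `collect`): the bucket path and column names below are hypothetical, and authentication (e.g. via `storage_options`) is omitted.

```python
import polars as pl

# Hypothetical bucket and object key; substitute a real cloud path.
source = "s3://my-bucket/daily_trades.parquet"

# Scanning creates a LazyFrame without downloading the file.
lf = pl.scan_parquet(source)

# The filter (predicate pushdown) and select (projection pushdown) are
# applied by the query optimiser before data is transferred, so only the
# matching rows and the requested columns are downloaded.
result = (
    lf.filter(pl.col("price") > 100)
    .select(["ticker", "price"])
    .collect()
)
```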