From a516c6b26d183efc4f56293256bba92e243b7a61 Mon Sep 17 00:00:00 2001
From: ZetaSQL Team
Date: Tue, 12 Nov 2024 11:59:35 -0800
Subject: [PATCH] Export of internal ZetaSQL changes.
--
Change by ZetaSQL Team :
Update the resolved ast doc for ZetaSQL.
--
Change by Brandon Dolphin :
Refactor out shared AddMeasureColumnsToTable function for working with measure columns.
--
Change by Christoph Dibak :
Implement per-aggregation budgeting in the reference impl
--
Change by Nevena Kotlaja :
No public description
--
Change by Nevena Kotlaja :
No public description
--
Change by ZetaSQL Team :
Implement method TimestampPicoValue::FromString().
--
Change by ZetaSQL Team :
Fix standalone expression evaluation for multi-level aggregates.
--
Change by ZetaSQL Team :
Add documentation for graph query syntax
--
Change by ZetaSQL Team :
Fix a bug where information from `ExprResolutionInfo` is not propagated correctly up to parent `ExprResolutionInfos` for multi-level aggregation.
--
Change by ZetaSQL Team :
Add SQL rewriter and missing analyzer tests for map_contains_key.
--
Change by Brandon Dolphin :
Add deserialization for MeasureType.
--
Change by ZetaSQL Team :
Add the parser implementation for pipe recursive union.
--
Change by ZetaSQL Team :
Refactor ResolveMatchRecognize() into smaller methods.
--
Change by ZetaSQL Team :
Add a RET_CHECK to ensure that PIVOT expressions in the reference implementation are not multi-level aggregations.
--
Change by ZetaSQL Team :
Ensure that volatile grouping expressions are not considered grouping consts.
--
Change by ZetaSQL Team :
Rename ResolvedMatchRecognizeEmptyPattern to ResolvedMatchRecognizePatternEmpty to have a consistent prefix with other nodes.
--
Change by Brandon Dolphin :
Remove documented requirement for FEATURE_V_1_4_MULTILEVEL_AGGREGATION when analyzing measure expressions.
--
Change by ZetaSQL Team :
Add more compliance tests for multi-level aggregation.
--
Change by ZetaSQL Team :
Move the OPTIONS() list to the end of the MATCH_RECOGNIZE() syntax.
--
Change by ZetaSQL Team :
Add a new option to AnalyzerOptions and ErrorMessageOptions to enable an enhanced form of error redaction which preserves some information about the error, while keeping the output stable.
--
Change by ZetaSQL Team :
Coerce the predicate expression in MATCH_RECOGNIZE's DEFINE clause to BOOL before giving an error.
--
Change by ZetaSQL Team :
Update the resolved ast documentation for the ZetaSQL release in Q4 2024.
--
Change by ZetaSQL Team :
Add set operators to pipe syntax: UNION, INTERSECT, EXCEPT, CORRESPONDING
--
Change by Yassin Ezbakhe :
`MatcherCollection`: Add `AddMatcher` method.
--
Change by Jeff Shute :
Implement experimental terminal pipe operator for EXPORT DATA.
--
Change by Brandon Dolphin :
Fix casing in table name extraction for `TABLE x` and `FROM x` referring to TVF args.
--
Change by ZetaSQL Team :
Support pattern variable references in MATCH_RECOGNIZE's measures clause.
--
Change by ZetaSQL Team :
Add SQL rewriter for map_get function.
--
Change by Brandon Dolphin :
Update error message in JSON literal test.
--
Change by ZetaSQL Team :
small changes
--
Change by ZetaSQL Team :
Update COUNT and COUNTIF documentation.
--
Change by Brandon Dolphin :
Fix typo in measure_expression target names.
--
Change by Brandon Dolphin :
Add Java implementation of MeasureType.
--
Change by ZetaSQL Team :
Add support for graph queries
--
Change by ZetaSQL Team :
No public description
--
Change by ZetaSQL Team :
internal
--
Change by ZetaSQL Team :
Add MATCH_RECOGNIZE compliance test covering the scenario where a DEFINE expression evaluates to NULL rather than TRUE or FALSE.
--
Change by ZetaSQL Team :
Remove unimplemented optimization-related items from the pattern matching api:
--
Change by Brandon Dolphin :
Implement MeasureType::TypeNameWithModifiers.
--
Change by Brandon Dolphin :
Fix typo in sql_test_base.cc variable name.
--
Change by ZetaSQL Team :
Add ZetaSketch to the list of external applications that can be used to build sketches.
--
Change by ZetaSQL Team :
Add anonymization threshold to the AnonymizationInfo class
--
Change by ZetaSQL Team :
No public description
--
Change by ZetaSQL Team :
Fix struct construction syntax in examples of sparse vectors.
--
Change by Jeff Shute :
Initial resolved AST and analyzer for pipe FORK.
--
Change by ZetaSQL Team :
Internal fix.
--
Change by ZetaSQL Team :
No public description
--
Change by ZetaSQL Team :
Add INTERVAL to JSON encoding documentation.
--
Change by ZetaSQL Team :
Internal changes.
--
Change by ZetaSQL Team :
Validate composite measure.
--
Change by Brandon Dolphin :
Add deprecation warning for `TABLE x` syntax cases which will be affected by resolution order change.
--
Change by ZetaSQL Team :
Add SQL rewriter for MAP_CARDINALITY
--
Change by Brandon Dolphin :
Add feature `FEATURE_TABLE_SYNTAX_RESOLVE_ARGUMENT_LAST` to preserve the old (incorrect) resolution order for `TABLE x` syntax in SQL TVF bodies.
--
Change by ZetaSQL Team :
Analyzer support for ON CONFLICT clause in INSERT DML
--
Change by ZetaSQL Team :
Fix a bug in the script executor where redeclaring a variable without type parameters in a session would cause an error
--
Change by Lev Kandel :
Prepare code for breaking change in Protobuf C++ API.
Protobuf 6.30.0 will change the return types of Descriptor::name() and other methods to absl::string_view. This makes the code work both before and after such a change.
--
Change by ZetaSQL Team :
Fix an incorrect comment.
--
Change by ZetaSQL Team :
No public description
--
Change by ZetaSQL Team :
Change complexity limit for state machine algo from 15,000 states to 100,000 edges, as it's the number of edges, not states, that really determines the time and space requirements of the algorithm. Also, moved the check from NFABuilder into NFA itself so that it gets enforced inside of EpsilonRemover, which, in the worst case, can drastically increase the number of edges, while simultaneously reducing the number of states.
--
Change by Jeff Shute :
Add some support for referencing Constants with values.
--
Change by ZetaSQL Team :
Unify the creation of multi_level_aggregate_info QRI for all the paths that create a default.
--
Change by John Fremlin :
Add RowIdentityColumns function to catalog Table
--
Change by ZetaSQL Team :
Remove references to Bison from comments in gen_parse_tree.py.
--
Change by ZetaSQL Team :
Remove "Bison" from the names of some functions and variables in keywords.h.
--
Change by ZetaSQL Team :
Enable SELECT .. FOR UPDATE for external use.
--
Change by ZetaSQL Team :
Fix the order of parameters in the Regexp::Extract function.
--
Change by ZetaSQL Team :
Add support for UINT64 arguments to `TIMESTAMP_FROM_UNIX_SECONDS`, `TIMESTAMP_FROM_UNIX_MILLIS`, and `TIMESTAMP_FROM_UNIX_MICROS`.
--
Change by ZetaSQL Team :
No public description
--
Change by ZetaSQL Team :
No public description
--
Change by ZetaSQL Team :
Enable multi-level aggregation in RQG.
--
Change by ZetaSQL Team :
Fix a bug with deferred resolution for multi-level aggregates.
--
Change by ZetaSQL Team :
Fix some formatting issues with ST_EQUALS.
--
Change by ZetaSQL Team :
Remove "pipe operator" from pipe operator table to economize.
--
Change by Laramie Leavitt :
Avoid creating a new PRNG on each test loop.
--
Change by Jeff Shute :
Parser implementation for experimental pipe FORK.
--
Change by ZetaSQL Team :
Internal Change
(And 482 more changes)
GitOrigin-RevId: 158e00ddbc5bb8b8ef75e6b6473daf4fcef6174a
Change-Id: I6520749d9676a3fce2e84a49f3e1dd6804390ab7
---
.bazelrc | 9 +-
.bazelversion | 2 +-
Dockerfile | 29 +-
README.md | 2 +-
WORKSPACE | 14 +-
bazel/grpc_cf_engine.patch | 10 +
bazel/grpc_extra_deps.patch | 2 +-
bazel/icu.BUILD | 23 +-
bazel/icu4c-64_2.patch | 2 +-
bazel/maven_install.json | 1951 ++---
bazel/zetasql_deps_step_1.bzl | 22 +
bazel/zetasql_deps_step_2.bzl | 102 +-
bazel/zetasql_java_deps.bzl | 36 +-
docs/README.md | 1 +
docs/aggregate-dp-functions.md | 81 +-
docs/aggregate_functions.md | 469 +-
docs/approximate_aggregate_functions.md | 32 +-
docs/array_functions.md | 241 +-
docs/bit_functions.md | 59 +-
docs/conditional_expressions.md | 8 -
docs/conversion_functions.md | 801 +-
docs/conversion_rules.md | 40 +
docs/data-definition-language.md | 16 +-
docs/data-manipulation-language.md | 5 +-
docs/data-model.md | 23 +
docs/data-types.md | 138 +-
docs/date_functions.md | 224 +-
docs/datetime_functions.md | 212 +-
docs/debugging_functions.md | 22 +-
docs/format-elements.md | 16 +-
docs/functions-and-operators.md | 6713 ++++++++++++-----
docs/geography_functions.md | 394 +-
docs/graph-conditional-expressions.md | 11 +
docs/graph-data-types.md | 82 +
docs/graph-gql-functions.md | 916 +++
docs/graph-intro.md | 126 +
docs/graph-operators.md | 427 ++
docs/graph-patterns.md | 1838 +++++
docs/graph-query-statements.md | 1857 +++++
docs/graph-schema-statements.md | 453 ++
docs/graph-sql-queries.md | 140 +
docs/graph-subqueries.md | 298 +
docs/hash_functions.md | 27 +-
docs/hll_functions.md | 22 +-
docs/interval_functions.md | 27 +-
docs/json_functions.md | 721 +-
docs/lexical.md | 20 +-
docs/mathematical_functions.md | 334 +-
docs/modules.md | 372 -
docs/navigation_functions.md | 44 +-
docs/net_functions.md | 82 +-
docs/numbering_functions.md | 38 +-
docs/operators.md | 456 +-
docs/pipe-syntax.md | 1524 +++-
docs/protocol_buffer_functions.md | 61 +-
docs/query-syntax.md | 149 +-
docs/range-functions.md | 62 +-
docs/resolved_ast.md | 2131 ++++--
docs/security_functions.md | 7 +-
docs/sketches.md | 20 +-
docs/statistical_aggregate_functions.md | 56 +-
docs/string_functions.md | 559 +-
docs/table-functions.md | 5 -
docs/time-series-functions.md | 43 +-
docs/time_functions.md | 112 +-
docs/timestamp_functions.md | 304 +-
docs/user-defined-aggregates.md | 6 -
docs/user-defined-functions.md | 18 -
docs/window-function-calls.md | 2 +-
examples/bazel/.bazelversion | 2 +-
java/com/google/zetasql/BUILD | 42 +
java/com/google/zetasql/Catalog.java | 79 +
.../google/zetasql/FunctionArgumentType.java | 8 +
java/com/google/zetasql/GraphEdgeTable.java | 27 +
.../com/google/zetasql/GraphElementLabel.java | 39 +
.../com/google/zetasql/GraphElementTable.java | 80 +
java/com/google/zetasql/GraphElementType.java | 245 +
java/com/google/zetasql/GraphNodeTable.java | 21 +
.../zetasql/GraphNodeTableReference.java | 36 +
java/com/google/zetasql/GraphPathType.java | 90 +
.../zetasql/GraphPropertyDeclaration.java | 35 +
.../zetasql/GraphPropertyDefinition.java | 31 +
java/com/google/zetasql/MeasureType.java | 67 +
java/com/google/zetasql/PropertyGraph.java | 71 +
java/com/google/zetasql/SimpleCatalog.java | 57 +-
.../google/zetasql/SimpleGraphEdgeTable.java | 119 +
.../zetasql/SimpleGraphElementLabel.java | 91 +
.../zetasql/SimpleGraphElementTable.java | 141 +
.../google/zetasql/SimpleGraphNodeTable.java | 83 +
.../SimpleGraphNodeTableReference.java | 68 +
.../SimpleGraphPropertyDeclaration.java | 75 +
.../SimpleGraphPropertyDefinition.java | 66 +
.../google/zetasql/SimplePropertyGraph.java | 251 +
java/com/google/zetasql/SimpleTable.java | 22 +
java/com/google/zetasql/TVFSignature.java | 4 +
java/com/google/zetasql/Table.java | 2 +
.../google/zetasql/TableValuedFunction.java | 24 +
java/com/google/zetasql/Type.java | 45 +-
java/com/google/zetasql/TypeFactory.java | 111 +
java/com/google/zetasql/Value.java | 224 +-
java/com/google/zetasql/parser/BUILD | 2 +
.../AbstractDeserializationHelper.java | 16 +
java/com/google/zetasql/resolvedast/BUILD | 3 +
.../zetasql/resolvedast/DebugStrings.java | 46 +
.../resolvedast/DeserializationHelper.java | 61 +
.../resolvedast/ResolvedNodes.java.template | 71 +
java/com/google/zetasql/testing/BUILD | 1 +
.../com/google/zetasql/AnalyzerTest.java | 6 +-
javatests/com/google/zetasql/BUILD | 3 +
.../zetasql/FunctionArgumentTypeTest.java | 35 +
.../com/google/zetasql/MeasureTypeTest.java | 157 +
.../com/google/zetasql/SimpleCatalogTest.java | 19 +-
.../com/google/zetasql/SimpleTableTest.java | 27 +
.../com/google/zetasql/TypeFactoryTest.java | 5 +
javatests/com/google/zetasql/TypeTest.java | 7 +-
.../com/google/zetasql/TypeTestBase.java | 9 +-
javatests/com/google/zetasql/ValueTest.java | 160 +-
javatests/com/google/zetasql/parser/BUILD | 1 +
.../com/google/zetasql/resolvedast/BUILD | 1 +
zetasql/analyzer/BUILD | 15 +-
zetasql/analyzer/all_rewriters.cc | 2 +
.../analyzer/analytic_function_resolver.cc | 162 +-
zetasql/analyzer/analytic_function_resolver.h | 49 +-
zetasql/analyzer/analyzer_impl.cc | 3 +-
zetasql/analyzer/analyzer_test.cc | 111 +-
zetasql/analyzer/analyzer_test_options.cc | 3 +
zetasql/analyzer/analyzer_test_options.h | 4 +
.../analyzer/analyzer_test_options_test.cc | 3 +-
zetasql/analyzer/expr_matching_helpers.cc | 7 +
zetasql/analyzer/expr_resolver_helper.cc | 107 +-
zetasql/analyzer/expr_resolver_helper.h | 53 +-
zetasql/analyzer/function_resolver.cc | 39 +-
.../analyzer/function_signature_matcher.cc | 50 +-
zetasql/analyzer/graph_label_expr_resolver.cc | 222 +
zetasql/analyzer/graph_label_expr_resolver.h | 59 +
zetasql/analyzer/graph_query_resolver.cc | 3567 +++++++++
zetasql/analyzer/graph_query_resolver.h | 751 ++
zetasql/analyzer/graph_stmt_resolver.cc | 1057 +++
zetasql/analyzer/graph_stmt_resolver.h | 138 +
zetasql/analyzer/lambda_util.h | 2 +-
zetasql/analyzer/name_scope.cc | 111 +-
zetasql/analyzer/name_scope.h | 122 +-
zetasql/analyzer/query_resolver_helper.cc | 134 +-
zetasql/analyzer/query_resolver_helper.h | 279 +-
zetasql/analyzer/resolver.cc | 48 +-
zetasql/analyzer/resolver.h | 673 +-
zetasql/analyzer/resolver_alter_stmt.cc | 4 +-
zetasql/analyzer/resolver_dml.cc | 218 +-
zetasql/analyzer/resolver_expr.cc | 1285 +++-
zetasql/analyzer/resolver_query.cc | 2838 ++++++-
zetasql/analyzer/resolver_stmt.cc | 487 +-
zetasql/analyzer/rewriters/BUILD | 58 +-
.../rewriters/anonymization_helper.cc | 624 +-
.../analyzer/rewriters/flatten_rewriter.cc | 3 +
.../rewriters/insert_dml_values_rewriter.cc | 2 +-
.../rewriters/multiway_unnest_rewriter.cc | 1 +
...rder_by_and_limit_in_aggregate_rewriter.cc | 5 +
.../analyzer/rewriters/pipe_if_rewriter.cc | 108 +
zetasql/analyzer/rewriters/pipe_if_rewriter.h | 28 +
.../rewriters/pipe_if_rewriter_test.cc | 134 +
zetasql/analyzer/rewriters/pivot_rewriter.cc | 9 +
zetasql/analyzer/rewriters/privacy/BUILD | 59 +
.../privacy/approx_count_distinct_utility.cc | 547 ++
.../privacy/approx_count_distinct_utility.h | 92 +
.../rewriters/privacy/privacy_utility.cc | 69 +
.../rewriters/privacy/privacy_utility.h | 32 +
zetasql/analyzer/rewriters/registration.cc | 8 +-
zetasql/analyzer/rewriters/registration.h | 6 +-
.../analyzer/rewriters/rewrite_subpipeline.cc | 110 +
.../analyzer/rewriters/rewrite_subpipeline.h | 35 +
.../rewriters/rewriter_relevance_checker.cc | 7 +
.../rewriters/sql_function_inliner.cc | 1 +
.../analyzer/rewriters/sql_view_inliner.cc | 1 +
.../rewriters/typeof_function_rewriter.cc | 1 +
.../analyzer/rewriters/with_expr_rewriter.cc | 1 +
zetasql/analyzer/run_analyzer_test.cc | 278 +-
.../analyzer/set_operation_resolver_base.cc | 1 +
zetasql/analyzer/substitute.cc | 3 +-
zetasql/analyzer/substitute_test.cc | 2 -
.../testdata/alter_row_access_policy.test | 6 +-
zetasql/analyzer/testdata/anonymization.test | 4 +-
...nonymization_group_selection_strategy.test | 2 +-
zetasql/analyzer/testdata/array_path.test | 207 +
zetasql/analyzer/testdata/casts.test | 18 +
zetasql/analyzer/testdata/collation.test | 411 +
zetasql/analyzer/testdata/constant.test | 8 +-
zetasql/analyzer/testdata/corresponding.test | 490 +-
.../testdata/corresponding_combinations.test | 528 +-
.../corresponding_with_collation.test | 18 +-
.../testdata/create_external_table.test | 14 +-
zetasql/analyzer/testdata/create_index.test | 243 +
.../testdata/create_property_graph.test | 2906 +++++++
.../testdata/create_row_access_policy.test | 6 +-
.../analyzer/testdata/create_row_policy.test | 6 +-
.../testdata/create_table_function.test | 22 +-
.../testdata/differential_privacy.test | 8 +-
...tial_privacy_group_selection_strategy.test | 30 +-
.../dml_insert_on_conflict_clause.test | 463 ++
zetasql/analyzer/testdata/export_data.test | 22 +
.../testdata/extract_table_names.test | 65 +
.../testdata/graph_access_tracking.test | 263 +
.../analyzer/testdata/graph_correlated.test | 1003 +++
.../analyzer/testdata/graph_edge_pattern.test | 648 ++
.../analyzer/testdata/graph_expressions.test | 3277 ++++++++
.../testdata/graph_gql_aggregation.test | 6027 +++++++++++++++
.../testdata/graph_gql_composite_query1.test | 2363 ++++++
.../testdata/graph_gql_composite_query2.test | 491 ++
zetasql/analyzer/testdata/graph_gql_for.test | 695 ++
.../testdata/graph_gql_order_by_and_page.test | 4509 +++++++++++
.../testdata/graph_gql_window_function.test | 2206 ++++++
.../graph_horizontal_aggregation.test | 2190 ++++++
zetasql/analyzer/testdata/graph_label.test | 518 ++
.../analyzer/testdata/graph_node_pattern.test | 288 +
.../testdata/graph_optional_columns.test | 1781 +++++
.../graph_parenthesized_path_pattern.test | 2332 ++++++
.../testdata/graph_path_functions.test | 1397 ++++
.../analyzer/testdata/graph_path_mode.test | 6102 +++++++++++++++
.../analyzer/testdata/graph_path_pattern.test | 2106 ++++++
.../testdata/graph_path_search_prefix.test | 2614 +++++++
.../testdata/graph_path_variable.test | 2286 ++++++
.../graph_property_specification.test | 908 +++
.../graph_quantified_path_pattern_test1.test | 2000 +++++
.../graph_quantified_path_pattern_test2.test | 2349 ++++++
.../graph_quantified_path_pattern_test3.test | 2182 ++++++
...graph_quantified_pattern_gql_extended.test | 1390 ++++
.../analyzer/testdata/graph_query_hints.test | 1122 +++
.../testdata/graph_query_statement.test | 78 +
zetasql/analyzer/testdata/graph_subquery.test | 1489 ++++
zetasql/analyzer/testdata/graph_table.test | 1338 ++++
.../testdata/graph_table_gql_extended.test | 6539 ++++++++++++++++
zetasql/analyzer/testdata/json.test | 27 +
zetasql/analyzer/testdata/literals.test | 9 -
zetasql/analyzer/testdata/lock_mode.test | 1727 +++++
zetasql/analyzer/testdata/map_functions.test | 664 +-
.../analyzer/testdata/match_recognize.test | 5870 +++++++++++++-
...recognize_with_multilevel_aggregation.test | 1784 +++++
.../testdata/multi_level_aggregation.test | 211 -
.../multi_level_aggregation_basic.test | 2271 ++++++
.../multi_level_aggregation_complex.test | 2081 +++++
.../multi_level_aggregation_errors.test | 524 ++
...i_level_aggregation_not_yet_supported.test | 257 +
...el_aggregation_udas_not_yet_supported.test | 43 +
zetasql/analyzer/testdata/orderby.test | 77 +
zetasql/analyzer/testdata/pipe_aggregate.test | 1402 +++-
zetasql/analyzer/testdata/pipe_drop.test | 33 +
zetasql/analyzer/testdata/pipe_order_by.test | 97 +
zetasql/analyzer/testdata/pipe_query.test | 8 -
zetasql/analyzer/testdata/pipe_rename.test | 25 +
zetasql/analyzer/testdata/pipe_set.test | 72 +
.../analyzer/testdata/pipe_set_operation.test | 2451 +++++-
.../analyzer/testdata/recursive_views.test | 137 +-
zetasql/analyzer/testdata/set_operation.test | 124 +-
.../testdata/sql_builder_lock_mode.test | 929 +++
.../analyzer/testdata/standalone_expr.test | 36 +
.../analyzer/testdata/tvf_relation_args.test | 9 +
.../testdata/typed_hints_and_options.test | 418 +-
zetasql/analyzer/testdata/with_recursive.test | 875 ++-
zetasql/base/BUILD | 24 +-
zetasql/base/endian.h | 18 +
zetasql/base/endian_test.cc | 12 +
zetasql/base/mathlimits.cc | 126 -
zetasql/base/mathlimits.h | 273 -
zetasql/base/mathlimits_test.cc | 243 -
zetasql/base/mathutil.h | 41 +-
zetasql/base/mathutil_test.cc | 102 +-
zetasql/common/BUILD | 30 +-
.../builtin_function_differential_privacy.cc | 961 ++-
zetasql/common/builtin_function_graph.cc | 401 +
zetasql/common/builtin_function_internal.h | 4 +
zetasql/common/builtin_function_internal_2.cc | 85 +-
zetasql/common/builtin_function_internal_3.cc | 85 +-
zetasql/common/builtin_function_map.cc | 91 +-
zetasql/common/errors.cc | 12 +-
zetasql/common/errors.h | 6 +
zetasql/common/errors_test.cc | 8 +
zetasql/common/float_margin.h | 11 +-
zetasql/common/function_utils.cc | 44 +
zetasql/common/function_utils.h | 10 +
zetasql/common/graph_element_utils.cc | 43 +
zetasql/common/graph_element_utils.h | 31 +
zetasql/common/graph_element_utils_test.cc | 78 +
zetasql/common/initialize_required_fields.cc | 6 +-
zetasql/common/json_parser.cc | 248 +-
zetasql/common/json_parser.h | 16 +-
zetasql/common/json_parser_test.cc | 515 +-
zetasql/common/match_recognize/BUILD | 346 +
.../common/match_recognize/compiled_nfa.cc | 171 +
zetasql/common/match_recognize/compiled_nfa.h | 102 +
.../match_recognize/compiled_nfa_test.cc | 177 +
.../common/match_recognize/edge_matchers.cc | 74 +
.../common/match_recognize/edge_matchers.h | 82 +
.../common/match_recognize/edge_tracker.cc | 135 +
zetasql/common/match_recognize/edge_tracker.h | 101 +
.../match_recognize/edge_tracker_test.cc | 252 +
.../common/match_recognize/epsilon_remover.cc | 266 +
.../common/match_recognize/epsilon_remover.h | 39 +
.../match_recognize/epsilon_remover_test.cc | 634 ++
.../match_recognize/match_test_result.proto | 43 +
zetasql/common/match_recognize/nfa.cc | 156 +
zetasql/common/match_recognize/nfa.h | 315 +
zetasql/common/match_recognize/nfa_builder.cc | 368 +
zetasql/common/match_recognize/nfa_builder.h | 139 +
.../match_recognize/nfa_builder_test.cc | 940 +++
.../match_recognize/nfa_match_partition.cc | 211 +
.../match_recognize/nfa_match_partition.h | 110 +
.../common/match_recognize/nfa_matchers.cc | 245 +
zetasql/common/match_recognize/nfa_matchers.h | 148 +
.../match_recognize/nfa_matchers_test.cc | 312 +
zetasql/common/match_recognize/nfa_test.cc | 395 +
.../common/match_recognize/row_edge_list.cc | 115 +
.../common/match_recognize/row_edge_list.h | 89 +
.../match_recognize/row_edge_list_test.cc | 226 +
.../common/match_recognize/test_matcher.cc | 247 +
zetasql/common/match_recognize/test_matcher.h | 163 +
.../match_recognize/test_pattern_resolver.cc | 166 +
.../match_recognize/test_pattern_resolver.h | 102 +
.../test_pattern_resolver_test.cc | 221 +
zetasql/common/options_utils.cc | 37 +-
zetasql/common/options_utils.h | 8 +
zetasql/common/options_utils_test.cc | 21 +-
.../status_payload_matchers_oss_test.cc | 1 +
zetasql/compliance/BUILD | 14 +-
zetasql/compliance/compliance_test_cases.cc | 72 +-
.../depth_limit_detector_test_cases.cc | 31 +
.../depth_limit_detector_test_cases.h | 10 +-
zetasql/compliance/functions_testlib.h | 4 +
zetasql/compliance/functions_testlib_2.cc | 33 +-
zetasql/compliance/functions_testlib_cast.cc | 244 +-
zetasql/compliance/functions_testlib_regex.cc | 1668 ++--
.../compliance/functions_testlib_timestamp.cc | 93 +-
.../compliance/functions_testlib_tojson.cc | 69 +
zetasql/compliance/functions_testlib_uuid.cc | 57 +
.../zetasql_known_errors.textproto | 6 +
zetasql/compliance/matchers.h | 4 +
zetasql/compliance/matchers_test.cc | 9 +
zetasql/compliance/parameters_test_util.h | 2 +-
zetasql/compliance/runtime_expected_errors.cc | 58 +-
zetasql/compliance/runtime_expected_errors.h | 1 +
zetasql/compliance/sql_test_base.cc | 70 +-
zetasql/compliance/sql_test_base.h | 6 +-
.../compliance/sql_test_filebased_options.cc | 14 +-
.../compliance/sql_test_filebased_options.h | 8 +
.../sql_test_filebased_options_test.cc | 1 +
zetasql/compliance/test_database_catalog.cc | 49 +-
zetasql/compliance/test_database_catalog.h | 14 +-
zetasql/compliance/test_driver.cc | 16 +
zetasql/compliance/test_driver.h | 17 +-
zetasql/compliance/test_driver.proto | 9 +
zetasql/compliance/test_driver_test.cc | 5 +-
zetasql/compliance/test_util.cc | 14 +-
zetasql/compliance/test_util.h | 2 +
.../testdata/aggregation_queries.test | 256 +
.../compliance/testdata/array_functions.test | 14 +
.../testdata/case_statement_queries.test | 16 +
.../testdata/default_timezone_ist.test | 2 +-
zetasql/compliance/testdata/dml_delete.test | 20 +
.../compliance/testdata/graph_functions.test | 114 +
.../testdata/graph_query_statement.test | 311 +
zetasql/compliance/testdata/graph_table.test | 2030 +++++
.../testdata/graph_table_gql_extended.test | 4111 ++++++++++
.../compliance/testdata/groupby_queries.test | 140 +
zetasql/compliance/testdata/in_queries.test | 50 +
zetasql/compliance/testdata/join_queries.test | 85 +
.../compliance/testdata/map_functions.test | 299 +-
.../compliance/testdata/match_recognize.test | 782 ++
.../multi_level_aggregation_basic.test | 91 +
.../multi_level_aggregation_complex.test | 364 +
.../compliance/testdata/orderby_queries.test | 232 +
zetasql/compliance/testdata/timezones.test | 18 +-
zetasql/examples/pipe_queries/BUILD | 8 +-
zetasql/examples/pipe_queries/README.md | 7 +
.../pipe_queries/walkthrough_7day.sql | 5 +
zetasql/examples/tpch/catalog/BUILD | 1 +
zetasql/examples/tpch/catalog/tpch_catalog.cc | 1 +
zetasql/jdk/BUILD | 37 -
zetasql/local_service/BUILD | 2 +-
zetasql/local_service/local_service.cc | 3 +-
zetasql/parser/BUILD | 192 +-
zetasql/parser/ast_enums.proto | 96 +
zetasql/parser/ast_node.h | 19 +
zetasql/parser/bison_parser.cc | 122 +-
zetasql/parser/bison_parser.h | 1 +
zetasql/parser/flex_istream.h | 3 -
zetasql/parser/flex_tokenizer.cc | 101 +-
zetasql/parser/flex_tokenizer.cc.inc | 12 +-
zetasql/parser/flex_tokenizer.h | 69 +-
zetasql/parser/flex_tokenizer.l | 87 +-
zetasql/parser/gen_parse_tree.py | 2009 ++++-
zetasql/parser/keywords.cc | 700 +-
zetasql/parser/keywords.h | 67 +-
zetasql/parser/keywords_test.cc | 31 +-
zetasql/parser/lookahead_transformer.cc | 341 +-
zetasql/parser/lookahead_transformer.h | 114 +-
zetasql/parser/lookahead_transformer_test.cc | 333 +-
zetasql/parser/macros/BUILD | 45 +-
zetasql/parser/macros/flex_token_provider.cc | 7 +-
zetasql/parser/macros/flex_token_provider.h | 4 +-
.../parser/macros/flex_token_provider_test.cc | 20 +-
zetasql/parser/macros/macro_expander.cc | 130 +-
zetasql/parser/macros/macro_expander.h | 3 +-
zetasql/parser/macros/macro_expander_test.cc | 805 +-
.../macros/standalone_macro_expansion.cc | 31 +-
.../macros/standalone_macro_expansion.h | 2 +-
zetasql/parser/macros/token_provider_base.h | 2 +-
zetasql/parser/macros/token_splicing_utils.cc | 10 +-
zetasql/parser/macros/token_splicing_utils.h | 6 +-
zetasql/parser/parse_tree.cc | 181 +-
.../parser/parse_tree_generated.cc.template | 26 +-
.../parser/parse_tree_generated.h.template | 4 +
zetasql/parser/parser_internal.h | 179 +-
.../parser_match_recognize_quantifier_test.cc | 304 +
zetasql/parser/parser_test.cc | 30 +
zetasql/parser/run_parser_test.cc | 24 +-
zetasql/parser/testdata/corresponding.test | 44 +
.../corresponding_all_combinations.test | 3120 +++++++-
zetasql/parser/testdata/dml_insert.test | 3 +-
.../dml_insert_on_conflict_clause.test | 728 ++
zetasql/parser/testdata/dml_merge.test | 1 -
zetasql/parser/testdata/export_data.test | 29 +
.../testdata/from_clause_join_rewrites.test | 75 +
zetasql/parser/testdata/lock_mode_ddl.test | 64 +
.../parser/testdata/lock_mode_from_where.test | 118 +
.../testdata/lock_mode_group_by_having.test | 73 +
zetasql/parser/testdata/lock_mode_join.test | 120 +
.../testdata/lock_mode_order_by_page.test | 134 +
.../testdata/lock_mode_set_operation.test | 184 +
.../parser/testdata/lock_mode_subquery.test | 216 +
zetasql/parser/testdata/lock_mode_with.test | 202 +
zetasql/parser/testdata/match_recognize.test | 2017 ++++-
.../testdata/multi_level_aggregation.test | 8 +
zetasql/parser/testdata/options.test | 132 +-
zetasql/parser/testdata/pipe_aggregate.test | 6 +-
.../testdata/pipe_aggregate_with_order.test | 171 +
zetasql/parser/testdata/pipe_lock_mode.test | 139 +
.../parser/testdata/pipe_set_operation.test | 187 +-
zetasql/parser/testdata/tvf.test | 49 +-
zetasql/parser/testdata/with.test | 2 +-
zetasql/parser/textmapper_lexer_adapter.cc | 80 +
zetasql/parser/textmapper_lexer_adapter.h | 102 +
.../parser/textmapper_lexer_adapter_test.cc | 74 +
.../{bison_token_codes.h => token_codes.h} | 20 +-
.../{macros => }/token_with_location.cc | 4 +-
.../parser/{macros => }/token_with_location.h | 18 +-
zetasql/parser/unparser.cc | 1034 ++-
zetasql/parser/unparser.h | 100 +
zetasql/parser/unparser_test.cc | 102 +
zetasql/parser/{bison_parser.y => zetasql.tm} | 5939 +++++++++------
zetasql/proto/BUILD | 19 +
zetasql/proto/placeholder_descriptor.proto | 1 +
zetasql/proto/simple_catalog.proto | 2 +
zetasql/proto/simple_property_graph.proto | 75 +
zetasql/public/BUILD | 203 +-
zetasql/public/aggregation_threshold_utils.cc | 4 +
zetasql/public/analyzer.cc | 105 +-
zetasql/public/analyzer.h | 2 +-
zetasql/public/analyzer_options.cc | 18 +-
zetasql/public/analyzer_options.h | 53 +-
zetasql/public/analyzer_options_test.cc | 3 +
zetasql/public/analyzer_output.cc | 16 +-
zetasql/public/analyzer_output.h | 10 +-
zetasql/public/anonymization_utils.cc | 164 +
zetasql/public/anonymization_utils.h | 36 +-
zetasql/public/builtin_function.cc | 4 +
zetasql/public/builtin_function.proto | 89 +-
zetasql/public/cast.cc | 114 +-
zetasql/public/cast_test.cc | 194 +
zetasql/public/catalog.cc | 70 +
zetasql/public/catalog.h | 122 +-
zetasql/public/coercer.cc | 239 +
zetasql/public/coercer.h | 17 +
zetasql/public/constant.h | 20 +
zetasql/public/convert_type_to_proto.cc | 25 +-
zetasql/public/convert_type_to_proto.h | 3 +-
zetasql/public/deprecation_warning.proto | 7 +
zetasql/public/error_helpers.cc | 57 +-
zetasql/public/error_helpers.h | 18 +
zetasql/public/error_helpers_test.cc | 33 +
zetasql/public/evaluator_base.cc | 17 +-
zetasql/public/evaluator_test.cc | 82 +-
zetasql/public/function.cc | 31 +-
zetasql/public/function.proto | 9 +
zetasql/public/function_signature.cc | 70 +-
zetasql/public/function_signature.h | 470 +-
zetasql/public/function_signature_test.cc | 23 +-
zetasql/public/function_test.cc | 5 +
zetasql/public/functions/BUILD | 7 +-
zetasql/public/functions/arithmetics.h | 3 +-
zetasql/public/functions/cast_date_time.cc | 13 +-
zetasql/public/functions/comparison_test.cc | 1 -
.../functions/convert_string_with_format.cc | 5 +-
.../functions/differential_privacy.proto | 46 +-
zetasql/public/functions/format_test.cc | 1 +
zetasql/public/functions/json_format.cc | 6 +
zetasql/public/functions/json_internal.h | 27 +-
.../public/functions/match_recognize/BUILD | 83 +
.../match_recognize/compiled_pattern.cc | 262 +
.../match_recognize/compiled_pattern.h | 97 +
.../match_recognize/compiled_pattern.proto | 65 +
.../match_recognize/compiled_pattern_test.cc | 1817 +++++
.../match_recognize/match_partition.h | 167 +
zetasql/public/functions/parse_date_time.cc | 6 +-
zetasql/public/functions/percentile.cc | 32 +-
zetasql/public/functions/percentile.h | 55 +-
zetasql/public/functions/percentile_test.cc | 59 +-
zetasql/public/functions/regexp.cc | 53 +-
zetasql/public/functions/regexp.h | 67 +-
zetasql/public/functions/regexp_test.cc | 53 +
zetasql/public/functions/string.cc | 30 +-
zetasql/public/functions/string_format.cc | 6 +
.../public/functions/string_format_test.cc | 11 +
zetasql/public/functions/to_json.cc | 106 +
zetasql/public/functions/to_json_test.cc | 126 +
zetasql/public/graph_element_value_test.cc | 358 +
zetasql/public/graph_path_value_test.cc | 373 +
zetasql/public/id_string.h | 2 +
zetasql/public/interval_value.cc | 10 +-
zetasql/public/json_value_test.cc | 36 +
zetasql/public/language_options.cc | 17 +-
zetasql/public/language_options.h | 2 +-
zetasql/public/language_options_test.cc | 7 +
zetasql/public/measure_expression.cc | 190 +
zetasql/public/measure_expression.h | 89 +
zetasql/public/measure_expression_test.cc | 221 +
zetasql/public/multi_catalog.cc | 95 +-
zetasql/public/multi_catalog.h | 31 +-
zetasql/public/non_sql_function.h | 2 +
zetasql/public/options.proto | 159 +-
zetasql/public/parse_helpers.cc | 3 +
zetasql/public/parse_tokens.cc | 32 +-
zetasql/public/property_graph.h | 404 +
zetasql/public/proto/type_annotation.proto | 7 +
zetasql/public/proto_util.cc | 7 +-
zetasql/public/proto_value_conversion.cc | 37 +-
zetasql/public/proto_value_conversion_test.cc | 6 +-
zetasql/public/signature_match_result.h | 2 +-
zetasql/public/simple_catalog.cc | 205 +-
zetasql/public/simple_catalog.h | 64 +-
zetasql/public/simple_catalog_util.cc | 234 +
zetasql/public/simple_catalog_util.h | 30 +
zetasql/public/simple_property_graph.cc | 855 +++
zetasql/public/simple_property_graph.h | 401 +
zetasql/public/simple_table.proto | 2 +
zetasql/public/simple_token_list.cc | 107 +-
zetasql/public/simple_token_list.h | 196 +-
zetasql/public/simple_token_list.proto | 16 +-
zetasql/public/simple_token_list_test.cc | 358 +
zetasql/public/simple_value.proto | 1 -
zetasql/public/sql_constant.cc | 71 +
zetasql/public/sql_constant.h | 134 +
zetasql/public/strings_test.cc | 24 +-
zetasql/public/table_name_resolver.cc | 66 +-
zetasql/public/table_name_resolver_test.cc | 98 +
zetasql/public/table_valued_function.cc | 13 +
zetasql/public/table_valued_function.h | 8 +-
zetasql/public/templated_sql_function_test.cc | 5 +
zetasql/public/templated_sql_tvf.cc | 8 +-
zetasql/public/testing/BUILD | 5 +-
.../public/testing/test_case_options_util.cc | 4 +
.../public/testing/test_case_options_util.h | 1 +
zetasql/public/timestamp_pico_value.cc | 99 +
zetasql/public/timestamp_pico_value.h | 142 +
zetasql/public/timestamp_pico_value_test.cc | 137 +
zetasql/public/token_list_util.cc | 28 +-
zetasql/public/token_list_util.h | 11 +-
zetasql/public/token_list_util_test.cc | 54 +
zetasql/public/type.proto | 44 +-
zetasql/public/types/BUILD | 54 +
zetasql/public/types/array_type.cc | 6 +-
zetasql/public/types/enum_type.cc | 19 +-
zetasql/public/types/enum_type.h | 7 +-
zetasql/public/types/enum_type_test.cc | 6 +-
zetasql/public/types/graph_element_type.cc | 528 ++
zetasql/public/types/graph_element_type.h | 265 +
.../public/types/graph_element_type_test.cc | 504 ++
zetasql/public/types/graph_path_type.cc | 359 +
zetasql/public/types/graph_path_type.h | 153 +
zetasql/public/types/graph_path_type_test.cc | 467 ++
zetasql/public/types/map_type.cc | 111 +-
zetasql/public/types/map_type.h | 7 +
zetasql/public/types/map_type_test.cc | 29 +
zetasql/public/types/measure_type.cc | 161 +
zetasql/public/types/measure_type.h | 129 +
zetasql/public/types/measure_type_test.cc | 131 +
zetasql/public/types/proto_type.cc | 10 +-
zetasql/public/types/proto_type.h | 32 +-
zetasql/public/types/range_type.cc | 6 +-
zetasql/public/types/simple_type.cc | 76 +-
zetasql/public/types/simple_type.h | 1 -
zetasql/public/types/struct_type.cc | 6 +-
zetasql/public/types/struct_type.h | 2 +-
zetasql/public/types/type.cc | 48 +
zetasql/public/types/type.h | 14 +
zetasql/public/types/type_deserializer.cc | 68 +
zetasql/public/types/type_deserializer.h | 2 +
zetasql/public/types/type_factory.cc | 182 +-
zetasql/public/types/type_factory.h | 74 +-
zetasql/public/types/value_representations.h | 38 +-
zetasql/public/uuid_value.cc | 30 +-
zetasql/public/uuid_value.h | 6 +
zetasql/public/uuid_value_test.cc | 15 +
zetasql/public/value.cc | 182 +-
zetasql/public/value.h | 145 +
zetasql/public/value.proto | 20 +
zetasql/public/value_inl.h | 296 +-
zetasql/public/value_test.cc | 376 +-
zetasql/reference_impl/BUILD | 38 +-
zetasql/reference_impl/aggregate_op.cc | 1544 ++--
zetasql/reference_impl/algebrizer.cc | 666 +-
zetasql/reference_impl/algebrizer.h | 155 +-
zetasql/reference_impl/algebrizer_graph.cc | 1189 +++
zetasql/reference_impl/evaluation.cc | 5 -
zetasql/reference_impl/evaluation.h | 3 -
zetasql/reference_impl/expected_errors.cc | 8 +
zetasql/reference_impl/function.cc | 238 +-
zetasql/reference_impl/function.h | 39 +-
zetasql/reference_impl/functions/BUILD | 41 +
zetasql/reference_impl/functions/graph.cc | 710 ++
zetasql/reference_impl/functions/graph.h | 27 +
zetasql/reference_impl/functions/hash.cc | 8 +
zetasql/reference_impl/functions/json.cc | 41 +
zetasql/reference_impl/functions/like.cc | 1 +
zetasql/reference_impl/functions/map.cc | 173 +
zetasql/reference_impl/functions/range.cc | 5 +-
.../reference_impl/functions/register_all.cc | 3 +
.../functions/string_with_collation.cc | 1 +
zetasql/reference_impl/functions/uuid.cc | 5 +
zetasql/reference_impl/operator.cc | 21 +-
zetasql/reference_impl/operator.h | 586 +-
zetasql/reference_impl/pattern_matching_op.cc | 584 ++
.../pattern_matching_op_test.cc | 261 +
zetasql/reference_impl/reference_driver.cc | 44 +-
zetasql/reference_impl/reference_driver.h | 5 +
...e_impl_all_rewrites_known_errors.textproto | 6 +
zetasql/reference_impl/relational_op.cc | 1222 +++
zetasql/reference_impl/statement_evaluator.cc | 22 +-
zetasql/reference_impl/statement_evaluator.h | 9 +-
zetasql/reference_impl/tuple.h | 6 +-
zetasql/reference_impl/tuple_comparator.cc | 7 +-
zetasql/reference_impl/value_expr.cc | 690 +-
zetasql/reference_impl/value_expr_test.cc | 254 +-
zetasql/resolved_ast/BUILD | 8 +-
zetasql/resolved_ast/gen_resolved_ast.py | 2038 ++++-
zetasql/resolved_ast/query_expression.cc | 38 +-
zetasql/resolved_ast/query_expression.h | 10 +
zetasql/resolved_ast/resolved_ast.cc.template | 217 +-
zetasql/resolved_ast/resolved_ast.md.template | 13 +-
...resolved_ast_deep_copy_visitor.cc.template | 4 +
zetasql/resolved_ast/resolved_ast_enums.proto | 86 +
zetasql/resolved_ast/resolved_ast_helper.cc | 12 +
zetasql/resolved_ast/resolved_ast_helper.h | 3 +
zetasql/resolved_ast/resolved_node.cc | 44 +
zetasql/resolved_ast/rewrite_utils.cc | 488 +-
zetasql/resolved_ast/rewrite_utils.h | 140 +
zetasql/resolved_ast/rewrite_utils_test.cc | 114 +
zetasql/resolved_ast/serialization.proto | 26 +
zetasql/resolved_ast/sql_builder.cc | 2363 +++++-
zetasql/resolved_ast/sql_builder.h | 179 +-
zetasql/resolved_ast/target_syntax.h | 12 +
zetasql/resolved_ast/validator.cc | 1810 ++++-
zetasql/resolved_ast/validator.h | 229 +-
zetasql/resolved_ast/validator_test.cc | 779 +-
zetasql/scripting/BUILD | 3 +
zetasql/scripting/parsed_script.cc | 2 +-
zetasql/scripting/parsed_script.h | 14 +-
zetasql/scripting/parsed_script_test.cc | 5 +-
zetasql/scripting/script_executor.cc | 8 +
zetasql/scripting/script_executor.h | 4 +
zetasql/scripting/script_executor_impl.cc | 34 +-
zetasql/scripting/script_executor_impl.h | 15 +-
zetasql/scripting/variable.proto | 1 -
zetasql/testdata/BUILD | 19 +
zetasql/testdata/populate_sample_tables.cc | 58 +-
zetasql/testdata/sample_catalog.cc | 1 +
zetasql/testdata/sample_catalog_impl.cc | 1111 ++-
zetasql/testdata/sample_catalog_impl.h | 15 +
.../sample_catalog_property_graph_test.cc | 299 +
zetasql/testdata/sample_system_variables.cc | 2 +
zetasql/testing/test_function.cc | 29 +-
zetasql/testing/test_function.h | 14 +-
zetasql/testing/test_value.cc | 71 +
zetasql/testing/test_value.h | 40 +
zetasql/testing/using_test_value.cc | 6 +
zetasql/tools/execute_query/BUILD | 60 +
.../tools/execute_query/execute_query_loop.cc | 22 -
.../tools/execute_query/execute_query_loop.h | 1 -
.../execute_query_proto_writer.cc | 7 +
.../execute_query_proto_writer.h | 3 +
.../tools/execute_query/execute_query_tool.cc | 488 +-
.../tools/execute_query/execute_query_tool.h | 64 +-
.../execute_query/execute_query_tool_test.cc | 52 +-
.../tools/execute_query/execute_query_web.cc | 15 +-
.../execute_query_web_handler.cc | 102 +-
.../execute_query/execute_query_web_handler.h | 25 +-
.../execute_query_web_handler_test.cc | 258 +-
.../execute_query/execute_query_web_writer.h | 34 +-
.../tools/execute_query/selectable_catalog.cc | 3 +-
.../tools/execute_query/selectable_catalog.h | 3 +-
.../execute_query/selectable_catalog_test.cc | 7 +-
.../testdata/execute_query_tool.test | 677 +-
.../testdata/execute_query_web_handler.test | 230 +
.../execute_query/value_as_table_adapter.cc | 78 +
.../execute_query/value_as_table_adapter.h | 59 +
.../value_as_table_adapter_test.cc | 133 +
.../tools/execute_query/web/page_body.html | 50 +-
.../execute_query/web/page_template.html | 1 +
zetasql/tools/execute_query/web/style.css | 77 +-
zetasql/tools/formatter/internal/chunk.cc | 43 +-
.../internal/chunk_grouping_strategy.cc | 15 +-
.../formatter/internal/fusible_tokens.cc | 112 +-
.../tools/formatter/internal/fusible_tokens.h | 2 +-
zetasql/tools/formatter/internal/token.h | 3 +
711 files changed, 196848 insertions(+), 17044 deletions(-)
create mode 100644 bazel/grpc_cf_engine.patch
create mode 100644 docs/graph-conditional-expressions.md
create mode 100644 docs/graph-data-types.md
create mode 100644 docs/graph-gql-functions.md
create mode 100644 docs/graph-intro.md
create mode 100644 docs/graph-operators.md
create mode 100644 docs/graph-patterns.md
create mode 100644 docs/graph-query-statements.md
create mode 100644 docs/graph-schema-statements.md
create mode 100644 docs/graph-sql-queries.md
create mode 100644 docs/graph-subqueries.md
delete mode 100644 docs/modules.md
create mode 100644 java/com/google/zetasql/GraphEdgeTable.java
create mode 100644 java/com/google/zetasql/GraphElementLabel.java
create mode 100644 java/com/google/zetasql/GraphElementTable.java
create mode 100644 java/com/google/zetasql/GraphElementType.java
create mode 100644 java/com/google/zetasql/GraphNodeTable.java
create mode 100644 java/com/google/zetasql/GraphNodeTableReference.java
create mode 100644 java/com/google/zetasql/GraphPathType.java
create mode 100644 java/com/google/zetasql/GraphPropertyDeclaration.java
create mode 100644 java/com/google/zetasql/GraphPropertyDefinition.java
create mode 100644 java/com/google/zetasql/MeasureType.java
create mode 100644 java/com/google/zetasql/PropertyGraph.java
create mode 100644 java/com/google/zetasql/SimpleGraphEdgeTable.java
create mode 100644 java/com/google/zetasql/SimpleGraphElementLabel.java
create mode 100644 java/com/google/zetasql/SimpleGraphElementTable.java
create mode 100644 java/com/google/zetasql/SimpleGraphNodeTable.java
create mode 100644 java/com/google/zetasql/SimpleGraphNodeTableReference.java
create mode 100644 java/com/google/zetasql/SimpleGraphPropertyDeclaration.java
create mode 100644 java/com/google/zetasql/SimpleGraphPropertyDefinition.java
create mode 100644 java/com/google/zetasql/SimplePropertyGraph.java
create mode 100644 javatests/com/google/zetasql/MeasureTypeTest.java
create mode 100644 zetasql/analyzer/graph_label_expr_resolver.cc
create mode 100644 zetasql/analyzer/graph_label_expr_resolver.h
create mode 100644 zetasql/analyzer/graph_query_resolver.cc
create mode 100644 zetasql/analyzer/graph_query_resolver.h
create mode 100644 zetasql/analyzer/graph_stmt_resolver.cc
create mode 100644 zetasql/analyzer/graph_stmt_resolver.h
create mode 100644 zetasql/analyzer/rewriters/pipe_if_rewriter.cc
create mode 100644 zetasql/analyzer/rewriters/pipe_if_rewriter.h
create mode 100644 zetasql/analyzer/rewriters/pipe_if_rewriter_test.cc
create mode 100644 zetasql/analyzer/rewriters/privacy/approx_count_distinct_utility.cc
create mode 100644 zetasql/analyzer/rewriters/privacy/approx_count_distinct_utility.h
create mode 100644 zetasql/analyzer/rewriters/rewrite_subpipeline.cc
create mode 100644 zetasql/analyzer/rewriters/rewrite_subpipeline.h
create mode 100644 zetasql/analyzer/testdata/create_property_graph.test
create mode 100644 zetasql/analyzer/testdata/dml_insert_on_conflict_clause.test
create mode 100644 zetasql/analyzer/testdata/graph_access_tracking.test
create mode 100644 zetasql/analyzer/testdata/graph_correlated.test
create mode 100644 zetasql/analyzer/testdata/graph_edge_pattern.test
create mode 100644 zetasql/analyzer/testdata/graph_expressions.test
create mode 100644 zetasql/analyzer/testdata/graph_gql_aggregation.test
create mode 100644 zetasql/analyzer/testdata/graph_gql_composite_query1.test
create mode 100644 zetasql/analyzer/testdata/graph_gql_composite_query2.test
create mode 100644 zetasql/analyzer/testdata/graph_gql_for.test
create mode 100644 zetasql/analyzer/testdata/graph_gql_order_by_and_page.test
create mode 100644 zetasql/analyzer/testdata/graph_gql_window_function.test
create mode 100644 zetasql/analyzer/testdata/graph_horizontal_aggregation.test
create mode 100644 zetasql/analyzer/testdata/graph_label.test
create mode 100644 zetasql/analyzer/testdata/graph_node_pattern.test
create mode 100644 zetasql/analyzer/testdata/graph_optional_columns.test
create mode 100644 zetasql/analyzer/testdata/graph_parenthesized_path_pattern.test
create mode 100644 zetasql/analyzer/testdata/graph_path_functions.test
create mode 100644 zetasql/analyzer/testdata/graph_path_mode.test
create mode 100644 zetasql/analyzer/testdata/graph_path_pattern.test
create mode 100644 zetasql/analyzer/testdata/graph_path_search_prefix.test
create mode 100644 zetasql/analyzer/testdata/graph_path_variable.test
create mode 100644 zetasql/analyzer/testdata/graph_property_specification.test
create mode 100644 zetasql/analyzer/testdata/graph_quantified_path_pattern_test1.test
create mode 100644 zetasql/analyzer/testdata/graph_quantified_path_pattern_test2.test
create mode 100644 zetasql/analyzer/testdata/graph_quantified_path_pattern_test3.test
create mode 100644 zetasql/analyzer/testdata/graph_quantified_pattern_gql_extended.test
create mode 100644 zetasql/analyzer/testdata/graph_query_hints.test
create mode 100644 zetasql/analyzer/testdata/graph_query_statement.test
create mode 100644 zetasql/analyzer/testdata/graph_subquery.test
create mode 100644 zetasql/analyzer/testdata/graph_table.test
create mode 100644 zetasql/analyzer/testdata/graph_table_gql_extended.test
create mode 100644 zetasql/analyzer/testdata/lock_mode.test
create mode 100644 zetasql/analyzer/testdata/match_recognize_with_multilevel_aggregation.test
delete mode 100644 zetasql/analyzer/testdata/multi_level_aggregation.test
create mode 100644 zetasql/analyzer/testdata/multi_level_aggregation_basic.test
create mode 100644 zetasql/analyzer/testdata/multi_level_aggregation_complex.test
create mode 100644 zetasql/analyzer/testdata/multi_level_aggregation_errors.test
create mode 100644 zetasql/analyzer/testdata/multi_level_aggregation_not_yet_supported.test
create mode 100644 zetasql/analyzer/testdata/multi_level_aggregation_udas_not_yet_supported.test
create mode 100644 zetasql/analyzer/testdata/sql_builder_lock_mode.test
delete mode 100644 zetasql/base/mathlimits.cc
delete mode 100644 zetasql/base/mathlimits.h
delete mode 100644 zetasql/base/mathlimits_test.cc
create mode 100644 zetasql/common/builtin_function_graph.cc
create mode 100644 zetasql/common/graph_element_utils.cc
create mode 100644 zetasql/common/graph_element_utils.h
create mode 100644 zetasql/common/graph_element_utils_test.cc
create mode 100644 zetasql/common/match_recognize/BUILD
create mode 100644 zetasql/common/match_recognize/compiled_nfa.cc
create mode 100644 zetasql/common/match_recognize/compiled_nfa.h
create mode 100644 zetasql/common/match_recognize/compiled_nfa_test.cc
create mode 100644 zetasql/common/match_recognize/edge_matchers.cc
create mode 100644 zetasql/common/match_recognize/edge_matchers.h
create mode 100644 zetasql/common/match_recognize/edge_tracker.cc
create mode 100644 zetasql/common/match_recognize/edge_tracker.h
create mode 100644 zetasql/common/match_recognize/edge_tracker_test.cc
create mode 100644 zetasql/common/match_recognize/epsilon_remover.cc
create mode 100644 zetasql/common/match_recognize/epsilon_remover.h
create mode 100644 zetasql/common/match_recognize/epsilon_remover_test.cc
create mode 100644 zetasql/common/match_recognize/match_test_result.proto
create mode 100644 zetasql/common/match_recognize/nfa.cc
create mode 100644 zetasql/common/match_recognize/nfa.h
create mode 100644 zetasql/common/match_recognize/nfa_builder.cc
create mode 100644 zetasql/common/match_recognize/nfa_builder.h
create mode 100644 zetasql/common/match_recognize/nfa_builder_test.cc
create mode 100644 zetasql/common/match_recognize/nfa_match_partition.cc
create mode 100644 zetasql/common/match_recognize/nfa_match_partition.h
create mode 100644 zetasql/common/match_recognize/nfa_matchers.cc
create mode 100644 zetasql/common/match_recognize/nfa_matchers.h
create mode 100644 zetasql/common/match_recognize/nfa_matchers_test.cc
create mode 100644 zetasql/common/match_recognize/nfa_test.cc
create mode 100644 zetasql/common/match_recognize/row_edge_list.cc
create mode 100644 zetasql/common/match_recognize/row_edge_list.h
create mode 100644 zetasql/common/match_recognize/row_edge_list_test.cc
create mode 100644 zetasql/common/match_recognize/test_matcher.cc
create mode 100644 zetasql/common/match_recognize/test_matcher.h
create mode 100644 zetasql/common/match_recognize/test_pattern_resolver.cc
create mode 100644 zetasql/common/match_recognize/test_pattern_resolver.h
create mode 100644 zetasql/common/match_recognize/test_pattern_resolver_test.cc
create mode 100644 zetasql/compliance/functions_testlib_uuid.cc
create mode 100644 zetasql/compliance/testdata/graph_functions.test
create mode 100644 zetasql/compliance/testdata/graph_query_statement.test
create mode 100644 zetasql/compliance/testdata/graph_table.test
create mode 100644 zetasql/compliance/testdata/graph_table_gql_extended.test
create mode 100644 zetasql/compliance/testdata/match_recognize.test
create mode 100644 zetasql/compliance/testdata/multi_level_aggregation_basic.test
create mode 100644 zetasql/compliance/testdata/multi_level_aggregation_complex.test
delete mode 100644 zetasql/jdk/BUILD
create mode 100644 zetasql/parser/parser_match_recognize_quantifier_test.cc
create mode 100644 zetasql/parser/testdata/dml_insert_on_conflict_clause.test
create mode 100644 zetasql/parser/testdata/lock_mode_ddl.test
create mode 100644 zetasql/parser/testdata/lock_mode_from_where.test
create mode 100644 zetasql/parser/testdata/lock_mode_group_by_having.test
create mode 100644 zetasql/parser/testdata/lock_mode_join.test
create mode 100644 zetasql/parser/testdata/lock_mode_order_by_page.test
create mode 100644 zetasql/parser/testdata/lock_mode_set_operation.test
create mode 100644 zetasql/parser/testdata/lock_mode_subquery.test
create mode 100644 zetasql/parser/testdata/lock_mode_with.test
create mode 100644 zetasql/parser/testdata/pipe_lock_mode.test
create mode 100644 zetasql/parser/textmapper_lexer_adapter.cc
create mode 100644 zetasql/parser/textmapper_lexer_adapter.h
create mode 100644 zetasql/parser/textmapper_lexer_adapter_test.cc
rename zetasql/parser/{bison_token_codes.h => token_codes.h} (58%)
rename zetasql/parser/{macros => }/token_with_location.cc (90%)
rename zetasql/parser/{macros => }/token_with_location.h (88%)
rename zetasql/parser/{bison_parser.y => zetasql.tm} (73%)
create mode 100644 zetasql/proto/simple_property_graph.proto
create mode 100644 zetasql/public/functions/match_recognize/BUILD
create mode 100644 zetasql/public/functions/match_recognize/compiled_pattern.cc
create mode 100644 zetasql/public/functions/match_recognize/compiled_pattern.h
create mode 100644 zetasql/public/functions/match_recognize/compiled_pattern.proto
create mode 100644 zetasql/public/functions/match_recognize/compiled_pattern_test.cc
create mode 100644 zetasql/public/functions/match_recognize/match_partition.h
create mode 100644 zetasql/public/graph_element_value_test.cc
create mode 100644 zetasql/public/graph_path_value_test.cc
create mode 100644 zetasql/public/measure_expression.cc
create mode 100644 zetasql/public/measure_expression.h
create mode 100644 zetasql/public/measure_expression_test.cc
create mode 100644 zetasql/public/property_graph.h
create mode 100644 zetasql/public/simple_property_graph.cc
create mode 100644 zetasql/public/simple_property_graph.h
create mode 100644 zetasql/public/simple_token_list_test.cc
create mode 100644 zetasql/public/sql_constant.cc
create mode 100644 zetasql/public/sql_constant.h
create mode 100644 zetasql/public/timestamp_pico_value.cc
create mode 100644 zetasql/public/timestamp_pico_value.h
create mode 100644 zetasql/public/timestamp_pico_value_test.cc
create mode 100644 zetasql/public/token_list_util_test.cc
create mode 100644 zetasql/public/types/graph_element_type.cc
create mode 100644 zetasql/public/types/graph_element_type.h
create mode 100644 zetasql/public/types/graph_element_type_test.cc
create mode 100644 zetasql/public/types/graph_path_type.cc
create mode 100644 zetasql/public/types/graph_path_type.h
create mode 100644 zetasql/public/types/graph_path_type_test.cc
create mode 100644 zetasql/public/types/measure_type.cc
create mode 100644 zetasql/public/types/measure_type.h
create mode 100644 zetasql/public/types/measure_type_test.cc
create mode 100644 zetasql/reference_impl/algebrizer_graph.cc
create mode 100644 zetasql/reference_impl/functions/graph.cc
create mode 100644 zetasql/reference_impl/functions/graph.h
create mode 100644 zetasql/reference_impl/pattern_matching_op.cc
create mode 100644 zetasql/reference_impl/pattern_matching_op_test.cc
create mode 100644 zetasql/testdata/sample_catalog_property_graph_test.cc
create mode 100644 zetasql/tools/execute_query/testdata/execute_query_web_handler.test
create mode 100644 zetasql/tools/execute_query/value_as_table_adapter.cc
create mode 100644 zetasql/tools/execute_query/value_as_table_adapter.h
create mode 100644 zetasql/tools/execute_query/value_as_table_adapter_test.cc
diff --git a/.bazelrc b/.bazelrc
index bb2613f3a..9a99e4c05 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -22,6 +22,7 @@
# suppression you can invoke with --config=clang
build:clang --action_env=BAZEL_CXXOPTS="-std=c++17"
build:clang --cxxopt=-std=c++17
+build:clang --cxxopt=-Wno-module-import-in-extern-c
build:clang --cxxopt=-Wno-deprecated-declarations
build:clang --cxxopt=-Wno-range-loop-analysis
build:clang --cxxopt=-Wno-inconsistent-missing-override
@@ -54,6 +55,8 @@ build:g++ --cxxopt=-Wno-stringop-truncation
# C++17 is required to build ZetaSQL, hence `-cxxopt=-std=c++17`. On MacOS
# `--host_cxxopt=-std=c++17` is also needed.
-build --cxxopt=-std=c++17 --host_cxxopt=-std=c++17
-run --cxxopt=-std=c++17 --host_cxxopt=-std=c++17
-test --cxxopt=-std=c++17 --host_cxxopt=-std=c++17
+# "--enable_bzlmod=false" is required because grpc does not support bzlmod yet.
+# "--java_runtime_version=remotejdk_11": to make the build more hermetic.
+build --cxxopt=-std=c++17 --host_cxxopt=-std=c++17 --config=clang --enable_bzlmod=false --java_runtime_version=remotejdk_11
+run --cxxopt=-std=c++17 --host_cxxopt=-std=c++17 --config=clang --enable_bzlmod=false --java_runtime_version=remotejdk_11
+test --cxxopt=-std=c++17 --host_cxxopt=-std=c++17 --config=clang --enable_bzlmod=false --java_runtime_version=remotejdk_11
diff --git a/.bazelversion b/.bazelversion
index f22d756da..b26a34e47 100644
--- a/.bazelversion
+++ b/.bazelversion
@@ -1 +1 @@
-6.5.0
+7.2.1
diff --git a/Dockerfile b/Dockerfile
index d0721b8a5..4038e4350 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -8,27 +8,24 @@ FROM ubuntu:18.04 as build
RUN apt-get update && apt-get -qq install -y default-jre default-jdk
# Install prerequisites for bazel
-RUN apt-get update && apt-get -qq install curl tar build-essential wget \
- python python3 zip unzip
-
-ENV BAZEL_VERSION=6.5.0
-
-# Install bazel from source
-RUN mkdir -p bazel && \
- cd bazel && \
- wget https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel-${BAZEL_VERSION}-dist.zip &&\
- unzip bazel-${BAZEL_VERSION}-dist.zip && \
- rm -rf bazel-${BAZEL_VERSION}-dist.zip
-ENV PATH=$PATH:/usr/bin:/usr/local/bin
-ENV EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk"
-RUN cd bazel && bash ./compile.sh
-RUN cp /bazel/output/bazel /usr/local/bin
+RUN apt-get -qq install curl tar build-essential wget python python3 zip unzip
+
+ENV BAZEL_VERSION=7.2.1
+
+RUN apt install apt-transport-https curl gnupg -y
+RUN curl -fsSL https://bazel.build/bazel-release.pub.gpg | gpg --dearmor >bazel-archive-keyring.gpg
+RUN mv bazel-archive-keyring.gpg /usr/share/keyrings
+RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/bazel-archive-keyring.gpg] https://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list
+
+RUN apt update && apt -qq install -y bazel-${BAZEL_VERSION}
+RUN ln -s /usr/bin/bazel-${BAZEL_VERSION} /usr/bin/bazel
RUN apt-get update && DEBIAN_FRONTEND="noninteractive" \
TZ="America/Los_Angeles" apt-get install -y tzdata
# Unfortunately ZetaSQL has issues with clang (default bazel compiler), so
# we install GCC. Also install make for rules_foreign_cc bazel rules.
+RUN apt-get -qq install -y software-properties-common make rename git ca-certificates libgnutls30
RUN apt-get -qq install -y software-properties-common
RUN add-apt-repository ppa:ubuntu-toolchain-r/test && \
apt-get -qq update && \
@@ -50,8 +47,6 @@ RUN useradd -ms /bin/bash zetasql
RUN chown -R zetasql:zetasql /zetasql
USER zetasql
-ENV BAZEL_ARGS="--config=g++"
-
ENV HOME=/home/zetasql
RUN mkdir -p $HOME/bin
diff --git a/README.md b/README.md
index 463123188..4b6d895f1 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@ ZetaSQL is not itself a database or query engine. Instead,
it's intended to be used by multiple engines, to provide consistent
language and behavior (name resolution, type checking, implicit
casting, etc.). Specific query engines may implement a subset of features,
-giving errors for unuspported features.
+giving errors for unsupported features.
ZetaSQL's compliance test suite can be used to validate query engine
implementations are correct and consistent.
diff --git a/WORKSPACE b/WORKSPACE
index d355d13fd..994ce0109 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -37,6 +37,14 @@ workspace(name = "com_google_zetasql")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+http_archive(
+ name = "toolchains_llvm",
+ canonical_id = "1.0.0",
+ sha256 = "e91c4361f99011a54814e1afbe5c436e0d329871146a3cd58c23a2b4afb50737",
+ strip_prefix = "toolchains_llvm-1.0.0",
+ url = "https://github.com/bazel-contrib/toolchains_llvm/releases/download/1.0.0/toolchains_llvm-1.0.0.tar.gz",
+)
+
http_archive(
name = "rules_jvm_external",
sha256 = "b17d7388feb9bfa7f2fa09031b32707df529f26c91ab9e5d909eb1676badd9a6",
@@ -47,9 +55,9 @@ http_archive(
# gRPC Java
http_archive(
name = "io_grpc_grpc_java",
- url = "https://github.com/grpc/grpc-java/archive/v1.56.0.tar.gz",
- strip_prefix = "grpc-java-1.56.0",
- sha256 = "4af5ecbaed16455fcda9fdab36e131696f5092858dd130f026069fcf11817a21",
+ sha256 = "301e0de87c7659cc790bd2a7265970a71632d55773128c98768385091c0a1a97",
+ strip_prefix = "grpc-java-1.61.0",
+ url = "https://github.com/grpc/grpc-java/archive/v1.61.0.zip",
)
load("@rules_jvm_external//:repositories.bzl", "rules_jvm_external_deps")
diff --git a/bazel/grpc_cf_engine.patch b/bazel/grpc_cf_engine.patch
new file mode 100644
index 000000000..e26c2d864
--- /dev/null
+++ b/bazel/grpc_cf_engine.patch
@@ -0,0 +1,10 @@
+--- src/core/BUILD
++++ src/core/BUILD
+@@ -2563,6 +2563,7 @@
+ external_deps = [
+ "absl/container:flat_hash_map",
+ "absl/log:check",
++ "absl/status",
+ "absl/strings",
+ "absl/strings:str_format",
+ ],
diff --git a/bazel/grpc_extra_deps.patch b/bazel/grpc_extra_deps.patch
index 4b326f2b6..771761b36 100644
--- a/bazel/grpc_extra_deps.patch
+++ b/bazel/grpc_extra_deps.patch
@@ -7,7 +7,7 @@ index 4d8afa3131..b42224501f 100644
api_dependencies()
- go_rules_dependencies()
-- go_register_toolchains(version = "1.18")
+- go_register_toolchains(version = "1.20")
- gazelle_dependencies()
-
# Pull-in the go 3rd party dependencies for protoc_gen_validate, which is
diff --git a/bazel/icu.BUILD b/bazel/icu.BUILD
index e85da666c..be36d7de9 100644
--- a/bazel/icu.BUILD
+++ b/bazel/icu.BUILD
@@ -35,17 +35,20 @@ filegroup(
configure_make(
name = "icu",
configure_command = "source/configure",
- env = select({
- "@platforms//os:macos": {
- "AR": "",
- "CXXFLAGS": "-fPIC", # For JNI
- "CFLAGS": "-fPIC", # For JNI
- },
- "//conditions:default": {
- "CXXFLAGS": "-fPIC", # For JNI
- "CFLAGS": "-fPIC", # For JNI
- },
+ args = select({
+ # AR is overridden to be libtool by rules_foreign_cc. It does not support ar style arguments
+ # like "r". We need to prevent the icu make rules from adding unsupported parameters by
+ # forcing ARFLAGS to keep the rules_foreign_cc value in this parameter
+ "@platforms//os:macos": [
+ "ARFLAGS=\"-static -o\"",
+ "MAKE=gnumake",
+ ],
+ "//conditions:default": [],
}),
+ env = {
+ "CXXFLAGS": "-fPIC", # For JNI
+ "CFLAGS": "-fPIC", # For JNI
+ },
configure_options = [
"--enable-option-checking",
"--enable-static",
diff --git a/bazel/icu4c-64_2.patch b/bazel/icu4c-64_2.patch
index a23bdcafc..69d12b63f 100644
--- a/bazel/icu4c-64_2.patch
+++ b/bazel/icu4c-64_2.patch
@@ -5,7 +5,7 @@
CXX = @CXX@
AR = @AR@
-ARFLAGS = @ARFLAGS@ r
-+ARFLAGS = @ARFLAGS@
++ARFLAGS = @ARFLAGS@ -crs
RANLIB = @RANLIB@
COMPILE_LINK_ENVVAR = @COMPILE_LINK_ENVVAR@
UCLN_NO_AUTO_CLEANUP = @UCLN_NO_AUTO_CLEANUP@
diff --git a/bazel/maven_install.json b/bazel/maven_install.json
index acc195f38..725928dd8 100644
--- a/bazel/maven_install.json
+++ b/bazel/maven_install.json
@@ -1,19 +1,11 @@
{
"dependency_tree": {
"__AUTOGENERATED_FILE_DO_NOT_MODIFY_THIS_FILE_MANUALLY": "THERE_IS_NO_DATA_ONLY_ZUUL",
- "__INPUT_ARTIFACTS_HASH": 71245436,
- "__RESOLVED_ARTIFACTS_HASH": -255807762,
+ "__INPUT_ARTIFACTS_HASH": 150039912,
+ "__RESOLVED_ARTIFACTS_HASH": 374395340,
"conflict_resolution": {
- "com.google.api.grpc:proto-google-common-protos:2.17.0": "com.google.api.grpc:proto-google-common-protos:2.21.0",
- "com.google.code.gson:gson:2.8.9": "com.google.code.gson:gson:2.10.1",
- "com.google.errorprone:error_prone_annotations:2.11.0": "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:31.1-android": "com.google.guava:guava:32.1.1-jre",
- "com.google.protobuf:protobuf-java:3.23.1": "com.google.protobuf:protobuf-java:3.23.2",
- "com.google.truth:truth:1.0.1": "com.google.truth:truth:1.1.3",
- "io.netty:netty-common:4.1.34.Final": "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.34.Final": "io.netty:netty-transport:4.1.87.Final",
- "io.opencensus:opencensus-api:0.21.0": "io.opencensus:opencensus-api:0.31.0",
- "io.opencensus:opencensus-contrib-grpc-metrics:0.21.0": "io.opencensus:opencensus-contrib-grpc-metrics:0.31.0"
+ "com.google.guava:guava:32.1.3-android": "com.google.guava:guava:32.1.3-jre",
+ "com.google.protobuf:protobuf-java:3.23.1": "com.google.protobuf:protobuf-java:3.25.1"
},
"dependencies": [
{
@@ -45,17 +37,17 @@
"url": "https://repo1.maven.org/maven2/com/google/android/annotations/4.1.1.4/annotations-4.1.1.4-sources.jar"
},
{
- "coord": "com.google.api.grpc:proto-google-common-protos:2.21.0",
+ "coord": "com.google.api.grpc:proto-google-common-protos:2.29.0",
"dependencies": [
- "com.google.protobuf:protobuf-java:3.23.2"
+ "com.google.protobuf:protobuf-java:3.25.1"
],
"directDependencies": [
- "com.google.protobuf:protobuf-java:3.23.2"
+ "com.google.protobuf:protobuf-java:3.25.1"
],
- "file": "v1/https/repo1.maven.org/maven2/com/google/api/grpc/proto-google-common-protos/2.21.0/proto-google-common-protos-2.21.0.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/api/grpc/proto-google-common-protos/2.29.0/proto-google-common-protos-2.29.0.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/api/grpc/proto-google-common-protos/2.21.0/proto-google-common-protos-2.21.0.jar",
- "https://repo.maven.apache.org/maven2/com/google/api/grpc/proto-google-common-protos/2.21.0/proto-google-common-protos-2.21.0.jar"
+ "https://repo1.maven.org/maven2/com/google/api/grpc/proto-google-common-protos/2.29.0/proto-google-common-protos-2.29.0.jar",
+ "https://repo.maven.apache.org/maven2/com/google/api/grpc/proto-google-common-protos/2.29.0/proto-google-common-protos-2.29.0.jar"
],
"packages": [
"com.google.api",
@@ -69,25 +61,25 @@
"com.google.rpc.context",
"com.google.type"
],
- "sha256": "e5f5b2d8d76e1713207fda0c7b2f14daf5c64238856404da6edb889184c14ed9",
- "url": "https://repo1.maven.org/maven2/com/google/api/grpc/proto-google-common-protos/2.21.0/proto-google-common-protos-2.21.0.jar"
+ "sha256": "ee9c751f06b112e92b37f75e4f73a17d03ef2c3302c6e8d986adbcc721b63cb0",
+ "url": "https://repo1.maven.org/maven2/com/google/api/grpc/proto-google-common-protos/2.29.0/proto-google-common-protos-2.29.0.jar"
},
{
- "coord": "com.google.api.grpc:proto-google-common-protos:jar:sources:2.21.0",
+ "coord": "com.google.api.grpc:proto-google-common-protos:jar:sources:2.29.0",
"dependencies": [
- "com.google.protobuf:protobuf-java:jar:sources:3.23.2"
+ "com.google.protobuf:protobuf-java:jar:sources:3.25.1"
],
"directDependencies": [
- "com.google.protobuf:protobuf-java:jar:sources:3.23.2"
+ "com.google.protobuf:protobuf-java:jar:sources:3.25.1"
],
- "file": "v1/https/repo1.maven.org/maven2/com/google/api/grpc/proto-google-common-protos/2.21.0/proto-google-common-protos-2.21.0-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/api/grpc/proto-google-common-protos/2.29.0/proto-google-common-protos-2.29.0-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/api/grpc/proto-google-common-protos/2.21.0/proto-google-common-protos-2.21.0-sources.jar",
- "https://repo.maven.apache.org/maven2/com/google/api/grpc/proto-google-common-protos/2.21.0/proto-google-common-protos-2.21.0-sources.jar"
+ "https://repo1.maven.org/maven2/com/google/api/grpc/proto-google-common-protos/2.29.0/proto-google-common-protos-2.29.0-sources.jar",
+ "https://repo.maven.apache.org/maven2/com/google/api/grpc/proto-google-common-protos/2.29.0/proto-google-common-protos-2.29.0-sources.jar"
],
"packages": [],
- "sha256": "074888bf2afee12fec0bd5d0ed0cae1b9a2cc316ed8feeaa468cdb9a0f135179",
- "url": "https://repo1.maven.org/maven2/com/google/api/grpc/proto-google-common-protos/2.21.0/proto-google-common-protos-2.21.0-sources.jar"
+ "sha256": "fe7831089c20c097ef540b61ff90d12cfe0fbc57c2bbe21a3e8fa96bb0085d99",
+ "url": "https://repo1.maven.org/maven2/com/google/api/grpc/proto-google-common-protos/2.29.0/proto-google-common-protos-2.29.0-sources.jar"
},
{
"coord": "com.google.auth:google-auth-library-credentials:1.4.0",
@@ -121,10 +113,10 @@
"coord": "com.google.auth:google-auth-library-oauth2-http:1.4.0",
"dependencies": [
"com.google.auth:google-auth-library-credentials:1.4.0",
- "com.google.auto.value:auto-value-annotations:1.10.1",
+ "com.google.auto.value:auto-value-annotations:1.10.4",
"com.google.code.findbugs:jsr305:3.0.2",
"com.google.code.gson:gson:2.10.1",
- "com.google.guava:guava:32.1.1-jre",
+ "com.google.guava:guava:32.1.3-jre",
"com.google.http-client:google-http-client-gson:1.41.0",
"com.google.http-client:google-http-client:1.41.0",
"com.google.j2objc:j2objc-annotations:2.8",
@@ -137,9 +129,9 @@
],
"directDependencies": [
"com.google.auth:google-auth-library-credentials:1.4.0",
- "com.google.auto.value:auto-value-annotations:1.10.1",
+ "com.google.auto.value:auto-value-annotations:1.10.4",
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.guava:guava:32.1.1-jre",
+ "com.google.guava:guava:32.1.3-jre",
"com.google.http-client:google-http-client-gson:1.41.0",
"com.google.http-client:google-http-client:1.41.0"
],
@@ -159,10 +151,10 @@
"coord": "com.google.auth:google-auth-library-oauth2-http:jar:sources:1.4.0",
"dependencies": [
"com.google.auth:google-auth-library-credentials:jar:sources:1.4.0",
- "com.google.auto.value:auto-value-annotations:jar:sources:1.10.1",
+ "com.google.auto.value:auto-value-annotations:jar:sources:1.10.4",
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
"com.google.code.gson:gson:jar:sources:2.10.1",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
"com.google.http-client:google-http-client-gson:jar:sources:1.41.0",
"com.google.http-client:google-http-client:jar:sources:1.41.0",
"com.google.j2objc:j2objc-annotations:jar:sources:2.8",
@@ -175,9 +167,9 @@
],
"directDependencies": [
"com.google.auth:google-auth-library-credentials:jar:sources:1.4.0",
- "com.google.auto.value:auto-value-annotations:jar:sources:1.10.1",
+ "com.google.auto.value:auto-value-annotations:jar:sources:1.10.4",
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
"com.google.http-client:google-http-client-gson:jar:sources:1.41.0",
"com.google.http-client:google-http-client:jar:sources:1.41.0"
],
@@ -191,13 +183,13 @@
"url": "https://repo1.maven.org/maven2/com/google/auth/google-auth-library-oauth2-http/1.4.0/google-auth-library-oauth2-http-1.4.0-sources.jar"
},
{
- "coord": "com.google.auto.value:auto-value-annotations:1.10.1",
+ "coord": "com.google.auto.value:auto-value-annotations:1.10.4",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.10.1/auto-value-annotations-1.10.1.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.10.4/auto-value-annotations-1.10.4.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.10.1/auto-value-annotations-1.10.1.jar",
- "https://repo.maven.apache.org/maven2/com/google/auto/value/auto-value-annotations/1.10.1/auto-value-annotations-1.10.1.jar"
+ "https://repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.10.4/auto-value-annotations-1.10.4.jar",
+ "https://repo.maven.apache.org/maven2/com/google/auto/value/auto-value-annotations/1.10.4/auto-value-annotations-1.10.4.jar"
],
"packages": [
"com.google.auto.value",
@@ -205,30 +197,30 @@
"com.google.auto.value.extension.serializable",
"com.google.auto.value.extension.toprettystring"
],
- "sha256": "a4fe0a211925e938a8510d741763ee1171a11bf931f5891ef4d4ee84fca72be2",
- "url": "https://repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.10.1/auto-value-annotations-1.10.1.jar"
+ "sha256": "e1c45e6beadaef9797cb0d9afd5a45621ad061cd8632012f85582853a3887825",
+ "url": "https://repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.10.4/auto-value-annotations-1.10.4.jar"
},
{
- "coord": "com.google.auto.value:auto-value-annotations:jar:sources:1.10.1",
+ "coord": "com.google.auto.value:auto-value-annotations:jar:sources:1.10.4",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.10.1/auto-value-annotations-1.10.1-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.10.4/auto-value-annotations-1.10.4-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.10.1/auto-value-annotations-1.10.1-sources.jar",
- "https://repo.maven.apache.org/maven2/com/google/auto/value/auto-value-annotations/1.10.1/auto-value-annotations-1.10.1-sources.jar"
+ "https://repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.10.4/auto-value-annotations-1.10.4-sources.jar",
+ "https://repo.maven.apache.org/maven2/com/google/auto/value/auto-value-annotations/1.10.4/auto-value-annotations-1.10.4-sources.jar"
],
"packages": [],
- "sha256": "44e6ce2884c18869422765b238f7f173faccd24643fabb5e95597382e80d50a8",
- "url": "https://repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.10.1/auto-value-annotations-1.10.1-sources.jar"
+ "sha256": "61a433f015b12a6cf4ecff227c7748486ff8f294ffe9d39827b382ade0514d0a",
+ "url": "https://repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.10.4/auto-value-annotations-1.10.4-sources.jar"
},
{
- "coord": "com.google.auto.value:auto-value:1.10.1",
+ "coord": "com.google.auto.value:auto-value:1.10.4",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/com/google/auto/value/auto-value/1.10.1/auto-value-1.10.1.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/auto/value/auto-value/1.10.4/auto-value-1.10.4.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/auto/value/auto-value/1.10.1/auto-value-1.10.1.jar",
- "https://repo.maven.apache.org/maven2/com/google/auto/value/auto-value/1.10.1/auto-value-1.10.1.jar"
+ "https://repo1.maven.org/maven2/com/google/auto/value/auto-value/1.10.4/auto-value-1.10.4.jar",
+ "https://repo.maven.apache.org/maven2/com/google/auto/value/auto-value/1.10.4/auto-value-1.10.4.jar"
],
"packages": [
"autovalue.shaded.com.google.auto.common",
@@ -260,23 +252,29 @@
"autovalue.shaded.kotlin.collections.builders",
"autovalue.shaded.kotlin.collections.unsigned",
"autovalue.shaded.kotlin.comparisons",
+ "autovalue.shaded.kotlin.contracts",
"autovalue.shaded.kotlin.coroutines",
"autovalue.shaded.kotlin.coroutines.intrinsics",
"autovalue.shaded.kotlin.coroutines.jvm.internal",
+ "autovalue.shaded.kotlin.enums",
"autovalue.shaded.kotlin.experimental",
"autovalue.shaded.kotlin.internal",
+ "autovalue.shaded.kotlin.internal.jdk7",
+ "autovalue.shaded.kotlin.internal.jdk8",
"autovalue.shaded.kotlin.jvm",
"autovalue.shaded.kotlin.jvm.functions",
"autovalue.shaded.kotlin.jvm.internal",
"autovalue.shaded.kotlin.jvm.internal.markers",
"autovalue.shaded.kotlin.random",
+ "autovalue.shaded.kotlin.random.jdk8",
"autovalue.shaded.kotlin.ranges",
"autovalue.shaded.kotlin.reflect",
"autovalue.shaded.kotlin.sequences",
"autovalue.shaded.kotlin.text",
"autovalue.shaded.kotlinx.metadata",
- "autovalue.shaded.kotlinx.metadata.impl",
- "autovalue.shaded.kotlinx.metadata.impl.extensions",
+ "autovalue.shaded.kotlinx.metadata.internal",
+ "autovalue.shaded.kotlinx.metadata.internal.common",
+ "autovalue.shaded.kotlinx.metadata.internal.extensions",
"autovalue.shaded.kotlinx.metadata.internal.metadata",
"autovalue.shaded.kotlinx.metadata.internal.metadata.deserialization",
"autovalue.shaded.kotlinx.metadata.internal.metadata.jvm",
@@ -285,7 +283,7 @@
"autovalue.shaded.kotlinx.metadata.internal.metadata.serialization",
"autovalue.shaded.kotlinx.metadata.internal.protobuf",
"autovalue.shaded.kotlinx.metadata.jvm",
- "autovalue.shaded.kotlinx.metadata.jvm.impl",
+ "autovalue.shaded.kotlinx.metadata.jvm.internal",
"autovalue.shaded.net.ltgt.gradle.incap",
"autovalue.shaded.org.checkerframework.checker.nullness.qual",
"autovalue.shaded.org.checkerframework.framework.qual",
@@ -301,21 +299,21 @@
"com.google.auto.value.extension.toprettystring.processor",
"com.google.auto.value.processor"
],
- "sha256": "ea78010d82efdb2f919b16faed9e6427f07d2998a15c4f619cdc6a6164ecd336",
- "url": "https://repo1.maven.org/maven2/com/google/auto/value/auto-value/1.10.1/auto-value-1.10.1.jar"
+ "sha256": "f3c438d1f82904bbcb452084d488b660f3c7488e9274c3a58f049e121632d434",
+ "url": "https://repo1.maven.org/maven2/com/google/auto/value/auto-value/1.10.4/auto-value-1.10.4.jar"
},
{
- "coord": "com.google.auto.value:auto-value:jar:sources:1.10.1",
+ "coord": "com.google.auto.value:auto-value:jar:sources:1.10.4",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/com/google/auto/value/auto-value/1.10.1/auto-value-1.10.1-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/auto/value/auto-value/1.10.4/auto-value-1.10.4-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/auto/value/auto-value/1.10.1/auto-value-1.10.1-sources.jar",
- "https://repo.maven.apache.org/maven2/com/google/auto/value/auto-value/1.10.1/auto-value-1.10.1-sources.jar"
+ "https://repo1.maven.org/maven2/com/google/auto/value/auto-value/1.10.4/auto-value-1.10.4-sources.jar",
+ "https://repo.maven.apache.org/maven2/com/google/auto/value/auto-value/1.10.4/auto-value-1.10.4-sources.jar"
],
"packages": [],
- "sha256": "39c581c92f8d48817e5f0a70832fd719aeb2f925aacfb3822b8c6f1ec7a618fe",
- "url": "https://repo1.maven.org/maven2/com/google/auto/value/auto-value/1.10.1/auto-value-1.10.1-sources.jar"
+ "sha256": "16a8ace0d11e71c54075f55def6b1e72b96f72711d72a09742223c115874635c",
+ "url": "https://repo1.maven.org/maven2/com/google/auto/value/auto-value/1.10.4/auto-value-1.10.4-sources.jar"
},
{
"coord": "com.google.code.findbugs:jsr305:3.0.2",
@@ -384,33 +382,33 @@
"url": "https://repo1.maven.org/maven2/com/google/code/gson/gson/2.10.1/gson-2.10.1-sources.jar"
},
{
- "coord": "com.google.errorprone:error_prone_annotations:2.18.0",
+ "coord": "com.google.errorprone:error_prone_annotations:2.23.0",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.18.0/error_prone_annotations-2.18.0.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.23.0/error_prone_annotations-2.23.0.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.18.0/error_prone_annotations-2.18.0.jar",
- "https://repo.maven.apache.org/maven2/com/google/errorprone/error_prone_annotations/2.18.0/error_prone_annotations-2.18.0.jar"
+ "https://repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.23.0/error_prone_annotations-2.23.0.jar",
+ "https://repo.maven.apache.org/maven2/com/google/errorprone/error_prone_annotations/2.23.0/error_prone_annotations-2.23.0.jar"
],
"packages": [
"com.google.errorprone.annotations",
"com.google.errorprone.annotations.concurrent"
],
- "sha256": "9e6814cb71816988a4fd1b07a993a8f21bb7058d522c162b1de849e19bea54ae",
- "url": "https://repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.18.0/error_prone_annotations-2.18.0.jar"
+ "sha256": "ec6f39f068b6ff9ac323c68e28b9299f8c0a80ca512dccb1d4a70f40ac3ec054",
+ "url": "https://repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.23.0/error_prone_annotations-2.23.0.jar"
},
{
- "coord": "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
+ "coord": "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.18.0/error_prone_annotations-2.18.0-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.23.0/error_prone_annotations-2.23.0-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.18.0/error_prone_annotations-2.18.0-sources.jar",
- "https://repo.maven.apache.org/maven2/com/google/errorprone/error_prone_annotations/2.18.0/error_prone_annotations-2.18.0-sources.jar"
+ "https://repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.23.0/error_prone_annotations-2.23.0-sources.jar",
+ "https://repo.maven.apache.org/maven2/com/google/errorprone/error_prone_annotations/2.23.0/error_prone_annotations-2.23.0-sources.jar"
],
"packages": [],
- "sha256": "a2c0783981c8ad48faaa6ea8de6f1926d8e87c125f5df5ce531a9810b943e032",
- "url": "https://repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.18.0/error_prone_annotations-2.18.0-sources.jar"
+ "sha256": "5b4504609bb93d3c24b87cd839cf0bb7d878135d0a917a05081d0dc9b2a9973f",
+ "url": "https://repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.23.0/error_prone_annotations-2.23.0-sources.jar"
},
{
"coord": "com.google.guava:failureaccess:1.0.1",
@@ -441,30 +439,30 @@
"url": "https://repo1.maven.org/maven2/com/google/guava/failureaccess/1.0.1/failureaccess-1.0.1-sources.jar"
},
{
- "coord": "com.google.guava:guava-testlib:32.1.1-jre",
+ "coord": "com.google.guava:guava-testlib:32.1.3-jre",
"dependencies": [
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.errorprone:error_prone_annotations:2.18.0",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
"com.google.guava:failureaccess:1.0.1",
- "com.google.guava:guava:32.1.1-jre",
+ "com.google.guava:guava:32.1.3-jre",
"com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava",
"com.google.j2objc:j2objc-annotations:2.8",
"junit:junit:4.13.2",
- "org.checkerframework:checker-qual:3.33.0",
+ "org.checkerframework:checker-qual:3.37.0",
"org.hamcrest:hamcrest-core:1.3"
],
"directDependencies": [
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
"com.google.j2objc:j2objc-annotations:2.8",
"junit:junit:4.13.2",
- "org.checkerframework:checker-qual:3.33.0"
+ "org.checkerframework:checker-qual:3.37.0"
],
- "file": "v1/https/repo1.maven.org/maven2/com/google/guava/guava-testlib/32.1.1-jre/guava-testlib-32.1.1-jre.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/guava/guava-testlib/32.1.3-jre/guava-testlib-32.1.3-jre.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/guava/guava-testlib/32.1.1-jre/guava-testlib-32.1.1-jre.jar",
- "https://repo.maven.apache.org/maven2/com/google/guava/guava-testlib/32.1.1-jre/guava-testlib-32.1.1-jre.jar"
+ "https://repo1.maven.org/maven2/com/google/guava/guava-testlib/32.1.3-jre/guava-testlib-32.1.3-jre.jar",
+ "https://repo.maven.apache.org/maven2/com/google/guava/guava-testlib/32.1.3-jre/guava-testlib-32.1.3-jre.jar"
],
"packages": [
"com.google.common.collect.testing",
@@ -476,61 +474,61 @@
"com.google.common.testing",
"com.google.common.util.concurrent.testing"
],
- "sha256": "77e6f9700bc63f18b3da24d2701f41b9c9e1cc4a57346f21ced66cb0066da235",
- "url": "https://repo1.maven.org/maven2/com/google/guava/guava-testlib/32.1.1-jre/guava-testlib-32.1.1-jre.jar"
+ "sha256": "58aca6a4f287ae73e5fd610212bdcdc78d677c4475695009ba1656349dad9079",
+ "url": "https://repo1.maven.org/maven2/com/google/guava/guava-testlib/32.1.3-jre/guava-testlib-32.1.3-jre.jar"
},
{
- "coord": "com.google.guava:guava-testlib:jar:sources:32.1.1-jre",
+ "coord": "com.google.guava:guava-testlib:jar:sources:32.1.3-jre",
"dependencies": [
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
"com.google.guava:failureaccess:jar:sources:1.0.1",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
"com.google.guava:listenablefuture:jar:sources:9999.0-empty-to-avoid-conflict-with-guava",
"com.google.j2objc:j2objc-annotations:jar:sources:2.8",
"junit:junit:jar:sources:4.13.2",
- "org.checkerframework:checker-qual:jar:sources:3.33.0",
+ "org.checkerframework:checker-qual:jar:sources:3.37.0",
"org.hamcrest:hamcrest-core:jar:sources:1.3"
],
"directDependencies": [
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
"com.google.j2objc:j2objc-annotations:jar:sources:2.8",
"junit:junit:jar:sources:4.13.2",
- "org.checkerframework:checker-qual:jar:sources:3.33.0"
+ "org.checkerframework:checker-qual:jar:sources:3.37.0"
],
- "file": "v1/https/repo1.maven.org/maven2/com/google/guava/guava-testlib/32.1.1-jre/guava-testlib-32.1.1-jre-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/guava/guava-testlib/32.1.3-jre/guava-testlib-32.1.3-jre-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/guava/guava-testlib/32.1.1-jre/guava-testlib-32.1.1-jre-sources.jar",
- "https://repo.maven.apache.org/maven2/com/google/guava/guava-testlib/32.1.1-jre/guava-testlib-32.1.1-jre-sources.jar"
+ "https://repo1.maven.org/maven2/com/google/guava/guava-testlib/32.1.3-jre/guava-testlib-32.1.3-jre-sources.jar",
+ "https://repo.maven.apache.org/maven2/com/google/guava/guava-testlib/32.1.3-jre/guava-testlib-32.1.3-jre-sources.jar"
],
"packages": [],
- "sha256": "9c3d54bf1d7e7606f5164636d7fe02a903f71f6053ed3217ed1d44dfead0d428",
- "url": "https://repo1.maven.org/maven2/com/google/guava/guava-testlib/32.1.1-jre/guava-testlib-32.1.1-jre-sources.jar"
+ "sha256": "52aacddaf68c1b329a7751ef2cf074df3bfbdd449d50e305fff5fbd835cca6d0",
+ "url": "https://repo1.maven.org/maven2/com/google/guava/guava-testlib/32.1.3-jre/guava-testlib-32.1.3-jre-sources.jar"
},
{
- "coord": "com.google.guava:guava:32.1.1-jre",
+ "coord": "com.google.guava:guava:32.1.3-jre",
"dependencies": [
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.errorprone:error_prone_annotations:2.18.0",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
"com.google.guava:failureaccess:1.0.1",
"com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava",
"com.google.j2objc:j2objc-annotations:2.8",
- "org.checkerframework:checker-qual:3.33.0"
+ "org.checkerframework:checker-qual:3.37.0"
],
"directDependencies": [
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.errorprone:error_prone_annotations:2.18.0",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
"com.google.guava:failureaccess:1.0.1",
"com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava",
"com.google.j2objc:j2objc-annotations:2.8",
- "org.checkerframework:checker-qual:3.33.0"
+ "org.checkerframework:checker-qual:3.37.0"
],
- "file": "v1/https/repo1.maven.org/maven2/com/google/guava/guava/32.1.1-jre/guava-32.1.1-jre.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/guava/guava/32.1.3-jre/guava-32.1.3-jre.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/guava/guava/32.1.1-jre/guava-32.1.1-jre.jar",
- "https://repo.maven.apache.org/maven2/com/google/guava/guava/32.1.1-jre/guava-32.1.1-jre.jar"
+ "https://repo1.maven.org/maven2/com/google/guava/guava/32.1.3-jre/guava-32.1.3-jre.jar",
+ "https://repo.maven.apache.org/maven2/com/google/guava/guava/32.1.3-jre/guava-32.1.3-jre.jar"
],
"packages": [
"com.google.common.annotations",
@@ -552,35 +550,35 @@
"com.google.common.xml",
"com.google.thirdparty.publicsuffix"
],
- "sha256": "91fbba37f1c8b251cf9ea9e7d3a369eb79eb1e6a5df1d4bbf483dd0380740281",
- "url": "https://repo1.maven.org/maven2/com/google/guava/guava/32.1.1-jre/guava-32.1.1-jre.jar"
+ "sha256": "6d4e2b5a118aab62e6e5e29d185a0224eed82c85c40ac3d33cf04a270c3b3744",
+ "url": "https://repo1.maven.org/maven2/com/google/guava/guava/32.1.3-jre/guava-32.1.3-jre.jar"
},
{
- "coord": "com.google.guava:guava:jar:sources:32.1.1-jre",
+ "coord": "com.google.guava:guava:jar:sources:32.1.3-jre",
"dependencies": [
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
"com.google.guava:failureaccess:jar:sources:1.0.1",
"com.google.guava:listenablefuture:jar:sources:9999.0-empty-to-avoid-conflict-with-guava",
"com.google.j2objc:j2objc-annotations:jar:sources:2.8",
- "org.checkerframework:checker-qual:jar:sources:3.33.0"
+ "org.checkerframework:checker-qual:jar:sources:3.37.0"
],
"directDependencies": [
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
"com.google.guava:failureaccess:jar:sources:1.0.1",
"com.google.guava:listenablefuture:jar:sources:9999.0-empty-to-avoid-conflict-with-guava",
"com.google.j2objc:j2objc-annotations:jar:sources:2.8",
- "org.checkerframework:checker-qual:jar:sources:3.33.0"
+ "org.checkerframework:checker-qual:jar:sources:3.37.0"
],
- "file": "v1/https/repo1.maven.org/maven2/com/google/guava/guava/32.1.1-jre/guava-32.1.1-jre-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/guava/guava/32.1.3-jre/guava-32.1.3-jre-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/guava/guava/32.1.1-jre/guava-32.1.1-jre-sources.jar",
- "https://repo.maven.apache.org/maven2/com/google/guava/guava/32.1.1-jre/guava-32.1.1-jre-sources.jar"
+ "https://repo1.maven.org/maven2/com/google/guava/guava/32.1.3-jre/guava-32.1.3-jre-sources.jar",
+ "https://repo.maven.apache.org/maven2/com/google/guava/guava/32.1.3-jre/guava-32.1.3-jre-sources.jar"
],
"packages": [],
- "sha256": "5e7b6cebd2e9087a536c1054bf52a2e6a49c284772421f146640cfadc54ba573",
- "url": "https://repo1.maven.org/maven2/com/google/guava/guava/32.1.1-jre/guava-32.1.1-jre-sources.jar"
+ "sha256": "9f6f333b2deaf36644d14ddeed7e6b31151b0c244bab1e4d58ee443ade9a09f3",
+ "url": "https://repo1.maven.org/maven2/com/google/guava/guava/32.1.3-jre/guava-32.1.3-jre-sources.jar"
},
{
"coord": "com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava",
@@ -606,7 +604,7 @@
"dependencies": [
"com.google.code.findbugs:jsr305:3.0.2",
"com.google.code.gson:gson:2.10.1",
- "com.google.guava:guava:32.1.1-jre",
+ "com.google.guava:guava:32.1.3-jre",
"com.google.http-client:google-http-client:1.41.0",
"com.google.j2objc:j2objc-annotations:2.8",
"commons-codec:commons-codec:1.11",
@@ -636,7 +634,7 @@
"dependencies": [
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
"com.google.code.gson:gson:jar:sources:2.10.1",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
"com.google.http-client:google-http-client:jar:sources:1.41.0",
"com.google.j2objc:j2objc-annotations:jar:sources:2.8",
"commons-codec:commons-codec:jar:sources:1.11",
@@ -663,7 +661,7 @@
"coord": "com.google.http-client:google-http-client:1.41.0",
"dependencies": [
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.guava:guava:32.1.1-jre",
+ "com.google.guava:guava:32.1.3-jre",
"com.google.j2objc:j2objc-annotations:2.8",
"commons-codec:commons-codec:1.11",
"commons-logging:commons-logging:1.2",
@@ -674,7 +672,7 @@
],
"directDependencies": [
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.guava:guava:32.1.1-jre",
+ "com.google.guava:guava:32.1.3-jre",
"com.google.j2objc:j2objc-annotations:2.8",
"io.opencensus:opencensus-api:0.31.0",
"io.opencensus:opencensus-contrib-http-util:0.28.0",
@@ -711,7 +709,7 @@
"coord": "com.google.http-client:google-http-client:jar:sources:1.41.0",
"dependencies": [
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
"com.google.j2objc:j2objc-annotations:jar:sources:2.8",
"commons-codec:commons-codec:jar:sources:1.11",
"commons-logging:commons-logging:jar:sources:1.2",
@@ -722,7 +720,7 @@
],
"directDependencies": [
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
"com.google.j2objc:j2objc-annotations:jar:sources:2.8",
"io.opencensus:opencensus-api:jar:sources:0.31.0",
"io.opencensus:opencensus-contrib-http-util:jar:sources:0.28.0",
@@ -767,61 +765,61 @@
"url": "https://repo1.maven.org/maven2/com/google/j2objc/j2objc-annotations/2.8/j2objc-annotations-2.8-sources.jar"
},
{
- "coord": "com.google.protobuf:protobuf-java:3.23.2",
+ "coord": "com.google.protobuf:protobuf-java:3.25.1",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.23.2/protobuf-java-3.23.2.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.25.1/protobuf-java-3.25.1.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.23.2/protobuf-java-3.23.2.jar",
- "https://repo.maven.apache.org/maven2/com/google/protobuf/protobuf-java/3.23.2/protobuf-java-3.23.2.jar"
+ "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.25.1/protobuf-java-3.25.1.jar",
+ "https://repo.maven.apache.org/maven2/com/google/protobuf/protobuf-java/3.25.1/protobuf-java-3.25.1.jar"
],
"packages": [
"com.google.protobuf",
"com.google.protobuf.compiler"
],
- "sha256": "18a057f5e0f828daa92b71c19df91f6bcc2aad067ca2cdd6b5698055ca7bcece",
- "url": "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.23.2/protobuf-java-3.23.2.jar"
+ "sha256": "48a8e58a1a8f82eff141a7a388d38dfe77d7a48d5e57c9066ee37f19147e20df",
+ "url": "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.25.1/protobuf-java-3.25.1.jar"
},
{
- "coord": "com.google.protobuf:protobuf-java:jar:sources:3.23.2",
+ "coord": "com.google.protobuf:protobuf-java:jar:sources:3.25.1",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.23.2/protobuf-java-3.23.2-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.25.1/protobuf-java-3.25.1-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.23.2/protobuf-java-3.23.2-sources.jar",
- "https://repo.maven.apache.org/maven2/com/google/protobuf/protobuf-java/3.23.2/protobuf-java-3.23.2-sources.jar"
+ "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.25.1/protobuf-java-3.25.1-sources.jar",
+ "https://repo.maven.apache.org/maven2/com/google/protobuf/protobuf-java/3.25.1/protobuf-java-3.25.1-sources.jar"
],
"packages": [],
- "sha256": "f258013889c5582275b5e7c2bc3be45bc2daca2063dfc35bacd91529045d6d05",
- "url": "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.23.2/protobuf-java-3.23.2-sources.jar"
+ "sha256": "104e26e3772a4af530057bf84e81f7ffabd69097551ab1989a2ab2614cb47463",
+ "url": "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.25.1/protobuf-java-3.25.1-sources.jar"
},
{
- "coord": "com.google.protobuf:protobuf-javalite:3.22.3",
+ "coord": "com.google.protobuf:protobuf-javalite:3.25.1",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/com/google/protobuf/protobuf-javalite/3.22.3/protobuf-javalite-3.22.3.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/protobuf/protobuf-javalite/3.25.1/protobuf-javalite-3.25.1.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-javalite/3.22.3/protobuf-javalite-3.22.3.jar",
- "https://repo.maven.apache.org/maven2/com/google/protobuf/protobuf-javalite/3.22.3/protobuf-javalite-3.22.3.jar"
+ "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-javalite/3.25.1/protobuf-javalite-3.25.1.jar",
+ "https://repo.maven.apache.org/maven2/com/google/protobuf/protobuf-javalite/3.25.1/protobuf-javalite-3.25.1.jar"
],
"packages": [
"com.google.protobuf"
],
- "sha256": "9d26bebd2607fd0553cedbfc3e4a3d3f06c6e7a207d2b74c87fa6181838ed1bf",
- "url": "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-javalite/3.22.3/protobuf-javalite-3.22.3.jar"
+ "sha256": "4b33c2e706ae810eb474cf089d3bf7f29a8e19748909e1df1ec8cd27b4f93c40",
+ "url": "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-javalite/3.25.1/protobuf-javalite-3.25.1.jar"
},
{
- "coord": "com.google.protobuf:protobuf-javalite:jar:sources:3.22.3",
+ "coord": "com.google.protobuf:protobuf-javalite:jar:sources:3.25.1",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/com/google/protobuf/protobuf-javalite/3.22.3/protobuf-javalite-3.22.3-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/protobuf/protobuf-javalite/3.25.1/protobuf-javalite-3.25.1-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-javalite/3.22.3/protobuf-javalite-3.22.3-sources.jar",
- "https://repo.maven.apache.org/maven2/com/google/protobuf/protobuf-javalite/3.22.3/protobuf-javalite-3.22.3-sources.jar"
+ "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-javalite/3.25.1/protobuf-javalite-3.25.1-sources.jar",
+ "https://repo.maven.apache.org/maven2/com/google/protobuf/protobuf-javalite/3.25.1/protobuf-javalite-3.25.1-sources.jar"
],
"packages": [],
- "sha256": "a1b3e64ca87b47698b6f844a4b40738a0c32999547d5bab9178bd37224258ccc",
- "url": "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-javalite/3.22.3/protobuf-javalite-3.22.3-sources.jar"
+ "sha256": "ce0eca783fd9eace9049835675bbf6277425bb96addc41130392f61ad82a1524",
+ "url": "https://repo1.maven.org/maven2/com/google/protobuf/protobuf-javalite/3.25.1/protobuf-javalite-3.25.1-sources.jar"
},
{
"coord": "com.google.re2j:re2j:1.7",
@@ -854,21 +852,18 @@
{
"coord": "com.google.truth.extensions:truth-liteproto-extension:1.1.3",
"dependencies": [
- "com.google.auto.value:auto-value-annotations:1.10.1",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
- "com.google.truth:truth:1.1.3",
- "junit:junit:4.13.2",
- "org.checkerframework:checker-qual:3.33.0",
- "org.hamcrest:hamcrest-core:1.3",
- "org.ow2.asm:asm:9.1"
+ "com.google.auto.value:auto-value-annotations:1.10.4",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "com.google.truth:truth:1.1.5",
+ "org.checkerframework:checker-qual:3.37.0"
],
"directDependencies": [
- "com.google.auto.value:auto-value-annotations:1.10.1",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
- "com.google.truth:truth:1.1.3",
- "org.checkerframework:checker-qual:3.33.0"
+ "com.google.auto.value:auto-value-annotations:1.10.4",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "com.google.truth:truth:1.1.5",
+ "org.checkerframework:checker-qual:3.37.0"
],
"file": "v1/https/repo1.maven.org/maven2/com/google/truth/extensions/truth-liteproto-extension/1.1.3/truth-liteproto-extension-1.1.3.jar",
"mirror_urls": [
@@ -884,21 +879,18 @@
{
"coord": "com.google.truth.extensions:truth-liteproto-extension:jar:sources:1.1.3",
"dependencies": [
- "com.google.auto.value:auto-value-annotations:jar:sources:1.10.1",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "com.google.truth:truth:jar:sources:1.1.3",
- "junit:junit:jar:sources:4.13.2",
- "org.checkerframework:checker-qual:jar:sources:3.33.0",
- "org.hamcrest:hamcrest-core:jar:sources:1.3",
- "org.ow2.asm:asm:jar:sources:9.1"
+ "com.google.auto.value:auto-value-annotations:jar:sources:1.10.4",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "com.google.truth:truth:jar:sources:1.1.5",
+ "org.checkerframework:checker-qual:jar:sources:3.37.0"
],
"directDependencies": [
- "com.google.auto.value:auto-value-annotations:jar:sources:1.10.1",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "com.google.truth:truth:jar:sources:1.1.3",
- "org.checkerframework:checker-qual:jar:sources:3.33.0"
+ "com.google.auto.value:auto-value-annotations:jar:sources:1.10.4",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "com.google.truth:truth:jar:sources:1.1.5",
+ "org.checkerframework:checker-qual:jar:sources:3.37.0"
],
"file": "v1/https/repo1.maven.org/maven2/com/google/truth/extensions/truth-liteproto-extension/1.1.3/truth-liteproto-extension-1.1.3-sources.jar",
"mirror_urls": [
@@ -912,25 +904,22 @@
{
"coord": "com.google.truth.extensions:truth-proto-extension:1.1.3",
"dependencies": [
- "com.google.auto.value:auto-value-annotations:1.10.1",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
- "com.google.protobuf:protobuf-java:3.23.2",
+ "com.google.auto.value:auto-value-annotations:1.10.4",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "com.google.protobuf:protobuf-java:3.25.1",
"com.google.truth.extensions:truth-liteproto-extension:1.1.3",
- "com.google.truth:truth:1.1.3",
- "junit:junit:4.13.2",
- "org.checkerframework:checker-qual:3.33.0",
- "org.hamcrest:hamcrest-core:1.3",
- "org.ow2.asm:asm:9.1"
+ "com.google.truth:truth:1.1.5",
+ "org.checkerframework:checker-qual:3.37.0"
],
"directDependencies": [
- "com.google.auto.value:auto-value-annotations:1.10.1",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
- "com.google.protobuf:protobuf-java:3.23.2",
+ "com.google.auto.value:auto-value-annotations:1.10.4",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "com.google.protobuf:protobuf-java:3.25.1",
"com.google.truth.extensions:truth-liteproto-extension:1.1.3",
- "com.google.truth:truth:1.1.3",
- "org.checkerframework:checker-qual:3.33.0"
+ "com.google.truth:truth:1.1.5",
+ "org.checkerframework:checker-qual:3.37.0"
],
"file": "v1/https/repo1.maven.org/maven2/com/google/truth/extensions/truth-proto-extension/1.1.3/truth-proto-extension-1.1.3.jar",
"mirror_urls": [
@@ -946,25 +935,22 @@
{
"coord": "com.google.truth.extensions:truth-proto-extension:jar:sources:1.1.3",
"dependencies": [
- "com.google.auto.value:auto-value-annotations:jar:sources:1.10.1",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "com.google.protobuf:protobuf-java:jar:sources:3.23.2",
+ "com.google.auto.value:auto-value-annotations:jar:sources:1.10.4",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "com.google.protobuf:protobuf-java:jar:sources:3.25.1",
"com.google.truth.extensions:truth-liteproto-extension:jar:sources:1.1.3",
- "com.google.truth:truth:jar:sources:1.1.3",
- "junit:junit:jar:sources:4.13.2",
- "org.checkerframework:checker-qual:jar:sources:3.33.0",
- "org.hamcrest:hamcrest-core:jar:sources:1.3",
- "org.ow2.asm:asm:jar:sources:9.1"
+ "com.google.truth:truth:jar:sources:1.1.5",
+ "org.checkerframework:checker-qual:jar:sources:3.37.0"
],
"directDependencies": [
- "com.google.auto.value:auto-value-annotations:jar:sources:1.10.1",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "com.google.protobuf:protobuf-java:jar:sources:3.23.2",
+ "com.google.auto.value:auto-value-annotations:jar:sources:1.10.4",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "com.google.protobuf:protobuf-java:jar:sources:3.25.1",
"com.google.truth.extensions:truth-liteproto-extension:jar:sources:1.1.3",
- "com.google.truth:truth:jar:sources:1.1.3",
- "org.checkerframework:checker-qual:jar:sources:3.33.0"
+ "com.google.truth:truth:jar:sources:1.1.5",
+ "org.checkerframework:checker-qual:jar:sources:3.37.0"
],
"file": "v1/https/repo1.maven.org/maven2/com/google/truth/extensions/truth-proto-extension/1.1.3/truth-proto-extension-1.1.3-sources.jar",
"mirror_urls": [
@@ -976,70 +962,70 @@
"url": "https://repo1.maven.org/maven2/com/google/truth/extensions/truth-proto-extension/1.1.3/truth-proto-extension-1.1.3-sources.jar"
},
{
- "coord": "com.google.truth:truth:1.1.3",
+ "coord": "com.google.truth:truth:1.1.5",
"dependencies": [
- "com.google.auto.value:auto-value-annotations:1.10.1",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
+ "com.google.auto.value:auto-value-annotations:1.10.4",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
"junit:junit:4.13.2",
- "org.checkerframework:checker-qual:3.33.0",
+ "org.checkerframework:checker-qual:3.37.0",
"org.hamcrest:hamcrest-core:1.3",
- "org.ow2.asm:asm:9.1"
+ "org.ow2.asm:asm:9.5"
],
"directDependencies": [
- "com.google.auto.value:auto-value-annotations:1.10.1",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
+ "com.google.auto.value:auto-value-annotations:1.10.4",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
"junit:junit:4.13.2",
- "org.checkerframework:checker-qual:3.33.0",
- "org.ow2.asm:asm:9.1"
+ "org.checkerframework:checker-qual:3.37.0",
+ "org.ow2.asm:asm:9.5"
],
- "file": "v1/https/repo1.maven.org/maven2/com/google/truth/truth/1.1.3/truth-1.1.3.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/truth/truth/1.1.5/truth-1.1.5.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/truth/truth/1.1.3/truth-1.1.3.jar",
- "https://repo.maven.apache.org/maven2/com/google/truth/truth/1.1.3/truth-1.1.3.jar"
+ "https://repo1.maven.org/maven2/com/google/truth/truth/1.1.5/truth-1.1.5.jar",
+ "https://repo.maven.apache.org/maven2/com/google/truth/truth/1.1.5/truth-1.1.5.jar"
],
"packages": [
"com.google.common.truth"
],
- "sha256": "fc0b67782289a2aabfddfdf99eff1dcd5edc890d49143fcd489214b107b8f4f3",
- "url": "https://repo1.maven.org/maven2/com/google/truth/truth/1.1.3/truth-1.1.3.jar"
+ "sha256": "7f6d50d6f43a102942ef2c5a05f37a84f77788bb448cf33cceebf86d34e575c0",
+ "url": "https://repo1.maven.org/maven2/com/google/truth/truth/1.1.5/truth-1.1.5.jar"
},
{
- "coord": "com.google.truth:truth:jar:sources:1.1.3",
+ "coord": "com.google.truth:truth:jar:sources:1.1.5",
"dependencies": [
- "com.google.auto.value:auto-value-annotations:jar:sources:1.10.1",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
+ "com.google.auto.value:auto-value-annotations:jar:sources:1.10.4",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
"junit:junit:jar:sources:4.13.2",
- "org.checkerframework:checker-qual:jar:sources:3.33.0",
+ "org.checkerframework:checker-qual:jar:sources:3.37.0",
"org.hamcrest:hamcrest-core:jar:sources:1.3",
- "org.ow2.asm:asm:jar:sources:9.1"
+ "org.ow2.asm:asm:jar:sources:9.5"
],
"directDependencies": [
- "com.google.auto.value:auto-value-annotations:jar:sources:1.10.1",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
+ "com.google.auto.value:auto-value-annotations:jar:sources:1.10.4",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
"junit:junit:jar:sources:4.13.2",
- "org.checkerframework:checker-qual:jar:sources:3.33.0",
- "org.ow2.asm:asm:jar:sources:9.1"
+ "org.checkerframework:checker-qual:jar:sources:3.37.0",
+ "org.ow2.asm:asm:jar:sources:9.5"
],
- "file": "v1/https/repo1.maven.org/maven2/com/google/truth/truth/1.1.3/truth-1.1.3-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/com/google/truth/truth/1.1.5/truth-1.1.5-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/google/truth/truth/1.1.3/truth-1.1.3-sources.jar",
- "https://repo.maven.apache.org/maven2/com/google/truth/truth/1.1.3/truth-1.1.3-sources.jar"
+ "https://repo1.maven.org/maven2/com/google/truth/truth/1.1.5/truth-1.1.5-sources.jar",
+ "https://repo.maven.apache.org/maven2/com/google/truth/truth/1.1.5/truth-1.1.5-sources.jar"
],
"packages": [],
- "sha256": "6c35e3d7087cd222938b41bbdb54041239b79dda07cf96c4027c118d566df545",
- "url": "https://repo1.maven.org/maven2/com/google/truth/truth/1.1.3/truth-1.1.3-sources.jar"
+ "sha256": "f1a94449ed48392525626f4e5edeff5c6e66af21c4c009ebb49b5109ac5db6b2",
+ "url": "https://repo1.maven.org/maven2/com/google/truth/truth/1.1.5/truth-1.1.5-sources.jar"
},
{
"coord": "com.squareup.okhttp:okhttp:2.7.5",
"dependencies": [
- "com.squareup.okio:okio:1.17.5"
+ "com.squareup.okio:okio:2.10.0"
],
"directDependencies": [
- "com.squareup.okio:okio:1.17.5"
+ "com.squareup.okio:okio:2.10.0"
],
"file": "v1/https/repo1.maven.org/maven2/com/squareup/okhttp/okhttp/2.7.5/okhttp-2.7.5.jar",
"mirror_urls": [
@@ -1060,10 +1046,10 @@
{
"coord": "com.squareup.okhttp:okhttp:jar:sources:2.7.5",
"dependencies": [
- "com.squareup.okio:okio:jar:sources:1.17.5"
+ "com.squareup.okio:okio:jar:sources:2.10.0"
],
"directDependencies": [
- "com.squareup.okio:okio:jar:sources:1.17.5"
+ "com.squareup.okio:okio:jar:sources:2.10.0"
],
"file": "v1/https/repo1.maven.org/maven2/com/squareup/okhttp/okhttp/2.7.5/okhttp-2.7.5-sources.jar",
"mirror_urls": [
@@ -1075,32 +1061,47 @@
"url": "https://repo1.maven.org/maven2/com/squareup/okhttp/okhttp/2.7.5/okhttp-2.7.5-sources.jar"
},
{
- "coord": "com.squareup.okio:okio:1.17.5",
- "dependencies": [],
- "directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/com/squareup/okio/okio/1.17.5/okio-1.17.5.jar",
+ "coord": "com.squareup.okio:okio:2.10.0",
+ "dependencies": [
+ "org.jetbrains.kotlin:kotlin-stdlib-common:1.4.20",
+ "org.jetbrains.kotlin:kotlin-stdlib:1.4.20",
+ "org.jetbrains:annotations:13.0"
+ ],
+ "directDependencies": [
+ "org.jetbrains.kotlin:kotlin-stdlib-common:1.4.20",
+ "org.jetbrains.kotlin:kotlin-stdlib:1.4.20"
+ ],
+ "file": "v1/https/repo1.maven.org/maven2/com/squareup/okio/okio/2.10.0/okio-2.10.0.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/squareup/okio/okio/1.17.5/okio-1.17.5.jar",
- "https://repo.maven.apache.org/maven2/com/squareup/okio/okio/1.17.5/okio-1.17.5.jar"
+ "https://repo1.maven.org/maven2/com/squareup/okio/okio/2.10.0/okio-2.10.0.jar",
+ "https://repo.maven.apache.org/maven2/com/squareup/okio/okio/2.10.0/okio-2.10.0.jar"
],
"packages": [
- "okio"
+ "okio",
+ "okio.internal"
],
- "sha256": "19a7ff48d86d3cf4497f7f250fbf295f430c13a528dd5b7b203f821802b886ad",
- "url": "https://repo1.maven.org/maven2/com/squareup/okio/okio/1.17.5/okio-1.17.5.jar"
+ "sha256": "a27f091d34aa452e37227e2cfa85809f29012a8ef2501a9b5a125a978e4fcbc1",
+ "url": "https://repo1.maven.org/maven2/com/squareup/okio/okio/2.10.0/okio-2.10.0.jar"
},
{
- "coord": "com.squareup.okio:okio:jar:sources:1.17.5",
- "dependencies": [],
- "directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/com/squareup/okio/okio/1.17.5/okio-1.17.5-sources.jar",
+ "coord": "com.squareup.okio:okio:jar:sources:2.10.0",
+ "dependencies": [
+ "org.jetbrains.kotlin:kotlin-stdlib-common:jar:sources:1.4.20",
+ "org.jetbrains.kotlin:kotlin-stdlib:jar:sources:1.4.20",
+ "org.jetbrains:annotations:jar:sources:13.0"
+ ],
+ "directDependencies": [
+ "org.jetbrains.kotlin:kotlin-stdlib-common:jar:sources:1.4.20",
+ "org.jetbrains.kotlin:kotlin-stdlib:jar:sources:1.4.20"
+ ],
+ "file": "v1/https/repo1.maven.org/maven2/com/squareup/okio/okio/2.10.0/okio-2.10.0-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/com/squareup/okio/okio/1.17.5/okio-1.17.5-sources.jar",
- "https://repo.maven.apache.org/maven2/com/squareup/okio/okio/1.17.5/okio-1.17.5-sources.jar"
+ "https://repo1.maven.org/maven2/com/squareup/okio/okio/2.10.0/okio-2.10.0-sources.jar",
+ "https://repo.maven.apache.org/maven2/com/squareup/okio/okio/2.10.0/okio-2.10.0-sources.jar"
],
"packages": [],
- "sha256": "537b41075d390d888aec040d0798211b1702d34f558efc09364b5f7d388ec496",
- "url": "https://repo1.maven.org/maven2/com/squareup/okio/okio/1.17.5/okio-1.17.5-sources.jar"
+ "sha256": "4f255d11829d7e1949be042efa70f933650a7d929e68d594c1608f97884a0535",
+ "url": "https://repo1.maven.org/maven2/com/squareup/okio/okio/2.10.0/okio-2.10.0-sources.jar"
},
{
"coord": "commons-codec:commons-codec:1.11",
@@ -1166,515 +1167,577 @@
"url": "https://repo1.maven.org/maven2/commons-logging/commons-logging/1.2/commons-logging-1.2-sources.jar"
},
{
- "coord": "io.grpc:grpc-api:1.56.0",
+ "coord": "io.grpc:grpc-api:1.64.2",
"dependencies": [
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
- "io.grpc:grpc-context:1.56.0"
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre"
],
"directDependencies": [
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
- "io.grpc:grpc-context:1.56.0"
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre"
],
- "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-api/1.56.0/grpc-api-1.56.0.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-api/1.64.2/grpc-api-1.64.2.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/grpc/grpc-api/1.56.0/grpc-api-1.56.0.jar",
- "https://repo.maven.apache.org/maven2/io/grpc/grpc-api/1.56.0/grpc-api-1.56.0.jar"
+ "https://repo1.maven.org/maven2/io/grpc/grpc-api/1.64.2/grpc-api-1.64.2.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-api/1.64.2/grpc-api-1.64.2.jar"
],
"packages": [
"io.grpc"
],
- "sha256": "fb8313443d63403d9a65b71cfe719e0e717b1abef0ede8c5e8e57beffe202093",
- "url": "https://repo1.maven.org/maven2/io/grpc/grpc-api/1.56.0/grpc-api-1.56.0.jar"
+ "sha256": "70c28a5d3d234a944048c273a59dd60291d76ffc2e94c69563120f363b9450e1",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-api/1.64.2/grpc-api-1.64.2.jar"
},
{
- "coord": "io.grpc:grpc-api:jar:sources:1.56.0",
+ "coord": "io.grpc:grpc-api:jar:sources:1.64.2",
"dependencies": [
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "io.grpc:grpc-context:jar:sources:1.56.0"
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre"
],
"directDependencies": [
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "io.grpc:grpc-context:jar:sources:1.56.0"
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre"
],
- "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-api/1.56.0/grpc-api-1.56.0-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-api/1.64.2/grpc-api-1.64.2-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/grpc/grpc-api/1.56.0/grpc-api-1.56.0-sources.jar",
- "https://repo.maven.apache.org/maven2/io/grpc/grpc-api/1.56.0/grpc-api-1.56.0-sources.jar"
+ "https://repo1.maven.org/maven2/io/grpc/grpc-api/1.64.2/grpc-api-1.64.2-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-api/1.64.2/grpc-api-1.64.2-sources.jar"
],
"packages": [],
- "sha256": "72b6b1435ea930714a66230fff240b0204fceec15c7cb5b2855b0edac8151355",
- "url": "https://repo1.maven.org/maven2/io/grpc/grpc-api/1.56.0/grpc-api-1.56.0-sources.jar"
+ "sha256": "e136d7a1abfae659d4412b1269abb75c294c3fc725f340078aec13bdf881bfdb",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-api/1.64.2/grpc-api-1.64.2-sources.jar"
},
{
- "coord": "io.grpc:grpc-context:1.56.0",
- "dependencies": [],
- "directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-context/1.56.0/grpc-context-1.56.0.jar",
- "mirror_urls": [
- "https://repo1.maven.org/maven2/io/grpc/grpc-context/1.56.0/grpc-context-1.56.0.jar",
- "https://repo.maven.apache.org/maven2/io/grpc/grpc-context/1.56.0/grpc-context-1.56.0.jar"
+ "coord": "io.grpc:grpc-context:1.64.2",
+ "dependencies": [
+ "io.grpc:grpc-api:1.64.2"
],
- "packages": [
- "io.grpc"
+ "directDependencies": [
+ "io.grpc:grpc-api:1.64.2"
],
- "sha256": "1196993f219024c0a51f45f2bd2f448e5b6646292beb76e8fb6aae498e6f393b",
- "url": "https://repo1.maven.org/maven2/io/grpc/grpc-context/1.56.0/grpc-context-1.56.0.jar"
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-context/1.64.2/grpc-context-1.64.2.jar",
+ "mirror_urls": [
+ "https://repo1.maven.org/maven2/io/grpc/grpc-context/1.64.2/grpc-context-1.64.2.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-context/1.64.2/grpc-context-1.64.2.jar"
+ ],
+ "packages": [],
+ "sha256": "62f4cf33bfd1985f981bc266e0a8cf3fd09de8a037de6df8a5f5758963d687b2",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-context/1.64.2/grpc-context-1.64.2.jar"
},
{
- "coord": "io.grpc:grpc-context:jar:sources:1.56.0",
- "dependencies": [],
- "directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-context/1.56.0/grpc-context-1.56.0-sources.jar",
+ "coord": "io.grpc:grpc-context:jar:sources:1.64.2",
+ "dependencies": [
+ "io.grpc:grpc-api:jar:sources:1.64.2"
+ ],
+ "directDependencies": [
+ "io.grpc:grpc-api:jar:sources:1.64.2"
+ ],
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-context/1.64.2/grpc-context-1.64.2-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/grpc/grpc-context/1.56.0/grpc-context-1.56.0-sources.jar",
- "https://repo.maven.apache.org/maven2/io/grpc/grpc-context/1.56.0/grpc-context-1.56.0-sources.jar"
+ "https://repo1.maven.org/maven2/io/grpc/grpc-context/1.64.2/grpc-context-1.64.2-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-context/1.64.2/grpc-context-1.64.2-sources.jar"
],
"packages": [],
- "sha256": "347033e2f3b22c5f6adcd16a6d9e91918a300a7a62544f001e5aae5d1758f627",
- "url": "https://repo1.maven.org/maven2/io/grpc/grpc-context/1.56.0/grpc-context-1.56.0-sources.jar"
+ "sha256": "9ec2e97750ee298e990005f4e3baf38231f86759a71ae80ff2259cb0831f45a9",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-context/1.64.2/grpc-context-1.64.2-sources.jar"
},
{
- "coord": "io.grpc:grpc-core:1.56.0",
+ "coord": "io.grpc:grpc-core:1.64.2",
"dependencies": [
"com.google.android:annotations:4.1.1.4",
"com.google.code.findbugs:jsr305:3.0.2",
"com.google.code.gson:gson:2.10.1",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
- "io.grpc:grpc-api:1.56.0",
- "io.grpc:grpc-context:1.56.0",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "io.grpc:grpc-api:1.64.2",
+ "io.grpc:grpc-context:1.64.2",
"io.perfmark:perfmark-api:0.26.0",
"org.codehaus.mojo:animal-sniffer-annotations:1.23"
],
"directDependencies": [
"com.google.android:annotations:4.1.1.4",
"com.google.code.gson:gson:2.10.1",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
- "io.grpc:grpc-api:1.56.0",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "io.grpc:grpc-api:1.64.2",
+ "io.grpc:grpc-context:1.64.2",
"io.perfmark:perfmark-api:0.26.0",
"org.codehaus.mojo:animal-sniffer-annotations:1.23"
],
- "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-core/1.56.0/grpc-core-1.56.0.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-core/1.64.2/grpc-core-1.64.2.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/grpc/grpc-core/1.56.0/grpc-core-1.56.0.jar",
- "https://repo.maven.apache.org/maven2/io/grpc/grpc-core/1.56.0/grpc-core-1.56.0.jar"
+ "https://repo1.maven.org/maven2/io/grpc/grpc-core/1.64.2/grpc-core-1.64.2.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-core/1.64.2/grpc-core-1.64.2.jar"
],
"packages": [
- "io.grpc.inprocess",
- "io.grpc.internal",
- "io.grpc.util"
+ "io.grpc.internal"
],
- "sha256": "0f1ac3bc378416d0b0f80bf9029808f978faf35fd2022ad2d8bdd6276e6ea53e",
- "url": "https://repo1.maven.org/maven2/io/grpc/grpc-core/1.56.0/grpc-core-1.56.0.jar"
+ "sha256": "c2a3ad4e6c872ac24e53618337fc0d735231fc9e1f3db1e5fe8f20c5e66dfdbc",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-core/1.64.2/grpc-core-1.64.2.jar"
},
{
- "coord": "io.grpc:grpc-core:jar:sources:1.56.0",
+ "coord": "io.grpc:grpc-core:jar:sources:1.64.2",
"dependencies": [
"com.google.android:annotations:jar:sources:4.1.1.4",
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
"com.google.code.gson:gson:jar:sources:2.10.1",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "io.grpc:grpc-api:jar:sources:1.56.0",
- "io.grpc:grpc-context:jar:sources:1.56.0",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "io.grpc:grpc-api:jar:sources:1.64.2",
+ "io.grpc:grpc-context:jar:sources:1.64.2",
"io.perfmark:perfmark-api:jar:sources:0.26.0",
"org.codehaus.mojo:animal-sniffer-annotations:jar:sources:1.23"
],
"directDependencies": [
"com.google.android:annotations:jar:sources:4.1.1.4",
"com.google.code.gson:gson:jar:sources:2.10.1",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "io.grpc:grpc-api:jar:sources:1.56.0",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "io.grpc:grpc-api:jar:sources:1.64.2",
+ "io.grpc:grpc-context:jar:sources:1.64.2",
"io.perfmark:perfmark-api:jar:sources:0.26.0",
"org.codehaus.mojo:animal-sniffer-annotations:jar:sources:1.23"
],
- "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-core/1.56.0/grpc-core-1.56.0-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-core/1.64.2/grpc-core-1.64.2-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/grpc/grpc-core/1.56.0/grpc-core-1.56.0-sources.jar",
- "https://repo.maven.apache.org/maven2/io/grpc/grpc-core/1.56.0/grpc-core-1.56.0-sources.jar"
+ "https://repo1.maven.org/maven2/io/grpc/grpc-core/1.64.2/grpc-core-1.64.2-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-core/1.64.2/grpc-core-1.64.2-sources.jar"
],
"packages": [],
- "sha256": "64a2ca99c07e8aee11b7708ca93445a4ed656bcb122cd5a9b5da20ff66e25dcb",
- "url": "https://repo1.maven.org/maven2/io/grpc/grpc-core/1.56.0/grpc-core-1.56.0-sources.jar"
+ "sha256": "8b83bc55504668f6b945083ec1d527af8f662be9d93358785dd5fc9982ed9886",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-core/1.64.2/grpc-core-1.64.2-sources.jar"
},
{
- "coord": "io.grpc:grpc-netty:1.56.0",
+ "coord": "io.grpc:grpc-netty:1.64.2",
"dependencies": [
"com.google.android:annotations:4.1.1.4",
"com.google.code.findbugs:jsr305:3.0.2",
"com.google.code.gson:gson:2.10.1",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
- "io.grpc:grpc-api:1.56.0",
- "io.grpc:grpc-context:1.56.0",
- "io.grpc:grpc-core:1.56.0",
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-codec-http2:4.1.87.Final",
- "io.netty:netty-codec-http:4.1.87.Final",
- "io.netty:netty-codec-socks:4.1.87.Final",
- "io.netty:netty-codec:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-handler-proxy:4.1.87.Final",
- "io.netty:netty-handler:4.1.87.Final",
- "io.netty:netty-resolver:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "io.grpc:grpc-api:1.64.2",
+ "io.grpc:grpc-context:1.64.2",
+ "io.grpc:grpc-core:1.64.2",
+ "io.grpc:grpc-util:1.64.2",
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-codec-http2:4.1.100.Final",
+ "io.netty:netty-codec-http:4.1.100.Final",
+ "io.netty:netty-codec-socks:4.1.100.Final",
+ "io.netty:netty-codec:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-handler-proxy:4.1.100.Final",
+ "io.netty:netty-handler:4.1.100.Final",
+ "io.netty:netty-resolver:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final",
"io.perfmark:perfmark-api:0.26.0",
"org.codehaus.mojo:animal-sniffer-annotations:1.23"
],
"directDependencies": [
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
- "io.grpc:grpc-core:1.56.0",
- "io.netty:netty-codec-http2:4.1.87.Final",
- "io.netty:netty-handler-proxy:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:4.1.87.Final",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "io.grpc:grpc-api:1.64.2",
+ "io.grpc:grpc-core:1.64.2",
+ "io.grpc:grpc-util:1.64.2",
+ "io.netty:netty-codec-http2:4.1.100.Final",
+ "io.netty:netty-handler-proxy:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:4.1.100.Final",
"io.perfmark:perfmark-api:0.26.0"
],
- "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-netty/1.56.0/grpc-netty-1.56.0.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-netty/1.64.2/grpc-netty-1.64.2.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/grpc/grpc-netty/1.56.0/grpc-netty-1.56.0.jar",
- "https://repo.maven.apache.org/maven2/io/grpc/grpc-netty/1.56.0/grpc-netty-1.56.0.jar"
+ "https://repo1.maven.org/maven2/io/grpc/grpc-netty/1.64.2/grpc-netty-1.64.2.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-netty/1.64.2/grpc-netty-1.64.2.jar"
],
"packages": [
"io.grpc.netty"
],
- "sha256": "87dccfa1703e72b7aba0e0f5aac7f4e4c8c342e2faf82cccabd7b8ae9217f6ef",
- "url": "https://repo1.maven.org/maven2/io/grpc/grpc-netty/1.56.0/grpc-netty-1.56.0.jar"
+ "sha256": "40da4d27d954299a3340574ad4c29320bbba99393115e7185283669693d8a60c",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-netty/1.64.2/grpc-netty-1.64.2.jar"
},
{
- "coord": "io.grpc:grpc-netty:jar:sources:1.56.0",
+ "coord": "io.grpc:grpc-netty:jar:sources:1.64.2",
"dependencies": [
"com.google.android:annotations:jar:sources:4.1.1.4",
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
"com.google.code.gson:gson:jar:sources:2.10.1",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "io.grpc:grpc-api:jar:sources:1.56.0",
- "io.grpc:grpc-context:jar:sources:1.56.0",
- "io.grpc:grpc-core:jar:sources:1.56.0",
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-codec-http2:jar:sources:4.1.87.Final",
- "io.netty:netty-codec-http:jar:sources:4.1.87.Final",
- "io.netty:netty-codec-socks:jar:sources:4.1.87.Final",
- "io.netty:netty-codec:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-handler-proxy:jar:sources:4.1.87.Final",
- "io.netty:netty-handler:jar:sources:4.1.87.Final",
- "io.netty:netty-resolver:jar:sources:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "io.grpc:grpc-api:jar:sources:1.64.2",
+ "io.grpc:grpc-context:jar:sources:1.64.2",
+ "io.grpc:grpc-core:jar:sources:1.64.2",
+ "io.grpc:grpc-util:jar:sources:1.64.2",
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec-http2:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec-http:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec-socks:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-handler-proxy:jar:sources:4.1.100.Final",
+ "io.netty:netty-handler:jar:sources:4.1.100.Final",
+ "io.netty:netty-resolver:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final",
"io.perfmark:perfmark-api:jar:sources:0.26.0",
"org.codehaus.mojo:animal-sniffer-annotations:jar:sources:1.23"
],
"directDependencies": [
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "io.grpc:grpc-core:jar:sources:1.56.0",
- "io.netty:netty-codec-http2:jar:sources:4.1.87.Final",
- "io.netty:netty-handler-proxy:jar:sources:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:jar:sources:4.1.87.Final",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "io.grpc:grpc-api:jar:sources:1.64.2",
+ "io.grpc:grpc-core:jar:sources:1.64.2",
+ "io.grpc:grpc-util:jar:sources:1.64.2",
+ "io.netty:netty-codec-http2:jar:sources:4.1.100.Final",
+ "io.netty:netty-handler-proxy:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:jar:sources:4.1.100.Final",
"io.perfmark:perfmark-api:jar:sources:0.26.0"
],
- "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-netty/1.56.0/grpc-netty-1.56.0-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-netty/1.64.2/grpc-netty-1.64.2-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/grpc/grpc-netty/1.56.0/grpc-netty-1.56.0-sources.jar",
- "https://repo.maven.apache.org/maven2/io/grpc/grpc-netty/1.56.0/grpc-netty-1.56.0-sources.jar"
+ "https://repo1.maven.org/maven2/io/grpc/grpc-netty/1.64.2/grpc-netty-1.64.2-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-netty/1.64.2/grpc-netty-1.64.2-sources.jar"
],
"packages": [],
- "sha256": "012a2256cc94d5e4ebae65cfb68022b70099faec26690fcc664aedbb245cf122",
- "url": "https://repo1.maven.org/maven2/io/grpc/grpc-netty/1.56.0/grpc-netty-1.56.0-sources.jar"
+ "sha256": "cf1f131c7fdb756f26262502870ab131c19d663c1f807df8339f43f604800164",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-netty/1.64.2/grpc-netty-1.64.2-sources.jar"
},
{
- "coord": "io.grpc:grpc-protobuf-lite:1.56.0",
+ "coord": "io.grpc:grpc-protobuf-lite:1.64.2",
"dependencies": [
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
- "com.google.protobuf:protobuf-javalite:3.22.3",
- "io.grpc:grpc-api:1.56.0",
- "io.grpc:grpc-context:1.56.0"
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "com.google.protobuf:protobuf-javalite:3.25.1",
+ "io.grpc:grpc-api:1.64.2"
],
"directDependencies": [
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.guava:guava:32.1.1-jre",
- "com.google.protobuf:protobuf-javalite:3.22.3",
- "io.grpc:grpc-api:1.56.0"
+ "com.google.guava:guava:32.1.3-jre",
+ "com.google.protobuf:protobuf-javalite:3.25.1",
+ "io.grpc:grpc-api:1.64.2"
],
- "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-protobuf-lite/1.56.0/grpc-protobuf-lite-1.56.0.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-protobuf-lite/1.64.2/grpc-protobuf-lite-1.64.2.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf-lite/1.56.0/grpc-protobuf-lite-1.56.0.jar",
- "https://repo.maven.apache.org/maven2/io/grpc/grpc-protobuf-lite/1.56.0/grpc-protobuf-lite-1.56.0.jar"
+ "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf-lite/1.64.2/grpc-protobuf-lite-1.64.2.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-protobuf-lite/1.64.2/grpc-protobuf-lite-1.64.2.jar"
],
"packages": [
"io.grpc.protobuf.lite"
],
- "sha256": "f35464edcde6a4a07b59b5c7de9e1422536652d8002f6cb2d0ece7117391176d",
- "url": "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf-lite/1.56.0/grpc-protobuf-lite-1.56.0.jar"
+ "sha256": "b276d946abdc08a3c3721c2d6a6159d78691d712c9c8283842675367e73f5193",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf-lite/1.64.2/grpc-protobuf-lite-1.64.2.jar"
},
{
- "coord": "io.grpc:grpc-protobuf-lite:jar:sources:1.56.0",
+ "coord": "io.grpc:grpc-protobuf-lite:jar:sources:1.64.2",
"dependencies": [
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "com.google.protobuf:protobuf-javalite:jar:sources:3.22.3",
- "io.grpc:grpc-api:jar:sources:1.56.0",
- "io.grpc:grpc-context:jar:sources:1.56.0"
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "com.google.protobuf:protobuf-javalite:jar:sources:3.25.1",
+ "io.grpc:grpc-api:jar:sources:1.64.2"
],
"directDependencies": [
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "com.google.protobuf:protobuf-javalite:jar:sources:3.22.3",
- "io.grpc:grpc-api:jar:sources:1.56.0"
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "com.google.protobuf:protobuf-javalite:jar:sources:3.25.1",
+ "io.grpc:grpc-api:jar:sources:1.64.2"
],
- "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-protobuf-lite/1.56.0/grpc-protobuf-lite-1.56.0-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-protobuf-lite/1.64.2/grpc-protobuf-lite-1.64.2-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf-lite/1.56.0/grpc-protobuf-lite-1.56.0-sources.jar",
- "https://repo.maven.apache.org/maven2/io/grpc/grpc-protobuf-lite/1.56.0/grpc-protobuf-lite-1.56.0-sources.jar"
+ "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf-lite/1.64.2/grpc-protobuf-lite-1.64.2-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-protobuf-lite/1.64.2/grpc-protobuf-lite-1.64.2-sources.jar"
],
"packages": [],
- "sha256": "5ac4311f8abff02719686a7dc1faa1d452bdbaa25a14cc7967ff5845128cebf1",
- "url": "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf-lite/1.56.0/grpc-protobuf-lite-1.56.0-sources.jar"
+ "sha256": "babbb3185f3a5984ed73878466d58d72d5d52cb80ba4888358a326e9d9816314",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf-lite/1.64.2/grpc-protobuf-lite-1.64.2-sources.jar"
},
{
- "coord": "io.grpc:grpc-protobuf:1.56.0",
+ "coord": "io.grpc:grpc-protobuf:1.64.2",
"dependencies": [
- "com.google.api.grpc:proto-google-common-protos:2.21.0",
+ "com.google.api.grpc:proto-google-common-protos:2.29.0",
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
- "com.google.protobuf:protobuf-java:3.23.2",
- "io.grpc:grpc-api:1.56.0",
- "io.grpc:grpc-context:1.56.0",
- "io.grpc:grpc-protobuf-lite:1.56.0"
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "com.google.protobuf:protobuf-java:3.25.1",
+ "io.grpc:grpc-api:1.64.2",
+ "io.grpc:grpc-protobuf-lite:1.64.2"
],
"directDependencies": [
- "com.google.api.grpc:proto-google-common-protos:2.21.0",
+ "com.google.api.grpc:proto-google-common-protos:2.29.0",
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.guava:guava:32.1.1-jre",
- "com.google.protobuf:protobuf-java:3.23.2",
- "io.grpc:grpc-api:1.56.0",
- "io.grpc:grpc-protobuf-lite:1.56.0"
+ "com.google.guava:guava:32.1.3-jre",
+ "com.google.protobuf:protobuf-java:3.25.1",
+ "io.grpc:grpc-api:1.64.2",
+ "io.grpc:grpc-protobuf-lite:1.64.2"
],
- "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-protobuf/1.56.0/grpc-protobuf-1.56.0.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-protobuf/1.64.2/grpc-protobuf-1.64.2.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf/1.56.0/grpc-protobuf-1.56.0.jar",
- "https://repo.maven.apache.org/maven2/io/grpc/grpc-protobuf/1.56.0/grpc-protobuf-1.56.0.jar"
+ "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf/1.64.2/grpc-protobuf-1.64.2.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-protobuf/1.64.2/grpc-protobuf-1.64.2.jar"
],
"packages": [
"io.grpc.protobuf"
],
- "sha256": "d5f2e33345b19a74815858b1bd8679943a1d4c5faed88e9ff792e74875e23966",
- "url": "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf/1.56.0/grpc-protobuf-1.56.0.jar"
+ "sha256": "308cb8aa5975159454dc2cee3dc3572286ed7e1dc302ff821b817651c5b7e4f6",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf/1.64.2/grpc-protobuf-1.64.2.jar"
},
{
- "coord": "io.grpc:grpc-protobuf:jar:sources:1.56.0",
+ "coord": "io.grpc:grpc-protobuf:jar:sources:1.64.2",
"dependencies": [
- "com.google.api.grpc:proto-google-common-protos:jar:sources:2.21.0",
+ "com.google.api.grpc:proto-google-common-protos:jar:sources:2.29.0",
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "com.google.protobuf:protobuf-java:jar:sources:3.23.2",
- "io.grpc:grpc-api:jar:sources:1.56.0",
- "io.grpc:grpc-context:jar:sources:1.56.0",
- "io.grpc:grpc-protobuf-lite:jar:sources:1.56.0"
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "com.google.protobuf:protobuf-java:jar:sources:3.25.1",
+ "io.grpc:grpc-api:jar:sources:1.64.2",
+ "io.grpc:grpc-protobuf-lite:jar:sources:1.64.2"
],
"directDependencies": [
- "com.google.api.grpc:proto-google-common-protos:jar:sources:2.21.0",
+ "com.google.api.grpc:proto-google-common-protos:jar:sources:2.29.0",
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "com.google.protobuf:protobuf-java:jar:sources:3.23.2",
- "io.grpc:grpc-api:jar:sources:1.56.0",
- "io.grpc:grpc-protobuf-lite:jar:sources:1.56.0"
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "com.google.protobuf:protobuf-java:jar:sources:3.25.1",
+ "io.grpc:grpc-api:jar:sources:1.64.2",
+ "io.grpc:grpc-protobuf-lite:jar:sources:1.64.2"
],
- "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-protobuf/1.56.0/grpc-protobuf-1.56.0-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-protobuf/1.64.2/grpc-protobuf-1.64.2-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf/1.56.0/grpc-protobuf-1.56.0-sources.jar",
- "https://repo.maven.apache.org/maven2/io/grpc/grpc-protobuf/1.56.0/grpc-protobuf-1.56.0-sources.jar"
+ "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf/1.64.2/grpc-protobuf-1.64.2-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-protobuf/1.64.2/grpc-protobuf-1.64.2-sources.jar"
],
"packages": [],
- "sha256": "bbf4b075755843c0db7f3d3b37c4f0f73698c6ea58e0930f5edfd65c88d81ff3",
- "url": "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf/1.56.0/grpc-protobuf-1.56.0-sources.jar"
+ "sha256": "b26d604310a2ca7bca79540ddf2f99cc31d43ed5fb0e71a169946a2e4a65fd2a",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-protobuf/1.64.2/grpc-protobuf-1.64.2-sources.jar"
},
{
- "coord": "io.grpc:grpc-stub:1.56.0",
+ "coord": "io.grpc:grpc-stub:1.64.2",
"dependencies": [
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
- "io.grpc:grpc-api:1.56.0",
- "io.grpc:grpc-context:1.56.0"
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "io.grpc:grpc-api:1.64.2"
],
"directDependencies": [
- "com.google.errorprone:error_prone_annotations:2.18.0",
- "com.google.guava:guava:32.1.1-jre",
- "io.grpc:grpc-api:1.56.0"
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "io.grpc:grpc-api:1.64.2"
],
- "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-stub/1.56.0/grpc-stub-1.56.0.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-stub/1.64.2/grpc-stub-1.64.2.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/grpc/grpc-stub/1.56.0/grpc-stub-1.56.0.jar",
- "https://repo.maven.apache.org/maven2/io/grpc/grpc-stub/1.56.0/grpc-stub-1.56.0.jar"
+ "https://repo1.maven.org/maven2/io/grpc/grpc-stub/1.64.2/grpc-stub-1.64.2.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-stub/1.64.2/grpc-stub-1.64.2.jar"
],
"packages": [
"io.grpc.stub",
"io.grpc.stub.annotations"
],
- "sha256": "4808c5f63233b2df2c2fd2ce14b8384ce9b0a367f7842e93b1e5b9bb342cdb0c",
- "url": "https://repo1.maven.org/maven2/io/grpc/grpc-stub/1.56.0/grpc-stub-1.56.0.jar"
+ "sha256": "c5a7b13768c2d94ecdac3d25594f86dd9d471358bce0e96f436d23af30cd250e",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-stub/1.64.2/grpc-stub-1.64.2.jar"
},
{
- "coord": "io.grpc:grpc-stub:jar:sources:1.56.0",
+ "coord": "io.grpc:grpc-stub:jar:sources:1.64.2",
"dependencies": [
"com.google.code.findbugs:jsr305:jar:sources:3.0.2",
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "io.grpc:grpc-api:jar:sources:1.56.0",
- "io.grpc:grpc-context:jar:sources:1.56.0"
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "io.grpc:grpc-api:jar:sources:1.64.2"
],
"directDependencies": [
- "com.google.errorprone:error_prone_annotations:jar:sources:2.18.0",
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "io.grpc:grpc-api:jar:sources:1.56.0"
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "io.grpc:grpc-api:jar:sources:1.64.2"
],
- "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-stub/1.56.0/grpc-stub-1.56.0-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-stub/1.64.2/grpc-stub-1.64.2-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/grpc/grpc-stub/1.56.0/grpc-stub-1.56.0-sources.jar",
- "https://repo.maven.apache.org/maven2/io/grpc/grpc-stub/1.56.0/grpc-stub-1.56.0-sources.jar"
+ "https://repo1.maven.org/maven2/io/grpc/grpc-stub/1.64.2/grpc-stub-1.64.2-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-stub/1.64.2/grpc-stub-1.64.2-sources.jar"
],
"packages": [],
- "sha256": "0e6e4641815ae8bd2f1da183c69f4027d83e7e101045773185036dac54fb448a",
- "url": "https://repo1.maven.org/maven2/io/grpc/grpc-stub/1.56.0/grpc-stub-1.56.0-sources.jar"
+ "sha256": "bdd76b1804f2ec994bfbb8ecc5beccec9cfaa1c8f6cbf489da59673ba139805a",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-stub/1.64.2/grpc-stub-1.64.2-sources.jar"
},
{
- "coord": "io.netty:netty-buffer:4.1.87.Final",
+ "coord": "io.grpc:grpc-util:1.64.2",
"dependencies": [
- "io.netty:netty-common:4.1.87.Final"
+ "com.google.android:annotations:4.1.1.4",
+ "com.google.code.findbugs:jsr305:3.0.2",
+ "com.google.code.gson:gson:2.10.1",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "io.grpc:grpc-api:1.64.2",
+ "io.grpc:grpc-context:1.64.2",
+ "io.grpc:grpc-core:1.64.2",
+ "io.perfmark:perfmark-api:0.26.0",
+ "org.codehaus.mojo:animal-sniffer-annotations:1.23"
],
"directDependencies": [
- "io.netty:netty-common:4.1.87.Final"
+ "com.google.guava:guava:32.1.3-jre",
+ "io.grpc:grpc-api:1.64.2",
+ "io.grpc:grpc-core:1.64.2",
+ "org.codehaus.mojo:animal-sniffer-annotations:1.23"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-buffer/4.1.87.Final/netty-buffer-4.1.87.Final.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-util/1.64.2/grpc-util-1.64.2.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-buffer/4.1.87.Final/netty-buffer-4.1.87.Final.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-buffer/4.1.87.Final/netty-buffer-4.1.87.Final.jar"
+ "https://repo1.maven.org/maven2/io/grpc/grpc-util/1.64.2/grpc-util-1.64.2.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-util/1.64.2/grpc-util-1.64.2.jar"
+ ],
+ "packages": [
+ "io.grpc.util"
+ ],
+ "sha256": "c0bec8da44130c3cd808ecc59fa69096169a2dd71904426f1172ce6e755e45ef",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-util/1.64.2/grpc-util-1.64.2.jar"
+ },
+ {
+ "coord": "io.grpc:grpc-util:jar:sources:1.64.2",
+ "dependencies": [
+ "com.google.android:annotations:jar:sources:4.1.1.4",
+ "com.google.code.findbugs:jsr305:jar:sources:3.0.2",
+ "com.google.code.gson:gson:jar:sources:2.10.1",
+ "com.google.errorprone:error_prone_annotations:jar:sources:2.23.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "io.grpc:grpc-api:jar:sources:1.64.2",
+ "io.grpc:grpc-context:jar:sources:1.64.2",
+ "io.grpc:grpc-core:jar:sources:1.64.2",
+ "io.perfmark:perfmark-api:jar:sources:0.26.0",
+ "org.codehaus.mojo:animal-sniffer-annotations:jar:sources:1.23"
+ ],
+ "directDependencies": [
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "io.grpc:grpc-api:jar:sources:1.64.2",
+ "io.grpc:grpc-core:jar:sources:1.64.2",
+ "org.codehaus.mojo:animal-sniffer-annotations:jar:sources:1.23"
+ ],
+ "file": "v1/https/repo1.maven.org/maven2/io/grpc/grpc-util/1.64.2/grpc-util-1.64.2-sources.jar",
+ "mirror_urls": [
+ "https://repo1.maven.org/maven2/io/grpc/grpc-util/1.64.2/grpc-util-1.64.2-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/grpc/grpc-util/1.64.2/grpc-util-1.64.2-sources.jar"
+ ],
+ "packages": [],
+ "sha256": "c3ff01030947ed719dc754bad620e2608bb999e09db8a8db3ad7a8d232e4e370",
+ "url": "https://repo1.maven.org/maven2/io/grpc/grpc-util/1.64.2/grpc-util-1.64.2-sources.jar"
+ },
+ {
+ "coord": "io.netty:netty-buffer:4.1.100.Final",
+ "dependencies": [
+ "io.netty:netty-common:4.1.100.Final"
+ ],
+ "directDependencies": [
+ "io.netty:netty-common:4.1.100.Final"
+ ],
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-buffer/4.1.100.Final/netty-buffer-4.1.100.Final.jar",
+ "mirror_urls": [
+ "https://repo1.maven.org/maven2/io/netty/netty-buffer/4.1.100.Final/netty-buffer-4.1.100.Final.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-buffer/4.1.100.Final/netty-buffer-4.1.100.Final.jar"
],
"packages": [
"io.netty.buffer",
"io.netty.buffer.search"
],
- "sha256": "4e3abdf4ddb8810f5fb4a2b57c9144407873b359faa1894007673ed1465dd3e8",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-buffer/4.1.87.Final/netty-buffer-4.1.87.Final.jar"
+ "sha256": "462874b44ee782fbefec64078cda6eb8e7bf9f0e0af71a928ef4c1f2d564f7ee",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-buffer/4.1.100.Final/netty-buffer-4.1.100.Final.jar"
},
{
- "coord": "io.netty:netty-buffer:jar:sources:4.1.87.Final",
+ "coord": "io.netty:netty-buffer:jar:sources:4.1.100.Final",
"dependencies": [
- "io.netty:netty-common:jar:sources:4.1.87.Final"
+ "io.netty:netty-common:jar:sources:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-common:jar:sources:4.1.87.Final"
+ "io.netty:netty-common:jar:sources:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-buffer/4.1.87.Final/netty-buffer-4.1.87.Final-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-buffer/4.1.100.Final/netty-buffer-4.1.100.Final-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-buffer/4.1.87.Final/netty-buffer-4.1.87.Final-sources.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-buffer/4.1.87.Final/netty-buffer-4.1.87.Final-sources.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-buffer/4.1.100.Final/netty-buffer-4.1.100.Final-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-buffer/4.1.100.Final/netty-buffer-4.1.100.Final-sources.jar"
],
"packages": [],
- "sha256": "9b7e313923c37d2467726a9fcf247bc128535cc2c5533c6ec4026d662c746945",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-buffer/4.1.87.Final/netty-buffer-4.1.87.Final-sources.jar"
+ "sha256": "7954d2b274d418d844a573a4712b18a7c40b85f94284f5429cb32e6408394d60",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-buffer/4.1.100.Final/netty-buffer-4.1.100.Final-sources.jar"
},
{
- "coord": "io.netty:netty-codec-http2:4.1.87.Final",
+ "coord": "io.netty:netty-codec-http2:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-codec-http:4.1.87.Final",
- "io.netty:netty-codec:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-handler:4.1.87.Final",
- "io.netty:netty-resolver:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-codec-http:4.1.100.Final",
+ "io.netty:netty-codec:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-handler:4.1.100.Final",
+ "io.netty:netty-resolver:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-codec-http:4.1.87.Final",
- "io.netty:netty-codec:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-handler:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-codec-http:4.1.100.Final",
+ "io.netty:netty-codec:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-handler:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec-http2/4.1.87.Final/netty-codec-http2-4.1.87.Final.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec-http2/4.1.100.Final/netty-codec-http2-4.1.100.Final.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-codec-http2/4.1.87.Final/netty-codec-http2-4.1.87.Final.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-codec-http2/4.1.87.Final/netty-codec-http2-4.1.87.Final.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-codec-http2/4.1.100.Final/netty-codec-http2-4.1.100.Final.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-codec-http2/4.1.100.Final/netty-codec-http2-4.1.100.Final.jar"
],
"packages": [
"io.netty.handler.codec.http2"
],
- "sha256": "e5d01b8db385b23103de67d3666c6fa9fa144d42217a3c8266b2a5efe425f093",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-codec-http2/4.1.87.Final/netty-codec-http2-4.1.87.Final.jar"
+ "sha256": "23b4a74350f4cf8d41b93fb93d52b5050667d8d53fffc385672c86eab83b8749",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-codec-http2/4.1.100.Final/netty-codec-http2-4.1.100.Final.jar"
},
{
- "coord": "io.netty:netty-codec-http2:jar:sources:4.1.87.Final",
+ "coord": "io.netty:netty-codec-http2:jar:sources:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-codec-http:jar:sources:4.1.87.Final",
- "io.netty:netty-codec:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-handler:jar:sources:4.1.87.Final",
- "io.netty:netty-resolver:jar:sources:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec-http:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-handler:jar:sources:4.1.100.Final",
+ "io.netty:netty-resolver:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-codec-http:jar:sources:4.1.87.Final",
- "io.netty:netty-codec:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-handler:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec-http:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-handler:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec-http2/4.1.87.Final/netty-codec-http2-4.1.87.Final-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec-http2/4.1.100.Final/netty-codec-http2-4.1.100.Final-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-codec-http2/4.1.87.Final/netty-codec-http2-4.1.87.Final-sources.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-codec-http2/4.1.87.Final/netty-codec-http2-4.1.87.Final-sources.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-codec-http2/4.1.100.Final/netty-codec-http2-4.1.100.Final-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-codec-http2/4.1.100.Final/netty-codec-http2-4.1.100.Final-sources.jar"
],
"packages": [],
- "sha256": "e4454eaed6d09569885b9f8bac0561c0db1896eaa32e5ccd60afea490569591a",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-codec-http2/4.1.87.Final/netty-codec-http2-4.1.87.Final-sources.jar"
+ "sha256": "10ddecd4de1351a4bdfa4ed6a113a47f808edfdd54b83cd477c4fb27e935f78e",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-codec-http2/4.1.100.Final/netty-codec-http2-4.1.100.Final-sources.jar"
},
{
- "coord": "io.netty:netty-codec-http:4.1.87.Final",
+ "coord": "io.netty:netty-codec-http:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-codec:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-handler:4.1.87.Final",
- "io.netty:netty-resolver:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-codec:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-handler:4.1.100.Final",
+ "io.netty:netty-resolver:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-codec:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-handler:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-codec:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-handler:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec-http/4.1.87.Final/netty-codec-http-4.1.87.Final.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec-http/4.1.100.Final/netty-codec-http-4.1.100.Final.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-codec-http/4.1.87.Final/netty-codec-http-4.1.87.Final.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-codec-http/4.1.87.Final/netty-codec-http-4.1.87.Final.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-codec-http/4.1.100.Final/netty-codec-http-4.1.100.Final.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-codec-http/4.1.100.Final/netty-codec-http-4.1.100.Final.jar"
],
"packages": [
"io.netty.handler.codec.http",
@@ -1687,55 +1750,55 @@
"io.netty.handler.codec.rtsp",
"io.netty.handler.codec.spdy"
],
- "sha256": "39368c15595b791dbe26e7639cf2e71b6647bab8838fa5781cf7a80762eac927",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-codec-http/4.1.87.Final/netty-codec-http-4.1.87.Final.jar"
+ "sha256": "326811d249cb0e5555e78e026e877834e792261c38f0666d80464426695d9590",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-codec-http/4.1.100.Final/netty-codec-http-4.1.100.Final.jar"
},
{
- "coord": "io.netty:netty-codec-http:jar:sources:4.1.87.Final",
+ "coord": "io.netty:netty-codec-http:jar:sources:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-codec:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-handler:jar:sources:4.1.87.Final",
- "io.netty:netty-resolver:jar:sources:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-handler:jar:sources:4.1.100.Final",
+ "io.netty:netty-resolver:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-codec:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-handler:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-handler:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec-http/4.1.87.Final/netty-codec-http-4.1.87.Final-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec-http/4.1.100.Final/netty-codec-http-4.1.100.Final-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-codec-http/4.1.87.Final/netty-codec-http-4.1.87.Final-sources.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-codec-http/4.1.87.Final/netty-codec-http-4.1.87.Final-sources.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-codec-http/4.1.100.Final/netty-codec-http-4.1.100.Final-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-codec-http/4.1.100.Final/netty-codec-http-4.1.100.Final-sources.jar"
],
"packages": [],
- "sha256": "f8cd7ba87346f5e70cf4cfc1c018ad682f7a7c099f802dac805dcb0614c67f19",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-codec-http/4.1.87.Final/netty-codec-http-4.1.87.Final-sources.jar"
+ "sha256": "6f1489ef88b5dc670addd06180e1f5f3bf86b9fd01758d8200a4f7307ddf77d1",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-codec-http/4.1.100.Final/netty-codec-http-4.1.100.Final-sources.jar"
},
{
- "coord": "io.netty:netty-codec-socks:4.1.87.Final",
+ "coord": "io.netty:netty-codec-socks:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-codec:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-resolver:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-codec:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-resolver:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-codec:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-codec:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec-socks/4.1.87.Final/netty-codec-socks-4.1.87.Final.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec-socks/4.1.100.Final/netty-codec-socks-4.1.100.Final.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-codec-socks/4.1.87.Final/netty-codec-socks-4.1.87.Final.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-codec-socks/4.1.87.Final/netty-codec-socks-4.1.87.Final.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-codec-socks/4.1.100.Final/netty-codec-socks-4.1.100.Final.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-codec-socks/4.1.100.Final/netty-codec-socks-4.1.100.Final.jar"
],
"packages": [
"io.netty.handler.codec.socks",
@@ -1743,50 +1806,50 @@
"io.netty.handler.codec.socksx.v4",
"io.netty.handler.codec.socksx.v5"
],
- "sha256": "41a76a64514f8f8f50ab8527254073521394138e8a96191905e8c143368b2006",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-codec-socks/4.1.87.Final/netty-codec-socks-4.1.87.Final.jar"
+ "sha256": "608a453b90f8384ba4efcdc6db7f899a1f10b9ea1890954696e6cfac45ff1ba9",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-codec-socks/4.1.100.Final/netty-codec-socks-4.1.100.Final.jar"
},
{
- "coord": "io.netty:netty-codec-socks:jar:sources:4.1.87.Final",
+ "coord": "io.netty:netty-codec-socks:jar:sources:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-codec:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-resolver:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-resolver:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-codec:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec-socks/4.1.87.Final/netty-codec-socks-4.1.87.Final-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec-socks/4.1.100.Final/netty-codec-socks-4.1.100.Final-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-codec-socks/4.1.87.Final/netty-codec-socks-4.1.87.Final-sources.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-codec-socks/4.1.87.Final/netty-codec-socks-4.1.87.Final-sources.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-codec-socks/4.1.100.Final/netty-codec-socks-4.1.100.Final-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-codec-socks/4.1.100.Final/netty-codec-socks-4.1.100.Final-sources.jar"
],
"packages": [],
- "sha256": "bc90f881572fafc3abdf38c6905e848173cedb5d54f0920380b52a4bb03fa4a9",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-codec-socks/4.1.87.Final/netty-codec-socks-4.1.87.Final-sources.jar"
+ "sha256": "a2f57ddadc95f8d06ce8b1713a30e72aab6acf9a20fba7eb0e30b7df9e6ec256",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-codec-socks/4.1.100.Final/netty-codec-socks-4.1.100.Final-sources.jar"
},
{
- "coord": "io.netty:netty-codec:4.1.87.Final",
+ "coord": "io.netty:netty-codec:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-resolver:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-resolver:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec/4.1.87.Final/netty-codec-4.1.87.Final.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec/4.1.100.Final/netty-codec-4.1.100.Final.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-codec/4.1.87.Final/netty-codec-4.1.87.Final.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-codec/4.1.87.Final/netty-codec-4.1.87.Final.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-codec/4.1.100.Final/netty-codec-4.1.100.Final.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-codec/4.1.100.Final/netty-codec-4.1.100.Final.jar"
],
"packages": [
"io.netty.handler.codec",
@@ -1800,39 +1863,39 @@
"io.netty.handler.codec.string",
"io.netty.handler.codec.xml"
],
- "sha256": "c643fe8318b7c7405acec13ed98e8f634f917960b20486295efbc5a690bac694",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-codec/4.1.87.Final/netty-codec-4.1.87.Final.jar"
+ "sha256": "180a01ed67af399602e24ff1c32864e7f57f57c4a0fa5e9ab3fe9b0e5e9cf051",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-codec/4.1.100.Final/netty-codec-4.1.100.Final.jar"
},
{
- "coord": "io.netty:netty-codec:jar:sources:4.1.87.Final",
+ "coord": "io.netty:netty-codec:jar:sources:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-resolver:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-resolver:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec/4.1.87.Final/netty-codec-4.1.87.Final-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-codec/4.1.100.Final/netty-codec-4.1.100.Final-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-codec/4.1.87.Final/netty-codec-4.1.87.Final-sources.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-codec/4.1.87.Final/netty-codec-4.1.87.Final-sources.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-codec/4.1.100.Final/netty-codec-4.1.100.Final-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-codec/4.1.100.Final/netty-codec-4.1.100.Final-sources.jar"
],
"packages": [],
- "sha256": "39a397c6636288eadaf1366e0f0da3e53a9e15bf35df252bbc36532ccf415547",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-codec/4.1.87.Final/netty-codec-4.1.87.Final-sources.jar"
+ "sha256": "33a18f8618c81b251e58d2cc94445bddf7bf0c84856213febf13c2b42fd5966a",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-codec/4.1.100.Final/netty-codec-4.1.100.Final-sources.jar"
},
{
- "coord": "io.netty:netty-common:4.1.87.Final",
+ "coord": "io.netty:netty-common:4.1.100.Final",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-common/4.1.87.Final/netty-common-4.1.87.Final.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-common/4.1.100.Final/netty-common-4.1.100.Final.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-common/4.1.87.Final/netty-common-4.1.87.Final.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-common/4.1.87.Final/netty-common-4.1.87.Final.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-common/4.1.100.Final/netty-common-4.1.100.Final.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-common/4.1.100.Final/netty-common-4.1.100.Final.jar"
],
"packages": [
"io.netty.util",
@@ -1845,106 +1908,106 @@
"io.netty.util.internal.shaded.org.jctools.util",
"io.netty.util.internal.svm"
],
- "sha256": "811443fc1411a9ef7c1f7b02a750e2a6688f1eec0ae7361ed07812a85d982c86",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-common/4.1.87.Final/netty-common-4.1.87.Final.jar"
+ "sha256": "d2908301f1ac6f2910900742473c15d701765d3d4467acdb1eebb9df3aa82885",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-common/4.1.100.Final/netty-common-4.1.100.Final.jar"
},
{
- "coord": "io.netty:netty-common:jar:sources:4.1.87.Final",
+ "coord": "io.netty:netty-common:jar:sources:4.1.100.Final",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-common/4.1.87.Final/netty-common-4.1.87.Final-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-common/4.1.100.Final/netty-common-4.1.100.Final-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-common/4.1.87.Final/netty-common-4.1.87.Final-sources.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-common/4.1.87.Final/netty-common-4.1.87.Final-sources.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-common/4.1.100.Final/netty-common-4.1.100.Final-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-common/4.1.100.Final/netty-common-4.1.100.Final-sources.jar"
],
"packages": [],
- "sha256": "679c23349089a12edeb0dbd6a4b05e1c7f527db69e33be8b98fb482c8c24db64",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-common/4.1.87.Final/netty-common-4.1.87.Final-sources.jar"
+ "sha256": "cb67f6cfc3ee2c9f0966ad244197e0c23a9c1c615aec764afd3793316766c67c",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-common/4.1.100.Final/netty-common-4.1.100.Final-sources.jar"
},
{
- "coord": "io.netty:netty-handler-proxy:4.1.87.Final",
+ "coord": "io.netty:netty-handler-proxy:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-codec-http:4.1.87.Final",
- "io.netty:netty-codec-socks:4.1.87.Final",
- "io.netty:netty-codec:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-handler:4.1.87.Final",
- "io.netty:netty-resolver:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-codec-http:4.1.100.Final",
+ "io.netty:netty-codec-socks:4.1.100.Final",
+ "io.netty:netty-codec:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-handler:4.1.100.Final",
+ "io.netty:netty-resolver:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-codec-http:4.1.87.Final",
- "io.netty:netty-codec-socks:4.1.87.Final",
- "io.netty:netty-codec:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-codec-http:4.1.100.Final",
+ "io.netty:netty-codec-socks:4.1.100.Final",
+ "io.netty:netty-codec:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-handler-proxy/4.1.87.Final/netty-handler-proxy-4.1.87.Final.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-handler-proxy/4.1.100.Final/netty-handler-proxy-4.1.100.Final.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-handler-proxy/4.1.87.Final/netty-handler-proxy-4.1.87.Final.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-handler-proxy/4.1.87.Final/netty-handler-proxy-4.1.87.Final.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-handler-proxy/4.1.100.Final/netty-handler-proxy-4.1.100.Final.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-handler-proxy/4.1.100.Final/netty-handler-proxy-4.1.100.Final.jar"
],
"packages": [
"io.netty.handler.proxy"
],
- "sha256": "65276b2ab687b1b7c1ceabcc941d6e2cce1038f35b581218d7dbf46b16c5b4ac",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-handler-proxy/4.1.87.Final/netty-handler-proxy-4.1.87.Final.jar"
+ "sha256": "686dbc2e61407f216d6cb267dd7954896f851dd34b58be3e757c5a89f20a5e67",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-handler-proxy/4.1.100.Final/netty-handler-proxy-4.1.100.Final.jar"
},
{
- "coord": "io.netty:netty-handler-proxy:jar:sources:4.1.87.Final",
+ "coord": "io.netty:netty-handler-proxy:jar:sources:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-codec-http:jar:sources:4.1.87.Final",
- "io.netty:netty-codec-socks:jar:sources:4.1.87.Final",
- "io.netty:netty-codec:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-handler:jar:sources:4.1.87.Final",
- "io.netty:netty-resolver:jar:sources:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec-http:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec-socks:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-handler:jar:sources:4.1.100.Final",
+ "io.netty:netty-resolver:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-codec-http:jar:sources:4.1.87.Final",
- "io.netty:netty-codec-socks:jar:sources:4.1.87.Final",
- "io.netty:netty-codec:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec-http:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec-socks:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-handler-proxy/4.1.87.Final/netty-handler-proxy-4.1.87.Final-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-handler-proxy/4.1.100.Final/netty-handler-proxy-4.1.100.Final-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-handler-proxy/4.1.87.Final/netty-handler-proxy-4.1.87.Final-sources.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-handler-proxy/4.1.87.Final/netty-handler-proxy-4.1.87.Final-sources.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-handler-proxy/4.1.100.Final/netty-handler-proxy-4.1.100.Final-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-handler-proxy/4.1.100.Final/netty-handler-proxy-4.1.100.Final-sources.jar"
],
"packages": [],
- "sha256": "b6c10bc43044cb3b53c601a9ffc75ffb3d6af7b9c218e6bff72f5beb22993947",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-handler-proxy/4.1.87.Final/netty-handler-proxy-4.1.87.Final-sources.jar"
+ "sha256": "40b1123cfaec444459e1184af882c994cb7c6a9b21cc7f4c98e902e2ee628f10",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-handler-proxy/4.1.100.Final/netty-handler-proxy-4.1.100.Final-sources.jar"
},
{
- "coord": "io.netty:netty-handler:4.1.87.Final",
+ "coord": "io.netty:netty-handler:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-codec:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-resolver:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-codec:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-resolver:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-codec:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-resolver:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-codec:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-resolver:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-handler/4.1.87.Final/netty-handler-4.1.87.Final.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-handler/4.1.100.Final/netty-handler-4.1.100.Final.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-handler/4.1.87.Final/netty-handler-4.1.87.Final.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-handler/4.1.87.Final/netty-handler-4.1.87.Final.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-handler/4.1.100.Final/netty-handler-4.1.100.Final.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-handler/4.1.100.Final/netty-handler-4.1.100.Final.jar"
],
"packages": [
"io.netty.handler.address",
@@ -1960,71 +2023,71 @@
"io.netty.handler.timeout",
"io.netty.handler.traffic"
],
- "sha256": "31a20e1de13b06a677a4719fd5aa95635982114a32c1d034739a48cceada2383",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-handler/4.1.87.Final/netty-handler-4.1.87.Final.jar"
+ "sha256": "0e10e584c2e7fdf7f4804e14760ed987003f1b62ab982f62eaf13a9892793d3a",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-handler/4.1.100.Final/netty-handler-4.1.100.Final.jar"
},
{
- "coord": "io.netty:netty-handler:jar:sources:4.1.87.Final",
+ "coord": "io.netty:netty-handler:jar:sources:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-codec:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-resolver:jar:sources:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-resolver:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-codec:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-resolver:jar:sources:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-codec:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-resolver:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-handler/4.1.87.Final/netty-handler-4.1.87.Final-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-handler/4.1.100.Final/netty-handler-4.1.100.Final-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-handler/4.1.87.Final/netty-handler-4.1.87.Final-sources.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-handler/4.1.87.Final/netty-handler-4.1.87.Final-sources.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-handler/4.1.100.Final/netty-handler-4.1.100.Final-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-handler/4.1.100.Final/netty-handler-4.1.100.Final-sources.jar"
],
"packages": [],
- "sha256": "e07bc5906414d522eb10cee0ce9162ab6382d05a71bf14ecb53630e3ebff5067",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-handler/4.1.87.Final/netty-handler-4.1.87.Final-sources.jar"
+ "sha256": "c591995af266161769863298dfe1aa2ff634c25abef33e710a1c9f2ef707fc20",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-handler/4.1.100.Final/netty-handler-4.1.100.Final-sources.jar"
},
{
- "coord": "io.netty:netty-resolver:4.1.87.Final",
+ "coord": "io.netty:netty-resolver:4.1.100.Final",
"dependencies": [
- "io.netty:netty-common:4.1.87.Final"
+ "io.netty:netty-common:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-common:4.1.87.Final"
+ "io.netty:netty-common:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-resolver/4.1.87.Final/netty-resolver-4.1.87.Final.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-resolver/4.1.100.Final/netty-resolver-4.1.100.Final.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-resolver/4.1.87.Final/netty-resolver-4.1.87.Final.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-resolver/4.1.87.Final/netty-resolver-4.1.87.Final.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-resolver/4.1.100.Final/netty-resolver-4.1.100.Final.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-resolver/4.1.100.Final/netty-resolver-4.1.100.Final.jar"
],
"packages": [
"io.netty.resolver"
],
- "sha256": "ee11d22d240b4b125edaed23ec7cc35ad9241b1b8fdd50630de20411ec345fc7",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-resolver/4.1.87.Final/netty-resolver-4.1.87.Final.jar"
+ "sha256": "c42c481c776e9d367a45cc3a67a06f65897d280334eb30b2362b8c55b7523f4f",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-resolver/4.1.100.Final/netty-resolver-4.1.100.Final.jar"
},
{
- "coord": "io.netty:netty-resolver:jar:sources:4.1.87.Final",
+ "coord": "io.netty:netty-resolver:jar:sources:4.1.100.Final",
"dependencies": [
- "io.netty:netty-common:jar:sources:4.1.87.Final"
+ "io.netty:netty-common:jar:sources:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-common:jar:sources:4.1.87.Final"
+ "io.netty:netty-common:jar:sources:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-resolver/4.1.87.Final/netty-resolver-4.1.87.Final-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-resolver/4.1.100.Final/netty-resolver-4.1.100.Final-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-resolver/4.1.87.Final/netty-resolver-4.1.87.Final-sources.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-resolver/4.1.87.Final/netty-resolver-4.1.87.Final-sources.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-resolver/4.1.100.Final/netty-resolver-4.1.100.Final-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-resolver/4.1.100.Final/netty-resolver-4.1.100.Final-sources.jar"
],
"packages": [],
- "sha256": "5c2ee5e9e26e10e1d94faaf7ee12fd021f967144eb630ef779aa81dd4d72da3e",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-resolver/4.1.87.Final/netty-resolver-4.1.87.Final-sources.jar"
+ "sha256": "5e8cd49d0804f2ec6c04f16f46cc1a6ebfb00745177d740c23a51b9913c28fc8",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-resolver/4.1.100.Final/netty-resolver-4.1.100.Final-sources.jar"
},
{
"coord": "io.netty:netty-tcnative-boringssl-static:2.0.61.Final",
@@ -2229,169 +2292,169 @@
"url": "https://repo1.maven.org/maven2/io/netty/netty-tcnative-classes/2.0.61.Final/netty-tcnative-classes-2.0.61.Final-sources.jar"
},
{
- "coord": "io.netty:netty-transport-classes-epoll:4.1.87.Final",
+ "coord": "io.netty:netty-transport-classes-epoll:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-resolver:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-resolver:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport-classes-epoll/4.1.87.Final/netty-transport-classes-epoll-4.1.87.Final.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport-classes-epoll/4.1.100.Final/netty-transport-classes-epoll-4.1.100.Final.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-transport-classes-epoll/4.1.87.Final/netty-transport-classes-epoll-4.1.87.Final.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-transport-classes-epoll/4.1.87.Final/netty-transport-classes-epoll-4.1.87.Final.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-transport-classes-epoll/4.1.100.Final/netty-transport-classes-epoll-4.1.100.Final.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-transport-classes-epoll/4.1.100.Final/netty-transport-classes-epoll-4.1.100.Final.jar"
],
"packages": [
"io.netty.channel.epoll"
],
- "sha256": "0b50ae7e1cb32f89c3a8b38a29605cfe6bd797693b3374fbf2577fbc03719297",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-transport-classes-epoll/4.1.87.Final/netty-transport-classes-epoll-4.1.87.Final.jar"
+ "sha256": "9abc4b17b1212b33666eae4e8013d0bb78a9a2bcd0a9a621b9bd06a7e5fc0050",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-transport-classes-epoll/4.1.100.Final/netty-transport-classes-epoll-4.1.100.Final.jar"
},
{
- "coord": "io.netty:netty-transport-classes-epoll:jar:sources:4.1.87.Final",
+ "coord": "io.netty:netty-transport-classes-epoll:jar:sources:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-resolver:jar:sources:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-resolver:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport-classes-epoll/4.1.87.Final/netty-transport-classes-epoll-4.1.87.Final-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport-classes-epoll/4.1.100.Final/netty-transport-classes-epoll-4.1.100.Final-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-transport-classes-epoll/4.1.87.Final/netty-transport-classes-epoll-4.1.87.Final-sources.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-transport-classes-epoll/4.1.87.Final/netty-transport-classes-epoll-4.1.87.Final-sources.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-transport-classes-epoll/4.1.100.Final/netty-transport-classes-epoll-4.1.100.Final-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-transport-classes-epoll/4.1.100.Final/netty-transport-classes-epoll-4.1.100.Final-sources.jar"
],
"packages": [],
- "sha256": "b34f037cb1d1134cf5d99f5da7d73dad6a33ebb1c21274e7e226d92548b99eb9",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-transport-classes-epoll/4.1.87.Final/netty-transport-classes-epoll-4.1.87.Final-sources.jar"
+ "sha256": "f6e36b8a08e593f4985210f90230ae4d4927342e372a5f60ca1d5ee6f4013d2c",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-transport-classes-epoll/4.1.100.Final/netty-transport-classes-epoll-4.1.100.Final-sources.jar"
},
{
- "coord": "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.87.Final",
+ "coord": "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-resolver:4.1.87.Final",
- "io.netty:netty-transport-classes-epoll:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-resolver:4.1.100.Final",
+ "io.netty:netty-transport-classes-epoll:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-transport-classes-epoll:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-transport-classes-epoll:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport-native-epoll/4.1.87.Final/netty-transport-native-epoll-4.1.87.Final-linux-x86_64.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport-native-epoll/4.1.100.Final/netty-transport-native-epoll-4.1.100.Final-linux-x86_64.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-transport-native-epoll/4.1.87.Final/netty-transport-native-epoll-4.1.87.Final-linux-x86_64.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-transport-native-epoll/4.1.87.Final/netty-transport-native-epoll-4.1.87.Final-linux-x86_64.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-transport-native-epoll/4.1.100.Final/netty-transport-native-epoll-4.1.100.Final-linux-x86_64.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-transport-native-epoll/4.1.100.Final/netty-transport-native-epoll-4.1.100.Final-linux-x86_64.jar"
],
"packages": [],
- "sha256": "bb3752881a027efa99388bef21f3287a6553299930f4226a6ea683da21c957f2",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-transport-native-epoll/4.1.87.Final/netty-transport-native-epoll-4.1.87.Final-linux-x86_64.jar"
+ "sha256": "a108fb47babb7678b5b2abb1fc8b34510bd2f705faa450447860647f2de4ebaa",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-transport-native-epoll/4.1.100.Final/netty-transport-native-epoll-4.1.100.Final-linux-x86_64.jar"
},
{
- "coord": "io.netty:netty-transport-native-epoll:jar:sources:4.1.87.Final",
+ "coord": "io.netty:netty-transport-native-epoll:jar:sources:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-resolver:jar:sources:4.1.87.Final",
- "io.netty:netty-transport-classes-epoll:jar:sources:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-resolver:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport-classes-epoll:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport-classes-epoll:jar:sources:4.1.87.Final",
- "io.netty:netty-transport-native-unix-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport-classes-epoll:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport-native-unix-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport-native-epoll/4.1.87.Final/netty-transport-native-epoll-4.1.87.Final-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport-native-epoll/4.1.100.Final/netty-transport-native-epoll-4.1.100.Final-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-transport-native-epoll/4.1.87.Final/netty-transport-native-epoll-4.1.87.Final-sources.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-transport-native-epoll/4.1.87.Final/netty-transport-native-epoll-4.1.87.Final-sources.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-transport-native-epoll/4.1.100.Final/netty-transport-native-epoll-4.1.100.Final-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-transport-native-epoll/4.1.100.Final/netty-transport-native-epoll-4.1.100.Final-sources.jar"
],
"packages": [],
- "sha256": "ca421381ba027e6cc90091b9fac013df025fd7a83f72982fb43ae5e440f49f0c",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-transport-native-epoll/4.1.87.Final/netty-transport-native-epoll-4.1.87.Final-sources.jar"
+ "sha256": "c6baf02f65e295baa8e4bfadaa2443b62187b6af4040cef165eaba0061c284ad",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-transport-native-epoll/4.1.100.Final/netty-transport-native-epoll-4.1.100.Final-sources.jar"
},
{
- "coord": "io.netty:netty-transport-native-unix-common:4.1.87.Final",
+ "coord": "io.netty:netty-transport-native-unix-common:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-resolver:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-resolver:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-transport:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport-native-unix-common/4.1.87.Final/netty-transport-native-unix-common-4.1.87.Final.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport-native-unix-common/4.1.100.Final/netty-transport-native-unix-common-4.1.100.Final.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-transport-native-unix-common/4.1.87.Final/netty-transport-native-unix-common-4.1.87.Final.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-transport-native-unix-common/4.1.87.Final/netty-transport-native-unix-common-4.1.87.Final.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-transport-native-unix-common/4.1.100.Final/netty-transport-native-unix-common-4.1.100.Final.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-transport-native-unix-common/4.1.100.Final/netty-transport-native-unix-common-4.1.100.Final.jar"
],
"packages": [
"io.netty.channel.unix"
],
- "sha256": "cc2d013c933c206605da01be39f4c2c1f4586a1b9103a665bd724bdd46d9cd0c",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-transport-native-unix-common/4.1.87.Final/netty-transport-native-unix-common-4.1.87.Final.jar"
+ "sha256": "5d888230a04c4a3e647c64e933cefb64fd49056f969bfb734c8a3fcedf0bea8a",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-transport-native-unix-common/4.1.100.Final/netty-transport-native-unix-common-4.1.100.Final.jar"
},
{
- "coord": "io.netty:netty-transport-native-unix-common:jar:sources:4.1.87.Final",
+ "coord": "io.netty:netty-transport-native-unix-common:jar:sources:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-resolver:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-resolver:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-transport:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-transport:jar:sources:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport-native-unix-common/4.1.87.Final/netty-transport-native-unix-common-4.1.87.Final-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport-native-unix-common/4.1.100.Final/netty-transport-native-unix-common-4.1.100.Final-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-transport-native-unix-common/4.1.87.Final/netty-transport-native-unix-common-4.1.87.Final-sources.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-transport-native-unix-common/4.1.87.Final/netty-transport-native-unix-common-4.1.87.Final-sources.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-transport-native-unix-common/4.1.100.Final/netty-transport-native-unix-common-4.1.100.Final-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-transport-native-unix-common/4.1.100.Final/netty-transport-native-unix-common-4.1.100.Final-sources.jar"
],
"packages": [],
- "sha256": "4a9b81c8dc1497038d1a6b4c01ba9fb8a86aa538dd2b8c6f47dc360ec09f6c94",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-transport-native-unix-common/4.1.87.Final/netty-transport-native-unix-common-4.1.87.Final-sources.jar"
+ "sha256": "28c71aa6c4f36cd8cc472142c858f324f8a23b85150a443e742c8814f46bb596",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-transport-native-unix-common/4.1.100.Final/netty-transport-native-unix-common-4.1.100.Final-sources.jar"
},
{
- "coord": "io.netty:netty-transport:4.1.87.Final",
+ "coord": "io.netty:netty-transport:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-resolver:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-resolver:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:4.1.87.Final",
- "io.netty:netty-common:4.1.87.Final",
- "io.netty:netty-resolver:4.1.87.Final"
+ "io.netty:netty-buffer:4.1.100.Final",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-resolver:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport/4.1.87.Final/netty-transport-4.1.87.Final.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport/4.1.100.Final/netty-transport-4.1.100.Final.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-transport/4.1.87.Final/netty-transport-4.1.87.Final.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-transport/4.1.87.Final/netty-transport-4.1.87.Final.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-transport/4.1.100.Final/netty-transport-4.1.100.Final.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-transport/4.1.100.Final/netty-transport-4.1.100.Final.jar"
],
"packages": [
"io.netty.bootstrap",
@@ -2407,37 +2470,37 @@
"io.netty.channel.socket.nio",
"io.netty.channel.socket.oio"
],
- "sha256": "17ed465cf5b1579b72379e47be3ba7fccb09dda8d95d47c8d668118483b08f43",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-transport/4.1.87.Final/netty-transport-4.1.87.Final.jar"
+ "sha256": "b1deeceedab3734cdb959c55f4be5ab4a667a8aed59121ff93763f49470f5470",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-transport/4.1.100.Final/netty-transport-4.1.100.Final.jar"
},
{
- "coord": "io.netty:netty-transport:jar:sources:4.1.87.Final",
+ "coord": "io.netty:netty-transport:jar:sources:4.1.100.Final",
"dependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-resolver:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-resolver:jar:sources:4.1.100.Final"
],
"directDependencies": [
- "io.netty:netty-buffer:jar:sources:4.1.87.Final",
- "io.netty:netty-common:jar:sources:4.1.87.Final",
- "io.netty:netty-resolver:jar:sources:4.1.87.Final"
+ "io.netty:netty-buffer:jar:sources:4.1.100.Final",
+ "io.netty:netty-common:jar:sources:4.1.100.Final",
+ "io.netty:netty-resolver:jar:sources:4.1.100.Final"
],
- "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport/4.1.87.Final/netty-transport-4.1.87.Final-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/io/netty/netty-transport/4.1.100.Final/netty-transport-4.1.100.Final-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/io/netty/netty-transport/4.1.87.Final/netty-transport-4.1.87.Final-sources.jar",
- "https://repo.maven.apache.org/maven2/io/netty/netty-transport/4.1.87.Final/netty-transport-4.1.87.Final-sources.jar"
+ "https://repo1.maven.org/maven2/io/netty/netty-transport/4.1.100.Final/netty-transport-4.1.100.Final-sources.jar",
+ "https://repo.maven.apache.org/maven2/io/netty/netty-transport/4.1.100.Final/netty-transport-4.1.100.Final-sources.jar"
],
"packages": [],
- "sha256": "f0f3423820aa0bf83dcef001a8343c742c69b0bd0041e2228a2c8d67ce0cacd8",
- "url": "https://repo1.maven.org/maven2/io/netty/netty-transport/4.1.87.Final/netty-transport-4.1.87.Final-sources.jar"
+ "sha256": "3c90fa8b30f71f02aff109d06931819ae54b16a3d61a15dfa9e24161057d57c4",
+ "url": "https://repo1.maven.org/maven2/io/netty/netty-transport/4.1.100.Final/netty-transport-4.1.100.Final-sources.jar"
},
{
"coord": "io.opencensus:opencensus-api:0.31.0",
"dependencies": [
- "io.grpc:grpc-context:1.56.0"
+ "io.grpc:grpc-context:1.64.2"
],
"directDependencies": [
- "io.grpc:grpc-context:1.56.0"
+ "io.grpc:grpc-context:1.64.2"
],
"file": "v1/https/repo1.maven.org/maven2/io/opencensus/opencensus-api/0.31.0/opencensus-api-0.31.0.jar",
"mirror_urls": [
@@ -2469,10 +2532,10 @@
{
"coord": "io.opencensus:opencensus-api:jar:sources:0.31.0",
"dependencies": [
- "io.grpc:grpc-context:jar:sources:1.56.0"
+ "io.grpc:grpc-context:jar:sources:1.64.2"
],
"directDependencies": [
- "io.grpc:grpc-context:jar:sources:1.56.0"
+ "io.grpc:grpc-context:jar:sources:1.64.2"
],
"file": "v1/https/repo1.maven.org/maven2/io/opencensus/opencensus-api/0.31.0/opencensus-api-0.31.0-sources.jar",
"mirror_urls": [
@@ -2486,12 +2549,12 @@
{
"coord": "io.opencensus:opencensus-contrib-grpc-metrics:0.31.0",
"dependencies": [
- "com.google.guava:guava:32.1.1-jre",
- "io.grpc:grpc-context:1.56.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "io.grpc:grpc-context:1.64.2",
"io.opencensus:opencensus-api:0.31.0"
],
"directDependencies": [
- "com.google.guava:guava:32.1.1-jre",
+ "com.google.guava:guava:32.1.3-jre",
"io.opencensus:opencensus-api:0.31.0"
],
"file": "v1/https/repo1.maven.org/maven2/io/opencensus/opencensus-contrib-grpc-metrics/0.31.0/opencensus-contrib-grpc-metrics-0.31.0.jar",
@@ -2508,12 +2571,12 @@
{
"coord": "io.opencensus:opencensus-contrib-grpc-metrics:jar:sources:0.31.0",
"dependencies": [
- "com.google.guava:guava:jar:sources:32.1.1-jre",
- "io.grpc:grpc-context:jar:sources:1.56.0",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
+ "io.grpc:grpc-context:jar:sources:1.64.2",
"io.opencensus:opencensus-api:jar:sources:0.31.0"
],
"directDependencies": [
- "com.google.guava:guava:jar:sources:32.1.1-jre",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
"io.opencensus:opencensus-api:jar:sources:0.31.0"
],
"file": "v1/https/repo1.maven.org/maven2/io/opencensus/opencensus-contrib-grpc-metrics/0.31.0/opencensus-contrib-grpc-metrics-0.31.0-sources.jar",
@@ -2528,11 +2591,11 @@
{
"coord": "io.opencensus:opencensus-contrib-http-util:0.28.0",
"dependencies": [
- "com.google.guava:guava:32.1.1-jre",
+ "com.google.guava:guava:32.1.3-jre",
"io.opencensus:opencensus-api:0.31.0"
],
"directDependencies": [
- "com.google.guava:guava:32.1.1-jre",
+ "com.google.guava:guava:32.1.3-jre",
"io.opencensus:opencensus-api:0.31.0"
],
"file": "v1/https/repo1.maven.org/maven2/io/opencensus/opencensus-contrib-http-util/0.28.0/opencensus-contrib-http-util-0.28.0.jar",
@@ -2550,11 +2613,11 @@
{
"coord": "io.opencensus:opencensus-contrib-http-util:jar:sources:0.28.0",
"dependencies": [
- "com.google.guava:guava:jar:sources:32.1.1-jre",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
"io.opencensus:opencensus-api:jar:sources:0.31.0"
],
"directDependencies": [
- "com.google.guava:guava:jar:sources:32.1.1-jre",
+ "com.google.guava:guava:jar:sources:32.1.3-jre",
"io.opencensus:opencensus-api:jar:sources:0.31.0"
],
"file": "v1/https/repo1.maven.org/maven2/io/opencensus/opencensus-contrib-http-util/0.28.0/opencensus-contrib-http-util-0.28.0-sources.jar",
@@ -2862,13 +2925,13 @@
"file": null
},
{
- "coord": "org.checkerframework:checker-qual:3.33.0",
+ "coord": "org.checkerframework:checker-qual:3.37.0",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/org/checkerframework/checker-qual/3.33.0/checker-qual-3.33.0.jar",
+ "file": "v1/https/repo1.maven.org/maven2/org/checkerframework/checker-qual/3.37.0/checker-qual-3.37.0.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/org/checkerframework/checker-qual/3.33.0/checker-qual-3.33.0.jar",
- "https://repo.maven.apache.org/maven2/org/checkerframework/checker-qual/3.33.0/checker-qual-3.33.0.jar"
+ "https://repo1.maven.org/maven2/org/checkerframework/checker-qual/3.37.0/checker-qual-3.37.0.jar",
+ "https://repo.maven.apache.org/maven2/org/checkerframework/checker-qual/3.37.0/checker-qual-3.37.0.jar"
],
"packages": [
"org.checkerframework.checker.builder.qual",
@@ -2902,21 +2965,21 @@
"org.checkerframework.dataflow.qual",
"org.checkerframework.framework.qual"
],
- "sha256": "e316255bbfcd9fe50d165314b85abb2b33cb2a66a93c491db648e498a82c2de1",
- "url": "https://repo1.maven.org/maven2/org/checkerframework/checker-qual/3.33.0/checker-qual-3.33.0.jar"
+ "sha256": "e4ce1376cc2735e1dde220b62ad0913f51297704daad155a33f386bc5db0d9f7",
+ "url": "https://repo1.maven.org/maven2/org/checkerframework/checker-qual/3.37.0/checker-qual-3.37.0.jar"
},
{
- "coord": "org.checkerframework:checker-qual:jar:sources:3.33.0",
+ "coord": "org.checkerframework:checker-qual:jar:sources:3.37.0",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/org/checkerframework/checker-qual/3.33.0/checker-qual-3.33.0-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/org/checkerframework/checker-qual/3.37.0/checker-qual-3.37.0-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/org/checkerframework/checker-qual/3.33.0/checker-qual-3.33.0-sources.jar",
- "https://repo.maven.apache.org/maven2/org/checkerframework/checker-qual/3.33.0/checker-qual-3.33.0-sources.jar"
+ "https://repo1.maven.org/maven2/org/checkerframework/checker-qual/3.37.0/checker-qual-3.37.0-sources.jar",
+ "https://repo.maven.apache.org/maven2/org/checkerframework/checker-qual/3.37.0/checker-qual-3.37.0-sources.jar"
],
"packages": [],
- "sha256": "443fa6151982bb4c6ce62e2938f53660085b13a7dceb517202777b87d0dea2c7",
- "url": "https://repo1.maven.org/maven2/org/checkerframework/checker-qual/3.33.0/checker-qual-3.33.0-sources.jar"
+ "sha256": "2ca31c7e959ad82fe270b2baac11a59c570f8778191233c54927e94adab7b640",
+ "url": "https://repo1.maven.org/maven2/org/checkerframework/checker-qual/3.37.0/checker-qual-3.37.0-sources.jar"
},
{
"coord": "org.codehaus.mojo:animal-sniffer-annotations:1.23",
@@ -2977,33 +3040,157 @@
"url": "https://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3-sources.jar"
},
{
- "coord": "org.ow2.asm:asm:9.1",
+ "coord": "org.jetbrains.kotlin:kotlin-stdlib-common:1.4.20",
+ "dependencies": [],
+ "directDependencies": [],
+ "file": "v1/https/repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib-common/1.4.20/kotlin-stdlib-common-1.4.20.jar",
+ "mirror_urls": [
+ "https://repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib-common/1.4.20/kotlin-stdlib-common-1.4.20.jar",
+ "https://repo.maven.apache.org/maven2/org/jetbrains/kotlin/kotlin-stdlib-common/1.4.20/kotlin-stdlib-common-1.4.20.jar"
+ ],
+ "packages": [],
+ "sha256": "a7112c9b3cefee418286c9c9372f7af992bd1e6e030691d52f60cb36dbec8320",
+ "url": "https://repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib-common/1.4.20/kotlin-stdlib-common-1.4.20.jar"
+ },
+ {
+ "coord": "org.jetbrains.kotlin:kotlin-stdlib-common:jar:sources:1.4.20",
+ "dependencies": [],
+ "directDependencies": [],
+ "file": "v1/https/repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib-common/1.4.20/kotlin-stdlib-common-1.4.20-sources.jar",
+ "mirror_urls": [
+ "https://repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib-common/1.4.20/kotlin-stdlib-common-1.4.20-sources.jar",
+ "https://repo.maven.apache.org/maven2/org/jetbrains/kotlin/kotlin-stdlib-common/1.4.20/kotlin-stdlib-common-1.4.20-sources.jar"
+ ],
+ "packages": [],
+ "sha256": "2ee47b54b4a20257d2ec73ddf34c44f72f4c7f6e5625b1a13de77c115a0e2afc",
+ "url": "https://repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib-common/1.4.20/kotlin-stdlib-common-1.4.20-sources.jar"
+ },
+ {
+ "coord": "org.jetbrains.kotlin:kotlin-stdlib:1.4.20",
+ "dependencies": [
+ "org.jetbrains.kotlin:kotlin-stdlib-common:1.4.20",
+ "org.jetbrains:annotations:13.0"
+ ],
+ "directDependencies": [
+ "org.jetbrains.kotlin:kotlin-stdlib-common:1.4.20",
+ "org.jetbrains:annotations:13.0"
+ ],
+ "file": "v1/https/repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib/1.4.20/kotlin-stdlib-1.4.20.jar",
+ "mirror_urls": [
+ "https://repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib/1.4.20/kotlin-stdlib-1.4.20.jar",
+ "https://repo.maven.apache.org/maven2/org/jetbrains/kotlin/kotlin-stdlib/1.4.20/kotlin-stdlib-1.4.20.jar"
+ ],
+ "packages": [
+ "kotlin",
+ "kotlin.annotation",
+ "kotlin.collections",
+ "kotlin.collections.builders",
+ "kotlin.collections.unsigned",
+ "kotlin.comparisons",
+ "kotlin.concurrent",
+ "kotlin.contracts",
+ "kotlin.coroutines",
+ "kotlin.coroutines.cancellation",
+ "kotlin.coroutines.intrinsics",
+ "kotlin.coroutines.jvm.internal",
+ "kotlin.experimental",
+ "kotlin.internal",
+ "kotlin.io",
+ "kotlin.js",
+ "kotlin.jvm",
+ "kotlin.jvm.functions",
+ "kotlin.jvm.internal",
+ "kotlin.jvm.internal.markers",
+ "kotlin.jvm.internal.unsafe",
+ "kotlin.math",
+ "kotlin.properties",
+ "kotlin.random",
+ "kotlin.ranges",
+ "kotlin.reflect",
+ "kotlin.sequences",
+ "kotlin.system",
+ "kotlin.text",
+ "kotlin.time"
+ ],
+ "sha256": "b8ab1da5cdc89cb084d41e1f28f20a42bd431538642a5741c52bbfae3fa3e656",
+ "url": "https://repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib/1.4.20/kotlin-stdlib-1.4.20.jar"
+ },
+ {
+ "coord": "org.jetbrains.kotlin:kotlin-stdlib:jar:sources:1.4.20",
+ "dependencies": [
+ "org.jetbrains.kotlin:kotlin-stdlib-common:jar:sources:1.4.20",
+ "org.jetbrains:annotations:jar:sources:13.0"
+ ],
+ "directDependencies": [
+ "org.jetbrains.kotlin:kotlin-stdlib-common:jar:sources:1.4.20",
+ "org.jetbrains:annotations:jar:sources:13.0"
+ ],
+ "file": "v1/https/repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib/1.4.20/kotlin-stdlib-1.4.20-sources.jar",
+ "mirror_urls": [
+ "https://repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib/1.4.20/kotlin-stdlib-1.4.20-sources.jar",
+ "https://repo.maven.apache.org/maven2/org/jetbrains/kotlin/kotlin-stdlib/1.4.20/kotlin-stdlib-1.4.20-sources.jar"
+ ],
+ "packages": [],
+ "sha256": "87f030b500d243f67ef25c3601daee17655ea9c82cb4916637f7b4f8bdd0a165",
+ "url": "https://repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib/1.4.20/kotlin-stdlib-1.4.20-sources.jar"
+ },
+ {
+ "coord": "org.jetbrains:annotations:13.0",
+ "dependencies": [],
+ "directDependencies": [],
+ "file": "v1/https/repo1.maven.org/maven2/org/jetbrains/annotations/13.0/annotations-13.0.jar",
+ "mirror_urls": [
+ "https://repo1.maven.org/maven2/org/jetbrains/annotations/13.0/annotations-13.0.jar",
+ "https://repo.maven.apache.org/maven2/org/jetbrains/annotations/13.0/annotations-13.0.jar"
+ ],
+ "packages": [
+ "org.intellij.lang.annotations",
+ "org.jetbrains.annotations"
+ ],
+ "sha256": "ace2a10dc8e2d5fd34925ecac03e4988b2c0f851650c94b8cef49ba1bd111478",
+ "url": "https://repo1.maven.org/maven2/org/jetbrains/annotations/13.0/annotations-13.0.jar"
+ },
+ {
+ "coord": "org.jetbrains:annotations:jar:sources:13.0",
+ "dependencies": [],
+ "directDependencies": [],
+ "file": "v1/https/repo1.maven.org/maven2/org/jetbrains/annotations/13.0/annotations-13.0-sources.jar",
+ "mirror_urls": [
+ "https://repo1.maven.org/maven2/org/jetbrains/annotations/13.0/annotations-13.0-sources.jar",
+ "https://repo.maven.apache.org/maven2/org/jetbrains/annotations/13.0/annotations-13.0-sources.jar"
+ ],
+ "packages": [],
+ "sha256": "42a5e144b8e81d50d6913d1007b695e62e614705268d8cf9f13dbdc478c2c68e",
+ "url": "https://repo1.maven.org/maven2/org/jetbrains/annotations/13.0/annotations-13.0-sources.jar"
+ },
+ {
+ "coord": "org.ow2.asm:asm:9.5",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/org/ow2/asm/asm/9.1/asm-9.1.jar",
+ "file": "v1/https/repo1.maven.org/maven2/org/ow2/asm/asm/9.5/asm-9.5.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/org/ow2/asm/asm/9.1/asm-9.1.jar",
- "https://repo.maven.apache.org/maven2/org/ow2/asm/asm/9.1/asm-9.1.jar"
+ "https://repo1.maven.org/maven2/org/ow2/asm/asm/9.5/asm-9.5.jar",
+ "https://repo.maven.apache.org/maven2/org/ow2/asm/asm/9.5/asm-9.5.jar"
],
"packages": [
"org.objectweb.asm",
"org.objectweb.asm.signature"
],
- "sha256": "cda4de455fab48ff0bcb7c48b4639447d4de859a7afc30a094a986f0936beba2",
- "url": "https://repo1.maven.org/maven2/org/ow2/asm/asm/9.1/asm-9.1.jar"
+ "sha256": "b62e84b5980729751b0458c534cf1366f727542bb8d158621335682a460f0353",
+ "url": "https://repo1.maven.org/maven2/org/ow2/asm/asm/9.5/asm-9.5.jar"
},
{
- "coord": "org.ow2.asm:asm:jar:sources:9.1",
+ "coord": "org.ow2.asm:asm:jar:sources:9.5",
"dependencies": [],
"directDependencies": [],
- "file": "v1/https/repo1.maven.org/maven2/org/ow2/asm/asm/9.1/asm-9.1-sources.jar",
+ "file": "v1/https/repo1.maven.org/maven2/org/ow2/asm/asm/9.5/asm-9.5-sources.jar",
"mirror_urls": [
- "https://repo1.maven.org/maven2/org/ow2/asm/asm/9.1/asm-9.1-sources.jar",
- "https://repo.maven.apache.org/maven2/org/ow2/asm/asm/9.1/asm-9.1-sources.jar"
+ "https://repo1.maven.org/maven2/org/ow2/asm/asm/9.5/asm-9.5-sources.jar",
+ "https://repo.maven.apache.org/maven2/org/ow2/asm/asm/9.5/asm-9.5-sources.jar"
],
"packages": [],
- "sha256": "64a1059b152dee08e203b4e1117b7979fff1578c545573f05329fb6d9090bb41",
- "url": "https://repo1.maven.org/maven2/org/ow2/asm/asm/9.1/asm-9.1-sources.jar"
+ "sha256": "11214bbba797e0615402b8d57fd4be83c93a65244c5a88778015520d61078376",
+ "url": "https://repo1.maven.org/maven2/org/ow2/asm/asm/9.5/asm-9.5-sources.jar"
}
],
"version": "0.1.0"
diff --git a/bazel/zetasql_deps_step_1.bzl b/bazel/zetasql_deps_step_1.bzl
index e18aeea54..825bf8eac 100644
--- a/bazel/zetasql_deps_step_1.bzl
+++ b/bazel/zetasql_deps_step_1.bzl
@@ -22,11 +22,26 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
# but depend on them being something different. So we have to override them both
# by defining the repo first.
load("@com_google_zetasql//bazel:zetasql_bazel_version.bzl", "zetasql_bazel_version")
+load("@toolchains_llvm//toolchain:deps.bzl", "bazel_toolchain_dependencies")
+load("@toolchains_llvm//toolchain:rules.bzl", "llvm_toolchain")
def zetasql_deps_step_1(add_bazel_version = True):
if add_bazel_version:
zetasql_bazel_version()
+ bazel_toolchain_dependencies()
+ llvm_toolchain(
+ name = "llvm_toolchain",
+ llvm_versions = {
+ "": "16.0.0",
+ # The LLVM repo stops providing pre-built binaries for the MacOS x86_64
+ # architecture for versions >= 16.0.0: https://github.com/llvm/llvm-project/releases,
+ # but our Kokoro MacOS tests are still using x86_64 (ventura).
+ # TODO: Upgrade the MacOS version to sonoma-slcn.
+ "darwin-x86_64": "15.0.7",
+ },
+ )
+
http_archive(
name = "io_bazel_rules_go",
integrity = "sha256-M6zErg9wUC20uJPJ/B3Xqb+ZjCPn/yxFF3QdQEmpdvg=",
@@ -82,6 +97,13 @@ def zetasql_deps_step_1(add_bazel_version = True):
sha256 = "2a4d07cd64b0719b39a7c12218a3e507672b82a97b98c6a89d38565894cf7c51",
)
+ if not native.existing_rule("build_bazel_rules_apple"):
+ http_archive(
+ name = "build_bazel_rules_apple",
+ sha256 = "d0f566ad408a6e4d179f0ac4d50a93494a70fcff8fab4c4af0a25b2c241c9b8d",
+ url = "https://github.com/bazelbuild/rules_apple/releases/download/3.6.0/rules_apple.3.6.0.tar.gz",
+ )
+
http_archive(
name = "rules_m4",
sha256 = "b0309baacfd1b736ed82dc2bb27b0ec38455a31a3d5d20f8d05e831ebeef1a8e",
diff --git a/bazel/zetasql_deps_step_2.bzl b/bazel/zetasql_deps_step_2.bzl
index b449b359d..6873dbe9c 100644
--- a/bazel/zetasql_deps_step_2.bzl
+++ b/bazel/zetasql_deps_step_2.bzl
@@ -19,10 +19,9 @@
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
+load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
load("@rules_bison//bison:bison.bzl", "bison_register_toolchains")
load("@rules_flex//flex:flex.bzl", "flex_register_toolchains")
-
-# Followup from zetasql_deps_step_1.bzl
load("@rules_foreign_cc//foreign_cc:repositories.bzl", "rules_foreign_cc_dependencies")
load("@rules_m4//m4:m4.bzl", "m4_register_toolchains")
load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies")
@@ -30,8 +29,67 @@ load("@rules_proto//proto:setup.bzl", "rules_proto_setup")
load("@rules_proto//proto:toolchains.bzl", "rules_proto_toolchains")
def _load_deps_from_step_1():
+ llvm_register_toolchains()
rules_foreign_cc_dependencies()
+def textmapper_dependencies():
+ """Textmapper and its transitive dependencies."""
+ go_repository(
+ name = "com_github_segmentio_encoding",
+ importpath = "github.com/segmentio/encoding",
+ version = "v0.4.0",
+ sum = "h1:MEBYvRqiUB2nfR2criEXWqwdY6HJOUrCn5hboVOVmy8=",
+ )
+ go_repository(
+ name = "com_github_segmentio_asm",
+ importpath = "github.com/segmentio/asm",
+ version = "v1.2.0",
+ sum = "h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=",
+ )
+ go_repository(
+ name = "dev_lsp_go_jsonrpc2",
+ importpath = "go.lsp.dev/jsonrpc2",
+ commit = "8c68d4fd37cd4bd06b62b3243f0d2292c681d164",
+ )
+ go_repository(
+ name = "dev_lsp_go_protocol",
+ importpath = "go.lsp.dev/protocol",
+ commit = "da30f9ae0326cc45b76adc5cd8920ac1ffa14a15",
+ )
+ go_repository(
+ name = "dev_lsp_go_uri",
+ importpath = "go.lsp.dev/uri",
+ commit = "63eaac75cc850f596be19073ff6d4ec198603779",
+ )
+ go_repository(
+ name = "dev_lsp_go_pkg",
+ importpath = "go.lsp.dev/pkg",
+ commit = "384b27a52fb2b5d74d78cfe89c7738e9a3e216a5",
+ )
+ go_repository(
+ name = "org_uber_go_zap",
+ importpath = "go.uber.org/zap",
+ version = "v1.27.0",
+ sum = "h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=",
+ )
+ go_repository(
+ name = "org_uber_go_multierr",
+ importpath = "go.uber.org/multierr",
+ version = "v1.11.0",
+ sum = "h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=",
+ )
+ go_repository(
+ name = "org_uber_go_goleak",
+ importpath = "go.uber.org/goleak",
+ version = "v1.3.0",
+ sum = "h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=",
+ )
+ go_repository(
+ name = "com_github_inspirer_textmapper",
+ commit = "8c81908b0030f754c42426cf6c5bb218086ce590",
+ importpath = "github.com/inspirer/textmapper",
+ )
+
def zetasql_deps_step_2(
name = None,
analyzer_deps = True,
@@ -111,10 +169,10 @@ def zetasql_deps_step_2(
#
http_archive(
name = "com_google_absl",
- # Commit from 2024-05-31
- url = "https://github.com/abseil/abseil-cpp/archive/d06b82773e2306a99a8971934fb5845d5c04a170.tar.gz",
- sha256 = "fd4c78078d160951f2317229511340f3e92344213bc145939995eea9ff9b9e48",
- strip_prefix = "abseil-cpp-d06b82773e2306a99a8971934fb5845d5c04a170",
+ # Commit from 2024-07-15
+ sha256 = "04612122377806a412124a89f6258206783d4d53fbc5ad4c9cdc1f3b49411bfb",
+ url = "https://github.com/abseil/abseil-cpp/archive/eb852207758a773965301d0ae717e4235fc5301a.tar.gz",
+ strip_prefix = "abseil-cpp-eb852207758a773965301d0ae717e4235fc5301a",
)
# required by many python libraries
@@ -267,11 +325,21 @@ py_library(
if not native.existing_rule("com_github_grpc_grpc"):
http_archive(
name = "com_github_grpc_grpc",
- urls = ["https://github.com/grpc/grpc/archive/refs/tags/v1.61.2.tar.gz"],
- sha256 = "86f8773434c4b8a4b64c67c91a19a90991f0da0ba054bbeb299dc1bc95fad1e9",
- strip_prefix = "grpc-1.61.2",
- # from https://github.com/google/gvisor/blob/master/tools/grpc_extra_deps.patch
- patches = ["@com_google_zetasql//bazel:grpc_extra_deps.patch"],
+ urls = ["https://github.com/grpc/grpc/archive/refs/tags/v1.64.2.tar.gz"],
+ sha256 = "c682fc39baefc6e804d735e6b48141157b7213602cc66dbe0bf375b904d8b5f9",
+ strip_prefix = "grpc-1.64.2",
+ patches = [
+ # from https://github.com/google/gvisor/blob/master/tools/grpc_extra_deps.patch
+ "@com_google_zetasql//bazel:grpc_extra_deps.patch",
+ # The patch is to workaround the following error:
+ # ```
+ # external/com_github_grpc_grpc/src/core/lib/event_engine/cf_engine/cfstream_endpoint.cc:21:10: error: module com_github_grpc_grpc//src/core:cf_event_engine does not depend on a module exporting 'absl/status/status.h'
+ # #include "absl/status/status.h"
+ # ^
+ # 1 error generated.
+ # ```
+ "@com_google_zetasql//bazel:grpc_cf_engine.patch",
+ ],
)
if analyzer_deps:
@@ -376,9 +444,9 @@ py_library(
if not native.existing_rule("google_bazel_common"):
http_archive(
name = "google_bazel_common",
- strip_prefix = "bazel-common-e768dbfea5bac239734b3f59b2a1d7464c6dbd26",
- urls = ["https://github.com/google/bazel-common/archive/e768dbfea5bac239734b3f59b2a1d7464c6dbd26.zip"],
- sha256 = "17f66ba76073a290add024a4ce7f5f92883832b7da85ffd7677e1f5de9a36153",
+ sha256 = "82a49fb27c01ad184db948747733159022f9464fc2e62da996fa700594d9ea42",
+ strip_prefix = "bazel-common-2a6b6406e12208e02b2060df0631fb30919080f3",
+ urls = ["https://github.com/google/bazel-common/archive/2a6b6406e12208e02b2060df0631fb30919080f3.zip"],
)
if evaluator_deps:
if not native.existing_rule("org_publicsuffix"):
@@ -411,11 +479,7 @@ alias(
go_rules_dependencies()
go_register_toolchains(version = "1.21.6")
gazelle_dependencies()
- go_repository(
- name = "com_github_inspirer_textmapper",
- commit = "8fdc73e6bd65dc4478b9d6526fe6c282f9c8d25b",
- importpath = "github.com/inspirer/textmapper",
- )
+ textmapper_dependencies()
##########################################################################
# Rules which depend on rules_foreign_cc
diff --git a/bazel/zetasql_java_deps.bzl b/bazel/zetasql_java_deps.bzl
index af8c0fed2..c4c780254 100644
--- a/bazel/zetasql_java_deps.bzl
+++ b/bazel/zetasql_java_deps.bzl
@@ -16,33 +16,33 @@
""" Load ZetaSQL Java Dependencies. """
-load("@rules_jvm_external//:defs.bzl", "maven_install")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")
load("@io_grpc_grpc_java//:repositories.bzl", "IO_GRPC_GRPC_JAVA_ARTIFACTS")
+load("@rules_jvm_external//:defs.bzl", "maven_install")
ZETASQL_MAVEN_ARTIFACTS = [
- "com.google.api.grpc:proto-google-common-protos:2.21.0",
+ "com.google.api.grpc:proto-google-common-protos:2.29.0",
"com.google.code.findbugs:jsr305:3.0.2",
- "com.google.errorprone:error_prone_annotations:2.11.0",
- "com.google.guava:guava:32.1.1-jre",
- "com.google.guava:guava-testlib:32.1.1-jre",
- "io.grpc:grpc-context:1.56.0",
- "io.grpc:grpc-core:1.56.0",
- "io.grpc:grpc-api:1.56.0",
- "io.grpc:grpc-netty:1.56.0",
- "io.grpc:grpc-protobuf-lite:1.56.0",
- "io.grpc:grpc-protobuf:1.56.0",
- "io.grpc:grpc-stub:1.56.0",
- "io.netty:netty-common:4.1.34.Final",
- "io.netty:netty-transport:4.1.34.Final",
- "io.opencensus:opencensus-api:0.21.0",
- "io.opencensus:opencensus-contrib-grpc-metrics:0.21.0",
+ "com.google.errorprone:error_prone_annotations:2.23.0",
+ "com.google.guava:guava:32.1.3-jre",
+ "com.google.guava:guava-testlib:32.1.3-jre",
+ "io.grpc:grpc-context:1.64.2",
+ "io.grpc:grpc-core:1.64.2",
+ "io.grpc:grpc-api:1.64.2",
+ "io.grpc:grpc-netty:1.64.2",
+ "io.grpc:grpc-protobuf-lite:1.64.2",
+ "io.grpc:grpc-protobuf:1.64.2",
+ "io.grpc:grpc-stub:1.64.2",
+ "io.netty:netty-common:4.1.100.Final",
+ "io.netty:netty-transport:4.1.100.Final",
+ "io.opencensus:opencensus-api:0.31.0",
+ "io.opencensus:opencensus-contrib-grpc-metrics:0.31.0",
"javax.annotation:javax.annotation-api:1.2",
"joda-time:joda-time:2.10.13",
- "com.google.code.gson:gson:jar:2.8.9",
+ "com.google.code.gson:gson:jar:2.10.1",
"com.google.protobuf:protobuf-java:3.23.1",
- "com.google.truth:truth:1.1.3",
+ "com.google.truth:truth:1.1.5",
"com.google.truth.extensions:truth-proto-extension:1.1.3",
"junit:junit:4.13.2",
]
diff --git a/docs/README.md b/docs/README.md
index 2f9f1067b..1c4774bd3 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -12,6 +12,7 @@ with ZetaSQL:
* [Data Types](data-types.md)
* [Query Syntax](query-syntax.md)
* [Pipe Query Syntax](pipe-syntax.md)
+* [Graph Query Syntax](graph-intro.md)
* [Data Manipulation Language Reference](data-manipulation-language.md)
* [Data Model](data-model.md)
* [Data Definition Language Reference](data-definition-language.md)
diff --git a/docs/aggregate-dp-functions.md b/docs/aggregate-dp-functions.md
index 5397fcdb2..7c06209ff 100644
--- a/docs/aggregate-dp-functions.md
+++ b/docs/aggregate-dp-functions.md
@@ -17,7 +17,7 @@ Note: In this topic, the privacy parameters in the examples are not
recommendations. You should work with your privacy or security officer to
determine the optimal privacy parameters for your dataset and organization.
-### Function list
+## Function list
@@ -29,20 +29,19 @@ determine the optimal privacy parameters for your dataset and organization.
- AVG
-
+ | AVG (Differential Privacy)
|
DIFFERENTIAL_PRIVACY -supported AVG .
Gets the differentially-private average of non-NULL ,
non-NaN values in a query with a
DIFFERENTIAL_PRIVACY clause.
+
|
- COUNT
-
+ | COUNT (Differential Privacy)
|
DIFFERENTIAL_PRIVACY -supported COUNT .
@@ -53,46 +52,46 @@ determine the optimal privacy parameters for your dataset and organization.
Signature 2: Gets the differentially-private count of rows with a
non-NULL expression in a query with a
DIFFERENTIAL_PRIVACY clause.
+
|
- PERCENTILE_CONT
-
+ | PERCENTILE_CONT (Differential Privacy)
|
DIFFERENTIAL_PRIVACY -supported PERCENTILE_CONT .
Computes a differentially-private percentile across privacy unit columns
in a query with a DIFFERENTIAL_PRIVACY clause.
+
|
- SUM
-
+ | SUM (Differential Privacy)
|
DIFFERENTIAL_PRIVACY -supported SUM .
Gets the differentially-private sum of non-NULL ,
non-NaN values in a query with a
DIFFERENTIAL_PRIVACY clause.
+
|
- VAR_POP
-
+ | VAR_POP (Differential Privacy)
|
- DIFFERENTIAL_PRIVACY -supported VAR_POP .
+ DIFFERENTIAL_PRIVACY -supported VAR_POP .
Computes the differentially-private population (biased) variance of values
in a query with a DIFFERENTIAL_PRIVACY clause.
+
|
- ANON_AVG
-
+ | ANON_AVG
|
Deprecated.
@@ -103,8 +102,7 @@ determine the optimal privacy parameters for your dataset and organization.
|
- ANON_COUNT
-
+ | ANON_COUNT
|
Deprecated.
@@ -121,8 +119,7 @@ determine the optimal privacy parameters for your dataset and organization.
|
- ANON_PERCENTILE_CONT
-
+ | ANON_PERCENTILE_CONT
|
Deprecated.
@@ -132,8 +129,7 @@ determine the optimal privacy parameters for your dataset and organization.
|
- ANON_QUANTILES
-
+ | ANON_QUANTILES
|
Deprecated.
@@ -143,8 +139,7 @@ determine the optimal privacy parameters for your dataset and organization.
|
- ANON_STDDEV_POP
-
+ | ANON_STDDEV_POP
|
Deprecated.
@@ -154,8 +149,7 @@ determine the optimal privacy parameters for your dataset and organization.
|
- ANON_SUM
-
+ | ANON_SUM
|
Deprecated.
@@ -166,8 +160,7 @@ determine the optimal privacy parameters for your dataset and organization.
|
- ANON_VAR_POP
-
+ | ANON_VAR_POP
|
Deprecated.
@@ -179,7 +172,7 @@ determine the optimal privacy parameters for your dataset and organization.
|
-### `AVG` (`DIFFERENTIAL_PRIVACY`)
+## `AVG` (`DIFFERENTIAL_PRIVACY`)
```sql
@@ -315,7 +308,7 @@ noise, see [Remove noise][dp-noise].
[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
-### `COUNT` (`DIFFERENTIAL_PRIVACY`)
+## `COUNT` (`DIFFERENTIAL_PRIVACY`)
+ [Signature 1](#dp_count_signature1): Returns the number of rows in a
@@ -585,7 +578,7 @@ noise, see [Remove noise][dp-noise].
[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
-### `PERCENTILE_CONT` (`DIFFERENTIAL_PRIVACY`)
+## `PERCENTILE_CONT` (`DIFFERENTIAL_PRIVACY`)
```sql
@@ -682,7 +675,7 @@ GROUP BY item;
[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
-### `SUM` (`DIFFERENTIAL_PRIVACY`)
+## `SUM` (`DIFFERENTIAL_PRIVACY`)
```sql
@@ -822,7 +815,7 @@ noise, see [Use differential privacy][dp-noise].
[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
-### `VAR_POP` (`DIFFERENTIAL_PRIVACY`)
+## `VAR_POP` (`DIFFERENTIAL_PRIVACY`)
```sql
@@ -921,7 +914,7 @@ GROUP BY item;
[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
-### `ANON_AVG` (DEPRECATED)
+## `ANON_AVG` (DEPRECATED)
Warning: This function has been deprecated. Use
@@ -1009,7 +1002,7 @@ noise [here][dp-noise].
[dp-clamp-between]: #dp_clamp_between
-### `ANON_COUNT` (DEPRECATED)
+## `ANON_COUNT` (DEPRECATED)
Warning: This function has been deprecated. Use
@@ -1174,7 +1167,7 @@ noise [here][dp-noise].
[dp-clamp-between]: #dp_clamp_between
-### `ANON_PERCENTILE_CONT` (DEPRECATED)
+## `ANON_PERCENTILE_CONT` (DEPRECATED)
Warning: This function has been deprecated. Use
@@ -1239,7 +1232,7 @@ GROUP BY item;
[dp-clamp-between]: #dp_clamp_between
-### `ANON_QUANTILES` (DEPRECATED)
+## `ANON_QUANTILES` (DEPRECATED)
Warning: This function has been deprecated. Use
@@ -1305,7 +1298,7 @@ GROUP BY item;
[dp-clamp-between]: #dp_clamp_between
-### `ANON_STDDEV_POP` (DEPRECATED)
+## `ANON_STDDEV_POP` (DEPRECATED)
Warning: This function has been deprecated. Use
@@ -1370,7 +1363,7 @@ GROUP BY item;
[dp-clamp-between]: #dp_clamp_between
-### `ANON_SUM` (DEPRECATED)
+## `ANON_SUM` (DEPRECATED)
Warning: This function has been deprecated. Use
@@ -1460,7 +1453,7 @@ noise [here][dp-noise].
[dp-clamp-between]: #dp_clamp_between
-### `ANON_VAR_POP` (DEPRECATED)
+## `ANON_VAR_POP` (DEPRECATED)
Warning: This function has been deprecated. Use
@@ -1529,6 +1522,8 @@ GROUP BY item;
[dp-clamp-between]: #dp_clamp_between
+## Supplemental materials
+
### Clamp values in a differentially private aggregate function
@@ -1800,12 +1795,8 @@ GROUP BY item;
Note: For more information about when and when not to use
noise, see [Remove noise][dp-noise].
-[dp-guide]: https://github.com/google/zetasql/blob/master/docs/differential-privacy.md
-
[dp-syntax]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_clause
-[agg-function-calls]: https://github.com/google/zetasql/blob/master/docs/aggregate-function-calls.md
-
[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
[dp-noise]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#eliminate_noise
@@ -1816,3 +1807,9 @@ noise, see [Remove noise][dp-noise].
[dp-clamped-named-imp]: #dp_clamped_named_implicit
+[dp-guide]: https://github.com/google/zetasql/blob/master/docs/differential-privacy.md
+
+[dp-syntax]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_clause
+
+[agg-function-calls]: https://github.com/google/zetasql/blob/master/docs/aggregate-function-calls.md
+
diff --git a/docs/aggregate_functions.md b/docs/aggregate_functions.md
index 38abf2d71..8283aaad6 100644
--- a/docs/aggregate_functions.md
+++ b/docs/aggregate_functions.md
@@ -8,7 +8,7 @@ ZetaSQL supports the following general aggregate functions.
To learn about the syntax for aggregate function calls, see
[Aggregate function calls][agg-function-calls].
-### Function list
+## Function list
@@ -20,8 +20,7 @@ To learn about the syntax for aggregate function calls, see
- ANY_VALUE
-
+ | ANY_VALUE
|
Gets an expression for some row.
@@ -29,62 +28,125 @@ To learn about the syntax for aggregate function calls, see
|
- ARRAY_AGG
+ | APPROX_COUNT_DISTINCT
+ |
+
+ Gets the approximate result for COUNT(DISTINCT expression) .
+ For more information, see Approximate aggregate functions.
+ |
+
+
+
+ APPROX_QUANTILES
|
- Gets an array of values.
+ Gets the approximate quantile boundaries.
+ For more information, see Approximate aggregate functions.
+
|
- ARRAY_CONCAT_AGG
+ | APPROX_TOP_COUNT
+ |
+
+ Gets the approximate top elements and their approximate count.
+ For more information, see Approximate aggregate functions.
+
+ |
+
+
+ APPROX_TOP_SUM
|
- Concatenates arrays and returns a single array as a result.
+ Gets the approximate top elements and sum, based on the approximate sum
+ of an assigned weight.
+ For more information, see Approximate aggregate functions.
+
|
- AVG
+ | ARRAY_AGG
+ |
+
+ Gets an array of values.
+
+ |
+
+
+
+ ARRAY_CONCAT_AGG
+ |
+
+ Concatenates arrays and returns a single array as a result.
+
+ |
+
+
+ AVG
|
Gets the average of non-NULL values.
+
|
- BIT_AND
+ | AVG (Differential Privacy)
+ |
+
+ DIFFERENTIAL_PRIVACY -supported AVG .
+ Gets the differentially-private average of non-NULL ,
+ non-NaN values in a query with a
+ DIFFERENTIAL_PRIVACY clause.
+
For more information, see Differential privacy functions.
+
+ |
+
+
+ BIT_AND
|
Performs a bitwise AND operation on an expression.
+
|
- BIT_OR
-
+ | BIT_OR
|
Performs a bitwise OR operation on an expression.
+
|
- BIT_XOR
-
+ | BIT_XOR
|
Performs a bitwise XOR operation on an expression.
+
|
- COUNT
+ | CORR
+ |
+
+ Computes the Pearson coefficient of correlation of a set of number pairs.
+ For more information, see Statistical aggregate functions.
+ |
+
+
+
+ COUNT
|
Gets the number of rows in the input, or the number of rows with an
@@ -93,17 +155,52 @@ To learn about the syntax for aggregate function calls, see
|
- COUNTIF
+ | COUNT (Differential Privacy)
+ |
+
+ DIFFERENTIAL_PRIVACY -supported COUNT .
+ Signature 1: Gets the differentially-private count of rows in a query with a
+ DIFFERENTIAL_PRIVACY clause.
+
+
+ Signature 2: Gets the differentially-private count of rows with a
+ non-NULL expression in a query with a
+ DIFFERENTIAL_PRIVACY clause.
+
For more information, see Differential privacy functions.
+ |
+
+
+
+ COUNTIF
|
- Gets the count of TRUE values for an expression.
+ Gets the number of TRUE values for an expression.
|
- GROUPING
+ | COVAR_POP
+ |
+
+ Computes the population covariance of a set of number pairs.
+ For more information, see Statistical aggregate functions.
+ |
+
+
+
+ COVAR_SAMP
+ |
+
+ Computes the sample covariance of a set of number pairs.
+ For more information, see Statistical aggregate functions.
+
+ |
+
+
+
+ GROUPING
|
Checks if a groupable value in the GROUP BY clause is
@@ -112,8 +209,7 @@ To learn about the syntax for aggregate function calls, see
|
- LOGICAL_AND
-
+ | LOGICAL_AND
|
Gets the logical AND of all non-NULL expressions.
@@ -121,8 +217,7 @@ To learn about the syntax for aggregate function calls, see
|
- LOGICAL_OR
-
+ | LOGICAL_OR
|
Gets the logical OR of all non-NULL expressions.
@@ -130,17 +225,16 @@ To learn about the syntax for aggregate function calls, see
|
- MAX
-
+ | MAX
|
Gets the maximum non-NULL value.
+
|
- MIN
-
+ | MIN
|
Gets the minimum non-NULL value.
@@ -148,28 +242,146 @@ To learn about the syntax for aggregate function calls, see
|
- STRING_AGG
+ | PERCENTILE_CONT (Differential Privacy)
+ |
+
+ DIFFERENTIAL_PRIVACY -supported PERCENTILE_CONT .
+ Computes a differentially-private percentile across privacy unit columns
+ in a query with a DIFFERENTIAL_PRIVACY clause.
+
For more information, see Differential privacy functions.
+
+ |
+
+
+
+ ST_EXTENT
+ |
+
+ Gets the bounding box for a group of GEOGRAPHY values.
+ For more information, see Geography functions.
+
+ |
+
+
+
+ ST_UNION_AGG
+ |
+
+ Aggregates over GEOGRAPHY values and gets their
+ point set union.
+ For more information, see Geography functions.
+
+ |
+
+
+
+ STDDEV
+ |
+
+ An alias of the STDDEV_SAMP function.
+ For more information, see Statistical aggregate functions.
+
+ |
+
+
+
+ STDDEV_POP
+ |
+
+ Computes the population (biased) standard deviation of the values.
+ For more information, see Statistical aggregate functions.
+
+ |
+
+
+
+ STDDEV_SAMP
+ |
+
+ Computes the sample (unbiased) standard deviation of the values.
+ For more information, see Statistical aggregate functions.
+
+ |
+
+
+ STRING_AGG
|
Concatenates non-NULL STRING or
BYTES values.
+
|
- SUM
-
+ | SUM
|
Gets the sum of non-NULL values.
+
+ |
+
+
+
+ SUM (Differential Privacy)
+ |
+
+ DIFFERENTIAL_PRIVACY -supported SUM .
+ Gets the differentially-private sum of non-NULL ,
+ non-NaN values in a query with a
+ DIFFERENTIAL_PRIVACY clause.
+
For more information, see Differential privacy functions.
+
+ |
+
+
+
+ VAR_POP
+ |
+
+ Computes the population (biased) variance of the values.
+ For more information, see Statistical aggregate functions.
+
+ |
+
+
+
+ VAR_POP (Differential Privacy)
+ |
+
+ DIFFERENTIAL_PRIVACY -supported VAR_POP .
+ Computes the differentially-private population (biased) variance of values
+ in a query with a DIFFERENTIAL_PRIVACY clause.
+
For more information, see Differential privacy functions.
+
+ |
+
+
+
+ VAR_SAMP
+ |
+
+ Computes the sample (unbiased) variance of the values.
+ For more information, see Statistical aggregate functions.
+
+ |
+
+
+
+ VARIANCE
+ |
+
+ An alias of VAR_SAMP .
+ For more information, see Statistical aggregate functions.
+
|
-### `ANY_VALUE`
+## `ANY_VALUE`
```sql
ANY_VALUE(
@@ -296,7 +508,7 @@ SELECT ANY_VALUE(fruit HAVING MIN sold) AS a_lowest_selling_fruit FROM Store;
*-------------------------*/
```
-### `ARRAY_AGG`
+## `ARRAY_AGG`
```sql
ARRAY_AGG(
@@ -467,7 +679,7 @@ FROM UNNEST([2, 1, -2, 3, -2, 1, 2]) AS x;
*----+-------------------------*/
```
-### `ARRAY_CONCAT_AGG`
+## `ARRAY_CONCAT_AGG`
```sql
ARRAY_CONCAT_AGG(
@@ -564,7 +776,7 @@ SELECT ARRAY_CONCAT_AGG(x ORDER BY ARRAY_LENGTH(x) LIMIT 2) AS array_concat_agg
*------------------*/
```
-### `AVG`
+## `AVG`
```sql
AVG(
@@ -696,7 +908,7 @@ FROM UNNEST([0, 2, NULL, 4, 4, 5]) AS x;
[dp-functions]: https://github.com/google/zetasql/blob/master/docs/aggregate-dp-functions.md
-### `BIT_AND`
+## `BIT_AND`
```sql
BIT_AND(
@@ -743,7 +955,7 @@ SELECT BIT_AND(x) as bit_and FROM UNNEST([0xF001, 0x00A1]) as x;
*---------*/
```
-### `BIT_OR`
+## `BIT_OR`
```sql
BIT_OR(
@@ -790,7 +1002,7 @@ SELECT BIT_OR(x) as bit_or FROM UNNEST([0xF001, 0x00A1]) as x;
*--------*/
```
-### `BIT_XOR`
+## `BIT_XOR`
```sql
BIT_XOR(
@@ -857,17 +1069,13 @@ SELECT BIT_XOR(DISTINCT x) AS bit_xor FROM UNNEST([1234, 5678, 1234]) AS x;
*---------*/
```
-### `COUNT`
-
-1.
+## `COUNT`
```sql
COUNT(*)
-[OVER over_clause]
+[ OVER over_clause ]
```
-2.
-
```sql
COUNT(
[ DISTINCT ]
@@ -889,16 +1097,26 @@ window_specification:
**Description**
-1. Returns the number of rows in the input.
-2. Returns the number of rows with `expression` evaluated to any value other
- than `NULL`.
-
-To learn more about the optional aggregate clauses that you can pass
-into this function, see
-[Aggregate function calls][aggregate-function-calls].
-
-This function can be used with the
-[`AGGREGATION_THRESHOLD` clause][agg-threshold-clause].
+Gets the number of rows in the input or the number of rows with an
+expression evaluated to any value other than `NULL`.
+
+**Definitions**
+
++ `*`: Use this value to get the number of all rows in the input.
++ `expression`: A value of any data type that represents the expression to
+ evaluate. If `DISTINCT` is present,
+ `expression` can only be a data type that is
+ [groupable][groupable-data-types].
++ `DISTINCT`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `HAVING { MAX | MIN }`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `OVER`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `over_clause`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `window_specification`: To learn more, see
+ [Window function calls][window-function-calls].
@@ -906,10 +1124,9 @@ This function can be used with the
[agg-threshold-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#agg_threshold_clause
-
+[window-function-calls]: https://github.com/google/zetasql/blob/master/docs/window-function-calls.md
-To learn more about the `OVER` clause and how to use it, see
-[Window function calls][window-function-calls].
+
@@ -917,22 +1134,34 @@ To learn more about the `OVER` clause and how to use it, see
-This function with DISTINCT supports specifying [collation][collation].
+**Details**
+
+To count the number of distinct values of an expression for which a
+certain condition is satisfied, you can use the following recipe:
+
+```sql
+COUNT(DISTINCT IF(condition, expression, NULL))
+```
+
+`IF` returns the value of `expression` if `condition` is `TRUE`, or
+`NULL` otherwise. The surrounding `COUNT(DISTINCT ...)` ignores the `NULL`
+values, so it counts only the distinct values of `expression` for which
+`condition` is `TRUE`.
+
+To count the number of non-distinct values of an expression for which a
+certain condition is satisfied, consider using the
+[`COUNTIF`][countif] function.
+
+This function with DISTINCT
supports specifying [collation][collation].
[collation]: https://github.com/google/zetasql/blob/master/docs/collation-concepts.md#collate_about
`COUNT` can be used with differential privacy. For more information, see
[Differentially private aggregate functions][dp-functions].
-**Supported Argument Types**
-
-`expression` can be any data type. If
-`DISTINCT` is present, `expression` can only be a data type that is
-[groupable][agg-data-type-properties].
-
-**Return Data Types**
+**Return type**
-INT64
+`INT64`
**Examples**
@@ -987,19 +1216,7 @@ FROM UNNEST([1, 4, NULL, 4, 5]) AS x;
*------+------------+---------*/
```
-If you want to count the number of distinct values of an expression for which a
-certain condition is satisfied, this is one recipe that you can use:
-
-```sql
-COUNT(DISTINCT IF(condition, expression, NULL))
-```
-
-Here, `IF` will return the value of `expression` if `condition` is `TRUE`, or
-`NULL` otherwise. The surrounding `COUNT(DISTINCT ...)` will ignore the `NULL`
-values, so it will count only the distinct values of `expression` for which
-`condition` is `TRUE`.
-
-For example, to count the number of distinct positive values of `x`:
+The following query counts the number of distinct positive values of `x`:
```sql
SELECT COUNT(DISTINCT IF(x > 0, x, NULL)) AS distinct_positive
@@ -1012,8 +1229,8 @@ FROM UNNEST([1, -2, 4, 1, -5, 4, 1, 3, -6, 1]) AS x;
*-------------------*/
```
-Or to count the number of distinct dates on which a certain kind of event
-occurred:
+The following query counts the number of distinct dates on which a certain kind
+of event occurred:
```sql
WITH Events AS (
@@ -1041,11 +1258,37 @@ FROM Events;
*------------------------------*/
```
-[agg-data-type-properties]: https://github.com/google/zetasql/blob/master/docs/data-types.md#data_type_properties
+The following query counts the number of distinct `id`s that exist in both
+the `customers` and `vendors` tables:
+
+```sql
+WITH
+ customers AS (
+ SELECT 1934 AS id, 'a' AS team UNION ALL
+ SELECT 2991, 'b' UNION ALL
+ SELECT 3988, 'c'),
+ vendors AS (
+ SELECT 1934 AS id, 'd' AS team UNION ALL
+ SELECT 2991, 'e' UNION ALL
+ SELECT 4366, 'f')
+SELECT
+ COUNT(DISTINCT IF(id IN (SELECT id FROM customers), id, NULL)) AS result
+FROM vendors;
+
+/*--------*
+ | result |
+ +--------+
+ | 2 |
+ *--------*/
+```
+
+[countif]: https://github.com/google/zetasql/blob/master/docs/aggregate_functions.md#countif
+
+[groupable-data-types]: https://github.com/google/zetasql/blob/master/docs/data-types.md#groupable_data_types
[dp-functions]: https://github.com/google/zetasql/blob/master/docs/aggregate-dp-functions.md
-### `COUNTIF`
+## `COUNTIF`
```sql
COUNTIF(
@@ -1068,30 +1311,21 @@ window_specification:
**Description**
-Returns the count of `TRUE` values for `expression`. Returns `0` if there are
-zero input rows, or if `expression` evaluates to `FALSE` or `NULL` for all rows.
-
-Since `expression` must be a `BOOL`, the form `COUNTIF(DISTINCT ...)` is
-generally not useful: there is only one distinct value of `TRUE`. So
-`COUNTIF(DISTINCT ...)` will return 1 if `expression` evaluates to `TRUE` for
-one or more input rows, or 0 otherwise.
-Usually when someone wants to combine `COUNTIF` and `DISTINCT`, they
-want to count the number of distinct values of an expression for which a certain
-condition is satisfied. One recipe to achieve this is the following:
+Gets the number of `TRUE` values for an expression.
-```sql
-COUNT(DISTINCT IF(condition, expression, NULL))
-```
-
-Note that this uses `COUNT`, not `COUNTIF`; the `IF` part has been moved inside.
-To learn more, see the examples for [`COUNT`](#count).
-
-To learn more about the optional aggregate clauses that you can pass
-into this function, see
-[Aggregate function calls][aggregate-function-calls].
+**Definitions**
-This function can be used with the
-[`AGGREGATION_THRESHOLD` clause][agg-threshold-clause].
++ `expression`: A `BOOL` value that represents the expression to evaluate.
++ `DISTINCT`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `HAVING { MAX | MIN }`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `OVER`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `over_clause`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `window_specification`: To learn more, see
+ [Window function calls][window-function-calls].
@@ -1099,10 +1333,9 @@ This function can be used with the
[agg-threshold-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#agg_threshold_clause
-
+[window-function-calls]: https://github.com/google/zetasql/blob/master/docs/window-function-calls.md
-To learn more about the `OVER` clause and how to use it, see
-[Window function calls][window-function-calls].
+
@@ -1110,13 +1343,15 @@ To learn more about the `OVER` clause and how to use it, see
-**Supported Argument Types**
+**Details**
-BOOL
+The function signature `COUNTIF(DISTINCT ...)` is generally not useful. If you
+would like to use `DISTINCT`, use `COUNT` with `DISTINCT IF`. For more
+information, see the [`COUNT`][count] function.
-**Return Data Types**
+**Return type**
-INT64
+`INT64`
**Examples**
@@ -1152,7 +1387,9 @@ FROM UNNEST([5, -2, 3, 6, -10, NULL, -7, 4, 0]) AS x;
*------+--------------*/
```
-### `GROUPING`
+[count]: https://github.com/google/zetasql/blob/master/docs/aggregate_functions.md#count
+
+## `GROUPING`
```sql
GROUPING(groupable_value)
@@ -1273,7 +1510,7 @@ ORDER BY product_name;
[group-by-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#group_by_clause
-### `LOGICAL_AND`
+## `LOGICAL_AND`
```sql
LOGICAL_AND(
@@ -1345,7 +1582,7 @@ SELECT LOGICAL_AND(x < 3) AS logical_and FROM UNNEST([1, 2, 4]) AS x;
*-------------*/
```
-### `LOGICAL_OR`
+## `LOGICAL_OR`
```sql
LOGICAL_OR(
@@ -1417,7 +1654,7 @@ SELECT LOGICAL_OR(x < 3) AS logical_or FROM UNNEST([1, 2, 4]) AS x;
*------------*/
```
-### `MAX`
+## `MAX`
```sql
MAX(
@@ -1509,7 +1746,7 @@ FROM UNNEST([8, NULL, 37, 55, NULL, 4]) AS x;
[agg-data-type-properties]: https://github.com/google/zetasql/blob/master/docs/data-types.md#data_type_properties
-### `MIN`
+## `MIN`
```sql
MIN(
@@ -1601,7 +1838,7 @@ FROM UNNEST([8, NULL, 37, 4, NULL, 55]) AS x;
[agg-data-type-properties]: https://github.com/google/zetasql/blob/master/docs/data-types.md#data_type_properties
-### `STRING_AGG`
+## `STRING_AGG`
```sql
STRING_AGG(
@@ -1745,7 +1982,7 @@ FROM UNNEST(["apple", NULL, "pear", "banana", "pear"]) AS fruit;
*--------+------------------------------*/
```
-### `SUM`
+## `SUM`
```sql
SUM(
diff --git a/docs/approximate_aggregate_functions.md b/docs/approximate_aggregate_functions.md
index 187a5b285..e06546232 100644
--- a/docs/approximate_aggregate_functions.md
+++ b/docs/approximate_aggregate_functions.md
@@ -23,7 +23,7 @@ sketches. If you would like to specify precision with sketches, see:
+ [HyperLogLog++ functions][hll-functions] to estimate cardinality.
-### Function list
+## Function list
-### `APPROX_COUNT_DISTINCT`
+## `APPROX_COUNT_DISTINCT`
```sql
APPROX_COUNT_DISTINCT(
@@ -115,7 +115,7 @@ FROM UNNEST([0, 1, 1, 2, 3, 5]) as x;
*-----------------*/
```
-### `APPROX_QUANTILES`
+## `APPROX_QUANTILES`
```sql
APPROX_QUANTILES(
@@ -217,7 +217,7 @@ FROM UNNEST([NULL, NULL, 1, 1, 1, 4, 5, 6, 7, 8, 9, 10]) AS x;
*------------------*/
```
-### `APPROX_TOP_COUNT`
+## `APPROX_TOP_COUNT`
```sql
APPROX_TOP_COUNT(
@@ -284,7 +284,7 @@ FROM UNNEST([NULL, "pear", "pear", "pear", "apple", NULL]) as x;
*------------------------*/
```
-### `APPROX_TOP_SUM`
+## `APPROX_TOP_SUM`
```sql
APPROX_TOP_SUM(
@@ -295,9 +295,9 @@ APPROX_TOP_SUM(
**Description**
-Returns the approximate top elements of `expression`, based on the sum of an
-assigned `weight`. The `number` parameter specifies the number of elements
-returned.
+Returns the approximate top elements of `expression`, ordered by the sum of the
+`weight` values provided for each unique value of `expression`. The `number`
+parameter specifies the number of elements returned.
If the `weight` input is negative or `NaN`, this function returns an error.
diff --git a/docs/array_functions.md b/docs/array_functions.md
index 233b23f3b..693e0583c 100644
--- a/docs/array_functions.md
+++ b/docs/array_functions.md
@@ -6,7 +6,7 @@
ZetaSQL supports the following array functions.
-### Function list
+## Function list
@@ -18,8 +18,7 @@ ZetaSQL supports the following array functions.
- ARRAY
-
+ | ARRAY
|
Produces an array with one element for each row in a subquery.
@@ -27,8 +26,17 @@ ZetaSQL supports the following array functions.
|
- ARRAY_AVG
+ | ARRAY_AGG
+ |
+
+ Gets an array of values.
+ For more information, see Aggregate functions.
+
+ |
+
+
+ ARRAY_AVG
|
Gets the average of non-NULL values in an array.
@@ -36,8 +44,7 @@ ZetaSQL supports the following array functions.
|
- ARRAY_CONCAT
-
+ | ARRAY_CONCAT
|
Concatenates one or more arrays with the same element type into a
@@ -46,8 +53,17 @@ ZetaSQL supports the following array functions.
|
- ARRAY_FILTER
+ | ARRAY_CONCAT_AGG
+ |
+
+ Concatenates arrays and returns a single array as a result.
+ For more information, see Aggregate functions.
+
+ |
+
+
+ ARRAY_FILTER
|
Takes an array, filters out unwanted elements, and returns the results
@@ -56,8 +72,7 @@ ZetaSQL supports the following array functions.
|
- ARRAY_FIRST
-
+ | ARRAY_FIRST
|
Gets the first element in an array.
@@ -65,8 +80,7 @@ ZetaSQL supports the following array functions.
|
- ARRAY_INCLUDES
-
+ | ARRAY_INCLUDES
|
Checks if there is an element in the array that is
@@ -75,8 +89,7 @@ ZetaSQL supports the following array functions.
|
- ARRAY_INCLUDES_ALL
-
+ | ARRAY_INCLUDES_ALL
|
Checks if all search values are in an array.
@@ -84,8 +97,7 @@ ZetaSQL supports the following array functions.
|
- ARRAY_INCLUDES_ANY
-
+ | ARRAY_INCLUDES_ANY
|
Checks if any search values are in an array.
@@ -93,8 +105,7 @@ ZetaSQL supports the following array functions.
|
- ARRAY_IS_DISTINCT
-
+ | ARRAY_IS_DISTINCT
|
Checks if an array contains no repeated elements.
@@ -102,8 +113,7 @@ ZetaSQL supports the following array functions.
|
- ARRAY_LAST
-
+ | ARRAY_LAST
|
Gets the last element in an array.
@@ -111,8 +121,7 @@ ZetaSQL supports the following array functions.
|
- ARRAY_LENGTH
-
+ | ARRAY_LENGTH
|
Gets the number of elements in an array.
@@ -120,8 +129,7 @@ ZetaSQL supports the following array functions.
|
- ARRAY_MAX
-
+ | ARRAY_MAX
|
Gets the maximum non-NULL value in an array.
@@ -129,8 +137,7 @@ ZetaSQL supports the following array functions.
|
- ARRAY_MIN
-
+ | ARRAY_MIN
|
Gets the minimum non-NULL value in an array.
@@ -138,8 +145,7 @@ ZetaSQL supports the following array functions.
|
- ARRAY_REVERSE
-
+ | ARRAY_REVERSE
|
Reverses the order of elements in an array.
@@ -147,8 +153,7 @@ ZetaSQL supports the following array functions.
|
- ARRAY_SLICE
-
+ | ARRAY_SLICE
|
Produces an array containing zero or more consecutive elements from an
@@ -157,8 +162,7 @@ ZetaSQL supports the following array functions.
|
- ARRAY_SUM
-
+ | ARRAY_SUM
|
Gets the sum of non-NULL values in an array.
@@ -166,18 +170,17 @@ ZetaSQL supports the following array functions.
|
- ARRAY_TO_STRING
-
+ | ARRAY_TO_STRING
|
Produces a concatenation of the elements in an array as a
STRING value.
+
|
- ARRAY_TRANSFORM
-
+ | ARRAY_TRANSFORM
|
Transforms the elements of an array, and returns the results in a new
@@ -186,8 +189,7 @@ ZetaSQL supports the following array functions.
|
- ARRAY_ZIP
-
+ | ARRAY_ZIP
|
Combines elements from two to four arrays into one array.
@@ -195,8 +197,7 @@ ZetaSQL supports the following array functions.
|
- FLATTEN
-
+ | FLATTEN
|
Flattens arrays of nested data to create a single flat array.
@@ -204,8 +205,7 @@ ZetaSQL supports the following array functions.
|
- GENERATE_ARRAY
-
+ | GENERATE_ARRAY
|
Generates an array of values in a range.
@@ -213,27 +213,132 @@ ZetaSQL supports the following array functions.
|
- GENERATE_DATE_ARRAY
-
+ | GENERATE_DATE_ARRAY
|
Generates an array of dates in a range.
+
|
- GENERATE_TIMESTAMP_ARRAY
+ | GENERATE_RANGE_ARRAY
+ |
+
+ Splits a range into an array of subranges.
+ For more information, see Range functions.
+
+ |
+
+
+ GENERATE_TIMESTAMP_ARRAY
|
Generates an array of timestamps in a range.
+
+ |
+
+
+
+ JSON_ARRAY
+ |
+
+ Creates a JSON array.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ JSON_ARRAY_APPEND
+ |
+
+ Appends JSON data to the end of a JSON array.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ JSON_ARRAY_INSERT
+ |
+
+ Inserts JSON data into a JSON array.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ JSON_EXTRACT_ARRAY
+ |
+
+ (Deprecated)
+ Extracts a JSON array and converts it to
+ a SQL ARRAY<JSON-formatted STRING>
+ or
+ ARRAY<JSON>
+
+ value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ JSON_EXTRACT_STRING_ARRAY
+ |
+
+ (Deprecated)
+ Extracts a JSON array of scalar values and converts it to a SQL
+ ARRAY<STRING> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ JSON_QUERY_ARRAY
+ |
+
+ Extracts a JSON array and converts it to
+ a SQL ARRAY<JSON-formatted STRING>
+ or
+ ARRAY<JSON>
+
+ value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ JSON_VALUE_ARRAY
+ |
+
+ Extracts a JSON array of scalar values and converts it to a SQL
+ ARRAY<STRING> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ RANGE_BUCKET
+ |
+
+ Scans through a sorted array and returns the 0-based position
+ of a point's upper bound.
+ For more information, see Mathematical functions.
+
|
-### `ARRAY`
+## `ARRAY`
```sql
ARRAY(subquery)
@@ -330,7 +435,7 @@ SELECT ARRAY
[array-data-type]: https://github.com/google/zetasql/blob/master/docs/data-types.md#array_type
-### `ARRAY_AVG`
+## `ARRAY_AVG`
```sql
ARRAY_AVG(input_array)
@@ -393,7 +498,7 @@ SELECT ARRAY_AVG([0, 2, NULL, 4, 4, 5]) as avg
*-----*/
```
-### `ARRAY_CONCAT`
+## `ARRAY_CONCAT`
```sql
ARRAY_CONCAT(array_expression[, ...])
@@ -426,7 +531,7 @@ SELECT ARRAY_CONCAT([1, 2], [3, 4], [5, 6]) as count_to_six;
[array-link-to-operators]: https://github.com/google/zetasql/blob/master/docs/operators.md
-### `ARRAY_FILTER`
+## `ARRAY_FILTER`
```sql
ARRAY_FILTER(array_expression, lambda_expression)
@@ -474,7 +579,7 @@ SELECT
[lambda-definition]: https://github.com/google/zetasql/blob/master/docs/functions-reference.md#lambdas
-### `ARRAY_FIRST`
+## `ARRAY_FIRST`
```sql
ARRAY_FIRST(array_expression)
@@ -508,7 +613,7 @@ SELECT ARRAY_FIRST(['a','b','c','d']) as first_element
[array-last]: #array_last
-### `ARRAY_INCLUDES`
+## `ARRAY_INCLUDES`
+ [Signature 1](#array_includes_signature1):
`ARRAY_INCLUDES(array_to_search, search_value)`
@@ -599,7 +704,7 @@ SELECT
[lambda-definition]: https://github.com/google/zetasql/blob/master/docs/functions-reference.md#lambdas
-### `ARRAY_INCLUDES_ALL`
+## `ARRAY_INCLUDES_ALL`
```sql
ARRAY_INCLUDES_ALL(array_to_search, search_values)
@@ -638,7 +743,7 @@ SELECT
*------+-------*/
```
-### `ARRAY_INCLUDES_ANY`
+## `ARRAY_INCLUDES_ANY`
```sql
ARRAY_INCLUDES_ANY(array_to_search, search_values)
@@ -677,7 +782,7 @@ SELECT
*------+-------*/
```
-### `ARRAY_IS_DISTINCT`
+## `ARRAY_IS_DISTINCT`
```sql
ARRAY_IS_DISTINCT(value)
@@ -763,7 +868,7 @@ SELECT ARRAY_IS_DISTINCT(NULL) AS is_distinct
*-------------*/
```
-### `ARRAY_LAST`
+## `ARRAY_LAST`
```sql
ARRAY_LAST(array_expression)
@@ -797,7 +902,7 @@ SELECT ARRAY_LAST(['a','b','c','d']) as last_element
[array-first]: #array_first
-### `ARRAY_LENGTH`
+## `ARRAY_LENGTH`
```sql
ARRAY_LENGTH(array_expression)
@@ -826,7 +931,7 @@ SELECT
*--------+--------*/
```
-### `ARRAY_MAX`
+## `ARRAY_MAX`
```sql
ARRAY_MAX(input_array)
@@ -865,7 +970,7 @@ SELECT ARRAY_MAX([8, 37, NULL, 55, 4]) as max
[data-type-properties]: https://github.com/google/zetasql/blob/master/docs/data-types.md#data_type_properties
-### `ARRAY_MIN`
+## `ARRAY_MIN`
```sql
ARRAY_MIN(input_array)
@@ -904,7 +1009,7 @@ SELECT ARRAY_MIN([8, 37, NULL, 4, 55]) as min
[data-type-properties]: https://github.com/google/zetasql/blob/master/docs/data-types.md#data_type_properties
-### `ARRAY_REVERSE`
+## `ARRAY_REVERSE`
```sql
ARRAY_REVERSE(value)
@@ -930,7 +1035,7 @@ SELECT ARRAY_REVERSE([1, 2, 3]) AS reverse_arr
*-------------*/
```
-### `ARRAY_SLICE`
+## `ARRAY_SLICE`
```sql
ARRAY_SLICE(array_to_slice, start_offset, end_offset)
@@ -1171,7 +1276,7 @@ SELECT ARRAY_SLICE(['a', 'b', NULL, 'd', 'e'], 1, 3) AS result
*--------------*/
```
-### `ARRAY_SUM`
+## `ARRAY_SUM`
```sql
ARRAY_SUM(input_array)
@@ -1233,7 +1338,7 @@ SELECT ARRAY_SUM([1, 2, 3, 4, 5, NULL, 4, 3, 2, 1]) as sum
*-----*/
```
-### `ARRAY_TO_STRING`
+## `ARRAY_TO_STRING`
```sql
ARRAY_TO_STRING(array_expression, delimiter[, null_text])
@@ -1279,7 +1384,7 @@ SELECT ARRAY_TO_STRING(['cake', 'pie', NULL], '--', 'MISSING') AS text
*--------------------------------*/
```
-### `ARRAY_TRANSFORM`
+## `ARRAY_TRANSFORM`
```sql
ARRAY_TRANSFORM(array_expression, lambda_expression)
@@ -1327,7 +1432,7 @@ SELECT
[lambda-definition]: https://github.com/google/zetasql/blob/master/docs/functions-reference.md#lambdas
-### `ARRAY_ZIP`
+## `ARRAY_ZIP`
```sql
ARRAY_ZIP(
@@ -1495,7 +1600,7 @@ SELECT ARRAY_ZIP([1, 2], ['a', 'b', 'c', 'd'], mode => 'TRUNCATE') AS results
-### `FLATTEN`
+## `FLATTEN`
```sql
FLATTEN(array_elements_field_access_expression)
@@ -1589,7 +1694,7 @@ For more examples, including how to use protocol buffers with `FLATTEN`, see the
[array-el-field-operator]: https://github.com/google/zetasql/blob/master/docs/operators.md#array_el_field_operator
-### `GENERATE_ARRAY`
+## `GENERATE_ARRAY`
```sql
GENERATE_ARRAY(start_expression, end_expression[, step_expression])
@@ -1711,7 +1816,7 @@ FROM UNNEST([3, 4, 5]) AS start;
+---------------*/
```
-### `GENERATE_DATE_ARRAY`
+## `GENERATE_DATE_ARRAY`
```sql
GENERATE_DATE_ARRAY(start_date, end_date[, INTERVAL INT64_expr date_part])
@@ -1854,7 +1959,7 @@ FROM (
*--------------------------------------------------------------*/
```
-### `GENERATE_TIMESTAMP_ARRAY`
+## `GENERATE_TIMESTAMP_ARRAY`
```sql
GENERATE_TIMESTAMP_ARRAY(start_timestamp, end_timestamp,
@@ -1998,6 +2103,8 @@ FROM
*--------------------------------------------------------------------------*/
```
+## Supplemental materials
+
### OFFSET and ORDINAL
For information about using `OFFSET` and `ORDINAL` with arrays, see
diff --git a/docs/bit_functions.md b/docs/bit_functions.md
index d61707ab7..32ab31641 100644
--- a/docs/bit_functions.md
+++ b/docs/bit_functions.md
@@ -6,7 +6,7 @@
ZetaSQL supports the following bit functions.
-### Function list
+## Function list
-### `BIT_CAST_TO_INT32`
+## `BIT_CAST_TO_INT32`
```sql
BIT_CAST_TO_INT32(value)
@@ -98,7 +127,7 @@ SELECT BIT_CAST_TO_UINT32(-1) as UINT32_value, BIT_CAST_TO_INT32(BIT_CAST_TO_UIN
*---------------+----------------------*/
```
-### `BIT_CAST_TO_INT64`
+## `BIT_CAST_TO_INT64`
```sql
BIT_CAST_TO_INT64(value)
@@ -131,7 +160,7 @@ SELECT BIT_CAST_TO_UINT64(-1) as UINT64_value, BIT_CAST_TO_INT64(BIT_CAST_TO_UIN
*-----------------------+----------------------*/
```
-### `BIT_CAST_TO_UINT32`
+## `BIT_CAST_TO_UINT32`
```sql
BIT_CAST_TO_UINT32(value)
@@ -164,7 +193,7 @@ SELECT -1 as UINT32_value, BIT_CAST_TO_UINT32(-1) as bit_cast_value;
*--------------+----------------------*/
```
-### `BIT_CAST_TO_UINT64`
+## `BIT_CAST_TO_UINT64`
```sql
BIT_CAST_TO_UINT64(value)
@@ -197,7 +226,7 @@ SELECT -1 as INT64_value, BIT_CAST_TO_UINT64(-1) as bit_cast_value;
*--------------+----------------------*/
```
-### `BIT_COUNT`
+## `BIT_COUNT`
```sql
BIT_COUNT(expression)
diff --git a/docs/conditional_expressions.md b/docs/conditional_expressions.md
index c73cc27a3..6230dec60 100644
--- a/docs/conditional_expressions.md
+++ b/docs/conditional_expressions.md
@@ -25,7 +25,6 @@ tuning.
CASE expr
-
|
Compares the given expression to each successive WHEN clause
@@ -35,7 +34,6 @@ tuning.
|
CASE
-
|
Evaluates the condition of each successive WHEN clause and
@@ -46,7 +44,6 @@ tuning.
|
COALESCE
-
|
Produces the value of the first non-NULL expression, if any,
@@ -56,7 +53,6 @@ tuning.
|
IF
-
|
If an expression evaluates to TRUE , produces a specified
@@ -66,7 +62,6 @@ tuning.
|
IFNULL
-
|
If an expression evaluates to NULL , produces a specified
@@ -76,7 +71,6 @@ tuning.
|
NULLIF
-
|
Produces NULL if the first expression that matches another
@@ -86,7 +80,6 @@ tuning.
|
NULLIFZERO
-
|
Produces NULL if an expression is 0 ,
@@ -96,7 +89,6 @@ tuning.
|
ZEROIFNULL
-
|
Produces 0 if an expression is NULL , otherwise
diff --git a/docs/conversion_functions.md b/docs/conversion_functions.md
index c483edd22..91debdf66 100644
--- a/docs/conversion_functions.md
+++ b/docs/conversion_functions.md
@@ -8,7 +8,7 @@ ZetaSQL supports conversion functions. These data type
conversions are explicit, but some conversions can happen implicitly. You can
learn more about implicit and explicit conversion [here][conversion-rules].
-### Function list
+## Function list
@@ -20,8 +20,79 @@ learn more about implicit and explicit conversion [here][conversion-rules].
- CAST
+ | ARRAY_TO_STRING
+ |
+
+ Produces a concatenation of the elements in an array as a
+ STRING value.
+ For more information, see Array functions.
+
+ |
+
+
+
+ BIT_CAST_TO_INT32
+ |
+
+ Casts bits to an INT32 value.
+ For more information, see Bit functions.
+
+ |
+
+
+
+ BIT_CAST_TO_INT64
+ |
+
+ Casts bits to an INT64 value.
+ For more information, see Bit functions.
+
+ |
+
+
+
+ BIT_CAST_TO_UINT32
+ |
+
+ Casts bits to a UINT32 value.
+ For more information, see Bit functions.
+
+ |
+
+
+
+ BIT_CAST_TO_UINT64
+ |
+
+ Casts bits to a UINT64 value.
+ For more information, see Bit functions.
+ |
+
+
+
+ BOOL
+ |
+
+ Converts a JSON boolean to a SQL BOOL value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ BOOL_ARRAY
+ |
+
+ Converts a JSON array of booleans to a
+ SQL ARRAY<BOOL> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ CAST
|
Convert the results of an expression to the given type.
@@ -29,8 +100,309 @@ learn more about implicit and explicit conversion [here][conversion-rules].
|
- PARSE_BIGNUMERIC
+ | CHR
+ |
+
+ Converts a Unicode code point to a character.
+ For more information, see String functions.
+
+ |
+
+
+
+ CODE_POINTS_TO_BYTES
+ |
+
+ Converts an array of extended ASCII code points to a
+ BYTES value.
+ For more information, see String aggregate functions.
+
+ |
+
+
+
+ CODE_POINTS_TO_STRING
+ |
+
+ Converts an array of extended ASCII code points to a
+ STRING value.
+ For more information, see String aggregate functions.
+
+ |
+
+
+
+ DATE_FROM_UNIX_DATE
+ |
+
+ Interprets an INT64 expression as the number of days
+ since 1970-01-01.
+ For more information, see Date functions.
+
+ |
+
+
+
+ FROM_BASE32
+ |
+
+ Converts a base32-encoded STRING value into a
+ BYTES value.
+ For more information, see String functions.
+
+ |
+
+
+
+ FROM_BASE64
+ |
+
+ Converts a base64-encoded STRING value into a
+ BYTES value.
+ For more information, see String functions.
+
+ |
+
+
+
+ FROM_HEX
+ |
+
+ Converts a hexadecimal-encoded STRING value into a
+ BYTES value.
+ For more information, see String functions.
+
+ |
+
+
+
+ FROM_PROTO
+ |
+
+ Converts a protocol buffer value into a ZetaSQL value.
+ For more information, see Protocol buffer functions.
+
+ |
+
+
+
+ INT32
+ |
+
+ Converts a JSON number to a SQL INT32 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ INT32_ARRAY
+ |
+
+ Converts a JSON array of numbers to a SQL ARRAY<INT32> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ INT64
+ |
+
+ Converts a JSON number to a SQL INT64 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ INT64_ARRAY
+ |
+
+ Converts a JSON array of numbers to a
+ SQL ARRAY<INT64> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_BOOL
+ |
+
+ Attempts to convert a JSON value to a SQL BOOL value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_BOOL_ARRAY
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<BOOL> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+
+
+ LAX_DOUBLE
+
+
+ |
+
+ Attempts to convert a JSON value to a
+ SQL DOUBLE value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+
+
+ LAX_DOUBLE_ARRAY
+
+
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<DOUBLE> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+
+
+ LAX_FLOAT
+
+
+ |
+
+ Attempts to convert a JSON value to a
+ SQL FLOAT value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+
+
+ LAX_FLOAT_ARRAY
+
+
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<FLOAT> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_INT32
+ |
+
+ Attempts to convert a JSON value to a SQL INT32 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_INT32_ARRAY
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<INT32> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_INT64
+ |
+
+ Attempts to convert a JSON value to a SQL INT64 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_INT64_ARRAY
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<INT64> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_STRING
+ |
+
+ Attempts to convert a JSON value to a SQL STRING value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_STRING_ARRAY
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<STRING> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_UINT32
+ |
+
+ Attempts to convert a JSON value to a SQL UINT32 value.
+ For more information, see JSON functions.
+
+ |
+
+
+ LAX_UINT64
+ |
+
+ Attempts to convert a JSON value to a SQL UINT64 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_UINT64_ARRAY
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<UINT64> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ PARSE_BIGNUMERIC
|
Converts a STRING value to a BIGNUMERIC value.
@@ -38,8 +410,38 @@ learn more about implicit and explicit conversion [here][conversion-rules].
|
- PARSE_NUMERIC
+ | PARSE_DATE
+ |
+
+ Converts a STRING value to a DATE value.
+ For more information, see Date functions.
+
+ |
+
+
+
+ PARSE_DATETIME
+ |
+
+ Converts a STRING value to a DATETIME value.
+ For more information, see Datetime functions.
+
+ |
+
+
+
+ PARSE_JSON
+ |
+
+ Converts a JSON-formatted STRING value to a
+ JSON value.
+ For more information, see JSON functions.
+ |
+
+
+
+ PARSE_NUMERIC
|
Converts a STRING value to a NUMERIC value.
@@ -47,8 +449,27 @@ learn more about implicit and explicit conversion [here][conversion-rules].
|
- SAFE_CAST
+ | PARSE_TIME
+ |
+
+ Converts a STRING value to a TIME value.
+ For more information, see Time functions.
+
+ |
+
+
+
+ PARSE_TIMESTAMP
+ |
+
+ Converts a STRING value to a TIMESTAMP value.
+ For more information, see Timestamp functions.
+
+ |
+
+
+ SAFE_CAST
|
Similar to the CAST function, but returns NULL
@@ -56,10 +477,244 @@ learn more about implicit and explicit conversion [here][conversion-rules].
|
+
+ SAFE_CONVERT_BYTES_TO_STRING
+ |
+
+ Converts a BYTES value to a STRING value and
+ replaces any invalid UTF-8 characters with the Unicode replacement character,
+ U+FFFD .
+ For more information, see String functions.
+
+ |
+
+
+
+ STRING (JSON)
+ |
+
+ Converts a JSON string to a SQL STRING value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ STRING_ARRAY
+ |
+
+ Converts a JSON array of strings to a SQL ARRAY<STRING>
+ value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ STRING (Timestamp)
+ |
+
+ Converts a TIMESTAMP value to a STRING value.
+ For more information, see Timestamp functions.
+
+ |
+
+
+
+ TIMESTAMP_MICROS
+ |
+
+ Converts the number of microseconds since
+ 1970-01-01 00:00:00 UTC to a TIMESTAMP .
+ For more information, see Timestamp functions.
+
+ |
+
+
+
+ TIMESTAMP_MILLIS
+ |
+
+ Converts the number of milliseconds since
+ 1970-01-01 00:00:00 UTC to a TIMESTAMP .
+ For more information, see Timestamp functions.
+
+ |
+
+
+
+ TIMESTAMP_SECONDS
+ |
+
+ Converts the number of seconds since
+ 1970-01-01 00:00:00 UTC to a TIMESTAMP .
+ For more information, see Timestamp functions.
+
+ |
+
+
+
+ TO_BASE32
+ |
+
+ Converts a BYTES value to a
+ base32-encoded STRING value.
+ For more information, see String functions.
+
+ |
+
+
+
+ TO_BASE64
+ |
+
+ Converts a BYTES value to a
+ base64-encoded STRING value.
+ For more information, see String functions.
+
+ |
+
+
+
+ TO_CODE_POINTS
+ |
+
+ Converts a STRING or BYTES value into an array of
+ extended ASCII code points.
+ For more information, see String functions.
+
+ |
+
+
+
+ TO_HEX
+ |
+
+ Converts a BYTES value to a
+ hexadecimal STRING value.
+ For more information, see String functions.
+
+ |
+
+
+
+ TO_JSON
+ |
+
+ Converts a SQL value to a JSON value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ TO_JSON_STRING
+ |
+
+ Converts a SQL value to a JSON-formatted STRING value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ TO_PROTO
+ |
+
+ Converts a ZetaSQL value into a protocol buffer value.
+ For more information, see Protocol buffer functions.
+
+ |
+
+
+
+ UINT32
+ |
+
+ Converts a JSON number to a SQL UINT32 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ UINT32_ARRAY
+ |
+
+ Converts a JSON array of numbers to a
+ SQL ARRAY<UINT32> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ UINT64
+ |
+
+ Converts a JSON number to a SQL UINT64 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ UINT64_ARRAY
+ |
+
+ Converts a JSON array of numbers to a SQL ARRAY<UINT64> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ UNIX_DATE
+ |
+
+ Converts a DATE value to the number of days since 1970-01-01.
+ For more information, see Date functions.
+
+ |
+
+
+
+ UNIX_MICROS
+ |
+
+ Converts a TIMESTAMP value to the number of microseconds since
+ 1970-01-01 00:00:00 UTC.
+ For more information, see Timestamp functions.
+
+ |
+
+
+
+ UNIX_MILLIS
+ |
+
+ Converts a TIMESTAMP value to the number of milliseconds
+ since 1970-01-01 00:00:00 UTC.
+ For more information, see Timestamp functions.
+
+ |
+
+
+
+ UNIX_SECONDS
+ |
+
+ Converts a TIMESTAMP value to the number of seconds since
+ 1970-01-01 00:00:00 UTC.
+ For more information, see Timestamp functions.
+
+ |
+
+
-### `CAST`
+## `CAST`
```sql
@@ -1390,7 +2045,7 @@ SELECT CAST('06/02/2020 17:00:53.110 +00' AS TIMESTAMP FORMAT 'MM/DD/YYYY HH24:M
[con-func-safecast]: #safe_casting
-### `PARSE_BIGNUMERIC`
+## `PARSE_BIGNUMERIC`
```sql
@@ -1623,7 +2278,7 @@ SELECT PARSE_BIGNUMERIC("$12.34") as parsed;
[bignumeric-type]: https://github.com/google/zetasql/blob/master/docs/data-types.md#decimal_types
-### `PARSE_NUMERIC`
+## `PARSE_NUMERIC`
```sql
PARSE_NUMERIC(string_expression)
@@ -1856,7 +2511,7 @@ SELECT PARSE_NUMERIC("$12.34") as parsed;
[numeric-type]: https://github.com/google/zetasql/blob/master/docs/data-types.md#decimal_types
-### `SAFE_CAST`
+## `SAFE_CAST`
@@ -1904,133 +2559,5 @@ are replaced with the unicode replacement character, `U+FFFD`.
[formatting-syntax]: https://github.com/google/zetasql/blob/master/docs/format-elements.md#formatting_syntax
-### Other conversion functions
-
-
-You can learn more about these conversion functions elsewhere in the
-documentation:
-
-
-
-Conversion function | From | To
-------- | -------- | -------
-[ARRAY_TO_STRING][ARRAY_STRING] | ARRAY | STRING
-[BIT_CAST_TO_INT32][BIT_I32] | UINT32 | INT32
-[BIT_CAST_TO_INT64][BIT_I64] | UINT64 | INT64
-[BIT_CAST_TO_UINT32][BIT_U32] | INT32 | UINT32
-[BIT_CAST_TO_UINT64][BIT_U64] | INT64 | UINT64
-[BOOL][JSON_TO_BOOL] | JSON | BOOL
-[DATE][T_DATE] | Various data types | DATE
-[DATE_FROM_UNIX_DATE][T_DATE_FROM_UNIX_DATE] | INT64 | DATE
-[DATETIME][T_DATETIME] | Various data types | DATETIME
-[DOUBLE][JSON_TO_DOUBLE] | JSON | DOUBLE
-[FROM_BASE32][F_B32] | STRING | BYTEs
-[FROM_BASE64][F_B64] | STRING | BYTES
-[FROM_HEX][F_HEX] | STRING | BYTES
-[FROM_PROTO][F_PROTO] | PROTO value | Most data types
-[INT64][JSON_TO_INT64] | JSON | INT64
-[PARSE_DATE][P_DATE] | STRING | DATE
-[PARSE_DATETIME][P_DATETIME] | STRING | DATETIME
-[PARSE_JSON][P_JSON] | STRING | JSON
-[PARSE_TIME][P_TIME] | STRING | TIME
-[PARSE_TIMESTAMP][P_TIMESTAMP] | STRING | TIMESTAMP
-[SAFE_CONVERT_BYTES_TO_STRING][SC_BTS] | BYTES | STRING
-[STRING][STRING_TIMESTAMP] | TIMESTAMP | STRING
-[STRING][JSON_TO_STRING] | JSON | STRING
-[TIME][T_TIME] | Various data types | TIME
-[TIMESTAMP][T_TIMESTAMP] | Various data types | TIMESTAMP
-[TIMESTAMP_FROM_UNIX_MICROS][T_TIMESTAMP_FROM_UNIX_MICROS] | INT64 | TIMESTAMP
-[TIMESTAMP_FROM_UNIX_MILLIS][T_TIMESTAMP_FROM_UNIX_MILLIS] | INT64 | TIMESTAMP
-[TIMESTAMP_FROM_UNIX_SECONDS][T_TIMESTAMP_FROM_UNIX_SECONDS] | INT64 | TIMESTAMP
-[TIMESTAMP_MICROS][T_TIMESTAMP_MICROS] | INT64 | TIMESTAMP
-[TIMESTAMP_MILLIS][T_TIMESTAMP_MILLIS] | INT64 | TIMESTAMP
-[TIMESTAMP_SECONDS][T_TIMESTAMP_SECONDS] | INT64 | TIMESTAMP
-[TO_BASE32][T_B32] | BYTES | STRING
-[TO_BASE64][T_B64] | BYTES | STRING
-[TO_HEX][T_HEX] | BYTES | STRING
-[TO_JSON][T_JSON] | All data types | JSON
-[TO_JSON_STRING][T_JSON_STRING] | All data types | STRING
-[TO_PROTO][T_PROTO] | Most data types | PROTO value
-
-
-
-
-
[conversion-rules]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md
-[ARRAY_STRING]: https://github.com/google/zetasql/blob/master/docs/array_functions.md#array_to_string
-
-[BIT_I32]: https://github.com/google/zetasql/blob/master/docs/bit_functions.md#bit_cast_to_int32
-
-[BIT_U32]: https://github.com/google/zetasql/blob/master/docs/bit_functions.md#bit_cast_to_uint32
-
-[BIT_I64]: https://github.com/google/zetasql/blob/master/docs/bit_functions.md#bit_cast_to_int64
-
-[BIT_U64]: https://github.com/google/zetasql/blob/master/docs/bit_functions.md#bit_cast_to_uint64
-
-[F_B32]: https://github.com/google/zetasql/blob/master/docs/string_functions.md#from_base32
-
-[F_B64]: https://github.com/google/zetasql/blob/master/docs/string_functions.md#from_base64
-
-[F_HEX]: https://github.com/google/zetasql/blob/master/docs/string_functions.md#from_hex
-
-[F_PROTO]: https://github.com/google/zetasql/blob/master/docs/protocol_buffer_functions.md#from_proto
-
-[P_DATE]: https://github.com/google/zetasql/blob/master/docs/date_functions.md#parse_date
-
-[P_DATETIME]: https://github.com/google/zetasql/blob/master/docs/datetime_functions.md#parse_datetime
-
-[P_JSON]: https://github.com/google/zetasql/blob/master/docs/json_functions.md#parse_json
-
-[P_TIME]: https://github.com/google/zetasql/blob/master/docs/time_functions.md#parse_time
-
-[P_TIMESTAMP]: https://github.com/google/zetasql/blob/master/docs/timestamp_functions.md#parse_timestamp
-
-[SC_BTS]: https://github.com/google/zetasql/blob/master/docs/string_functions.md#safe_convert_bytes_to_string
-
-[STRING_TIMESTAMP]: https://github.com/google/zetasql/blob/master/docs/timestamp_functions.md#string
-
-[T_B32]: https://github.com/google/zetasql/blob/master/docs/string_functions.md#to_base32
-
-[T_B64]: https://github.com/google/zetasql/blob/master/docs/string_functions.md#to_base64
-
-[T_HEX]: https://github.com/google/zetasql/blob/master/docs/string_functions.md#to_hex
-
-[T_JSON]: https://github.com/google/zetasql/blob/master/docs/json_functions.md#to_json
-
-[T_JSON_STRING]: https://github.com/google/zetasql/blob/master/docs/json_functions.md#to_json_string
-
-[T_PROTO]: https://github.com/google/zetasql/blob/master/docs/protocol_buffer_functions.md#to_proto
-
-[T_DATE]: https://github.com/google/zetasql/blob/master/docs/date_functions.md#date
-
-[T_DATETIME]: https://github.com/google/zetasql/blob/master/docs/datetime_functions.md#datetime
-
-[T_TIMESTAMP]: https://github.com/google/zetasql/blob/master/docs/timestamp_functions.md#timestamp
-
-[T_TIME]: https://github.com/google/zetasql/blob/master/docs/time_functions.md#time
-
-[JSON_TO_BOOL]: https://github.com/google/zetasql/blob/master/docs/json_functions.md#bool_for_json
-
-[JSON_TO_STRING]: https://github.com/google/zetasql/blob/master/docs/json_functions.md#string_for_json
-
-[JSON_TO_INT64]: https://github.com/google/zetasql/blob/master/docs/json_functions.md#int64_for_json
-
-[JSON_TO_DOUBLE]: https://github.com/google/zetasql/blob/master/docs/json_functions.md#double_for_json
-
-[T_DATE_FROM_UNIX_DATE]: https://github.com/google/zetasql/blob/master/docs/date_functions.md#date_from_unix_date
-
-[T_TIMESTAMP_FROM_UNIX_MICROS]: https://github.com/google/zetasql/blob/master/docs/timestamp_functions.md#timestamp_from_unix_micros
-
-[T_TIMESTAMP_FROM_UNIX_MILLIS]: https://github.com/google/zetasql/blob/master/docs/timestamp_functions.md#timestamp_from_unix_millis
-
-[T_TIMESTAMP_FROM_UNIX_SECONDS]: https://github.com/google/zetasql/blob/master/docs/timestamp_functions.md#timestamp_from_unix_seconds
-
-[T_TIMESTAMP_MICROS]: https://github.com/google/zetasql/blob/master/docs/timestamp_functions.md#timestamp_micros
-
-[T_TIMESTAMP_MILLIS]: https://github.com/google/zetasql/blob/master/docs/timestamp_functions.md#timestamp_millis
-
-[T_TIMESTAMP_SECONDS]: https://github.com/google/zetasql/blob/master/docs/timestamp_functions.md#timestamp_seconds
-
-
-
diff --git a/docs/conversion_rules.md b/docs/conversion_rules.md
index 4cb1e80a8..eb37b9e9e 100644
--- a/docs/conversion_rules.md
+++ b/docs/conversion_rules.md
@@ -396,6 +396,26 @@ ZetaSQL supports the following parameter coercions:
PROTO |
|
+
+ GRAPH_ELEMENT parameter |
+
+
+
+GRAPH_ELEMENT
+
+
+ Coercion is only allowed from one graph element type to another
+ graph element type if the second graph element type is a supertype of
+ the first. After the conversion, the graph element type can access the
+ properties that are described in its supertype.
+
+
+ With GRAPH_ELEMENT coercion, the property reference
+ returns NULL .
+
+ |
+
+
@@ -590,6 +610,26 @@ or more supertypes, including itself, which defines its set of supertypes.
+
+ GRAPH_ELEMENT |
+
+ GRAPH_ELEMENT . A graph element can be a supertype of
+ another graph element if the following is true:
+
+ -
+ Graph element
 a and graph element
+ b are the same element kind.
+
+ -
+ Graph element
a 's property type list is a
+ compatible superset of graph element b 's
+ property type list. This means that properties with the same name
+ must also have the same type.
+
+
+ |
+
+
RANGE |
RANGE with the same subtype. |
diff --git a/docs/data-definition-language.md b/docs/data-definition-language.md
index 58ade6421..5358f008c 100644
--- a/docs/data-definition-language.md
+++ b/docs/data-definition-language.md
@@ -946,9 +946,16 @@ denied from exercising these privileges on the object.
interpreted as having an empty ACL, in which case no users can exercise the
privileges.
+## `CREATE PROPERTY GRAPH`
+
+Creates a property graph. For more information, see
+[`CREATE PROPERTY GRAPH`][create-property-graph] in the GQL reference.
+
+[create-property-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#create_property_graph
+
## `CREATE TABLE FUNCTION`
-A table function, also known as a table value function (TVF), returns a table. A
+A table function, also known as a table-valued function (TVF), returns a table. A
TVF is called in the `FROM` clause like a table subquery. To create a TVF, see
[TVFs][tvfs].
@@ -1082,6 +1089,13 @@ object to drop.
+ `IF EXISTS`: If no object exists at `object_path`, the `DROP` statement will
have no effect.
+## `DROP PROPERTY GRAPH`
+
+Deletes a property graph. For more information, see
+[`DROP PROPERTY GRAPH`][drop-property-graph] in the GQL reference.
+
+[drop-property-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#drop_property_graph
+
## Terminology
### Primary key
diff --git a/docs/data-manipulation-language.md b/docs/data-manipulation-language.md
index 447ae7ff2..936b68970 100644
--- a/docs/data-manipulation-language.md
+++ b/docs/data-manipulation-language.md
@@ -1235,9 +1235,8 @@ WHERE s.SingerId = 5
ASSERT_ROWS_MODIFIED 1;
```
-ZetaSQL treats an array or repeated field inside a row that matches
-an `UPDATE WHERE` clause as a table, with individual elements of the array or
-field treated like rows. These rows can then have nested DML statements run
+ZetaSQL treats an array or a repeated field inside a row that matches
+an `UPDATE WHERE` clause as a table, with individual elements of the array or field treated like rows. These rows can then have nested DML statements run
against them, allowing you to delete, update, and insert data as needed.
#### Modifying multiple fields
diff --git a/docs/data-model.md b/docs/data-model.md
index 92557e8a9..df5d0df24 100644
--- a/docs/data-model.md
+++ b/docs/data-model.md
@@ -454,11 +454,34 @@ SELECT a.ROWNUM, a.albumtitle AS title FROM (SELECT a FROM AlbumReviewData AS a)
+## Property graphs
+
+
+A property graph is a directed graph that includes the following parts:
+
++ Nodes: Each node has a set of labels and properties. Each label has a
+ name identifier and determines a set of properties. Each property has a
+ name identifier and a value type.
++ Edges: Similar to nodes, each edge has
+ a set of labels and properties. Additionally, directed edges
+ include source nodes and destination nodes.
++ Labels: A label is identified by a unique name in the property graph and
+ determines a set of properties. Nodes and edges can expose the same set of
+ properties, using the same label.
++ Properties: A property is identified by a unique name in a property graph.
+ Properties declared on any label on a node or edge table are declared across
+ the whole enclosing property graph, so they must always be consistent.
+
+To create a property graph on top of a relational dataset, see the
+[CREATE PROPERTY GRAPH statement][create-property-graph] in the DDL.
+
[data-types]: https://github.com/google/zetasql/blob/master/docs/data-types.md
[data-manipulation-language]: https://github.com/google/zetasql/blob/master/docs/data-manipulation-language.md
+[create-property-graph]: https://github.com/google/zetasql/blob/master/docs/data-definition-language.md#create_property_graph
+
diff --git a/docs/data-types.md b/docs/data-types.md
index cd3cebf1a..5079780b4 100644
--- a/docs/data-types.md
+++ b/docs/data-types.md
@@ -23,7 +23,6 @@ information on data type literals and constructors, see
Array type
-
|
An ordered list of zero or more elements of non-array values.
@@ -33,17 +32,16 @@ information on data type literals and constructors, see
|
Boolean type
-
|
A value that can be either TRUE or FALSE .
- SQL type name: BOOL
+ SQL type name: BOOL
+ SQL aliases: BOOLEAN
|
Bytes type
-
|
Variable-length binary data.
@@ -53,7 +51,6 @@ information on data type literals and constructors, see
|
Date type
-
|
A Gregorian calendar date, independent of time zone.
@@ -63,7 +60,6 @@ information on data type literals and constructors, see
|
Datetime type
-
|
A Gregorian date and a time, as they might be displayed on a watch,
@@ -74,7 +70,6 @@ information on data type literals and constructors, see
|
Enum type
-
|
Named type that enumerates a list of possible values.
@@ -84,7 +79,6 @@ information on data type literals and constructors, see
|
Geography type
-
|
A collection of points, linestrings, and polygons, which is represented as a
@@ -94,8 +88,16 @@ information on data type literals and constructors, see
|
- Interval type
+ | Graph element type
+ |
+
+ An element in a property graph.
+ SQL type name: GRAPH_ELEMENT
+ |
+
+
+ Interval type
|
A duration of time, without referring to any specific point in time.
@@ -105,7 +107,6 @@ information on data type literals and constructors, see
|
JSON type
-
|
Represents JSON, a lightweight data-interchange format.
@@ -115,7 +116,6 @@ information on data type literals and constructors, see
|
Numeric types
-
|
@@ -187,7 +187,6 @@ information on data type literals and constructors, see
|
Protocol buffer type
-
|
A protocol buffer.
@@ -197,7 +196,6 @@ information on data type literals and constructors, see
|
Range type
-
|
Contiguous range between two dates, datetimes, or timestamps.
@@ -207,7 +205,6 @@ information on data type literals and constructors, see
|
String type
-
|
Variable-length character data.
@@ -217,7 +214,6 @@ information on data type literals and constructors, see
|
Struct type
-
|
Container of ordered fields.
@@ -227,7 +223,6 @@ information on data type literals and constructors, see
|
Time type
-
|
A time of day, as might be displayed on a clock, independent of a specific
@@ -238,7 +233,6 @@ information on data type literals and constructors, see
|
Timestamp type
-
|
A timestamp value represents an absolute point in time,
@@ -259,7 +253,8 @@ properties in mind:
### Nullable data types
For nullable data types, `NULL` is a valid value. Currently, all existing
-data types are nullable. Conditions apply for
+data types are nullable, except
+for `GRAPH_ELEMENT`. Conditions apply for
[arrays][array-nulls].
### Orderable data types
@@ -271,6 +266,7 @@ Applies to all data types except for:
+ `STRUCT`
+ `GEOGRAPHY`
+ `JSON`
++ `GRAPH_ELEMENT`
#### Ordering `NULL`s
@@ -350,6 +346,7 @@ Groupable data types can generally appear in an expression following `GROUP BY`,
+ `PROTO`
+ `GEOGRAPHY`
+ `JSON`
++ `GRAPH_ELEMENT`
#### Grouping with floating point types
@@ -367,7 +364,8 @@ both grouping done by a `GROUP BY` clause and grouping done by the
+ 0 or -0 — All zero values are considered equal when grouping.
+ `+inf`
-#### Grouping with arrays
+#### Grouping with arrays
+
An `ARRAY` type is groupable if its element type is
groupable.
@@ -379,7 +377,8 @@ is true:
+ The two arrays have the same number of elements and all corresponding
elements are in the same groups.
-#### Grouping with structs
+#### Grouping with structs
+
A `STRUCT` type is groupable if its field types are
groupable.
@@ -708,13 +707,18 @@ SELECT
|
-BOOL |
+
+ BOOL
+ BOOLEAN
+ |
Boolean values are represented by the keywords TRUE and
FALSE (case-insensitive). |
+`BOOLEAN` is an alias for `BOOL`.
+
Boolean values are sorted in this order, from least to greatest:
1. `NULL`
@@ -1141,6 +1145,54 @@ A geography is the result of, or an argument to, a
[geography-functions]: https://github.com/google/zetasql/blob/master/docs/geography_functions.md
+## Graph element type
+
+
+
+
+
+Name |
+Description |
+
+
+
+
+GRAPH_ELEMENT |
+
+ An element in a property graph.
+ |
+
+
+
+
+A variable with a `GRAPH_ELEMENT` type is produced by a graph query.
+The generated type has this format:
+
+```
+GRAPH_ELEMENT
+```
+
+A graph element can be one of two kinds: a node or an edge.
+A graph element is similar to the struct type, except that fields are
+graph properties, and you can only access graph properties by name.
+A graph element can represent nodes or edges from multiple node or edge tables
+if multiple such tables match the given label expression.
+
+**Example**
+
+In the following example, `n` represents a graph element in the
+[`FinGraph`][fin-graph] property graph:
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person)
+RETURN n.name
+```
+
+[graph-query]: https://github.com/google/zetasql/blob/master/docs/graph-intro.md
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
## Interval type
@@ -1546,7 +1598,6 @@ Floating point values are approximate numeric values with fractional components.
FLOAT
FLOAT32 |
-
Single precision (approximate) numeric values.
|
@@ -2653,17 +2704,44 @@ SELECT UNIX_MILLIS(TIMESTAMP '2008-12-25 15:30:00 America/Los_Angeles') AS milli
SELECT UNIX_MILLIS(TIMESTAMP '2008-12-25 15:30:00-08:00') AS millis;
```
-#### Offset from Coordinated Universal Time (UTC)
-
+#### Specify Coordinated Universal Time (UTC)
+
-Format:
+You can specify UTC using the following suffix:
```
-{+|-}H[H][:M[M]]
+{Z|z}
```
+You can also specify UTC using the following time zone name:
+
```
-{Z|z}
+{Etc/UTC}
+```
+
+The `Z` suffix is a placeholder that implies UTC when converting an [RFC
+3339-format][rfc-3339-format] value to a `TIMESTAMP` value. The value `Z` isn't
+a valid time zone for functions that accept a time zone. If you're specifying a
+time zone, or you're unsure of the format to use to specify UTC, we recommend
+using the `Etc/UTC` time zone name.
+
+The `Z` suffix isn't case sensitive. When using the `Z` suffix, no space is
+allowed between the `Z` and the rest of the timestamp. The following are
+examples of using the `Z` suffix and the `Etc/UTC` time zone name:
+
+```
+SELECT TIMESTAMP '2014-09-27T12:30:00.45Z'
+SELECT TIMESTAMP '2014-09-27 12:30:00.45z'
+SELECT TIMESTAMP '2014-09-27T12:30:00.45 Etc/UTC'
+```
+
+#### Specify an offset from Coordinated Universal Time (UTC)
+
+
+You can specify the offset from UTC using the following format:
+
+```
+{+|-}H[H][:M[M]]
```
Examples:
@@ -2674,7 +2752,6 @@ Examples:
+3:00
+07:30
-7
-Z
```
When using this format, no space is allowed between the time zone and the rest
@@ -2682,10 +2759,9 @@ of the timestamp.
```
2014-09-27 12:30:00.45-8:00
-2014-09-27T12:30:00.45Z
```
-#### Time zone name
+#### Time zone name {: #time_zone_name}
Format:
@@ -2739,6 +2815,8 @@ seconds are only observable through functions that measure real-world time. In
these functions, it is possible for a timestamp second to be skipped or repeated
when there is a leap second.
+[rfc-3339-format]: https://datatracker.ietf.org/doc/html/rfc3339#page-10
+
[tz-database]: http://www.iana.org/time-zones
[tz-database-list]: http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
diff --git a/docs/date_functions.md b/docs/date_functions.md
index b81ba7a0c..a346f7589 100644
--- a/docs/date_functions.md
+++ b/docs/date_functions.md
@@ -6,7 +6,7 @@
ZetaSQL supports the following date functions.
-### Function list
+## Function list
@@ -18,8 +18,7 @@ ZetaSQL supports the following date functions.
- CURRENT_DATE
-
+ | CURRENT_DATE
|
Returns the current date as a DATE value.
@@ -27,8 +26,7 @@ ZetaSQL supports the following date functions.
|
- DATE
-
+ | DATE
|
Constructs a DATE value.
@@ -36,8 +34,7 @@ ZetaSQL supports the following date functions.
|
- DATE_ADD
-
+ | DATE_ADD
|
Adds a specified time interval to a DATE value.
@@ -45,8 +42,7 @@ ZetaSQL supports the following date functions.
|
- DATE_DIFF
-
+ | DATE_DIFF
|
Gets the number of unit boundaries between two DATE values
@@ -55,18 +51,17 @@ ZetaSQL supports the following date functions.
|
- DATE_FROM_UNIX_DATE
-
+ | DATE_FROM_UNIX_DATE
|
Interprets an INT64 expression as the number of days
since 1970-01-01.
+
|
- DATE_SUB
-
+ | DATE_SUB
|
Subtracts a specified time interval from a DATE value.
@@ -74,17 +69,19 @@ ZetaSQL supports the following date functions.
|
- DATE_TRUNC
-
+ | DATE_TRUNC
|
- Truncates a DATE value.
+
+ Truncates a DATE , DATETIME , or
+ TIMESTAMP value at a particular
+ granularity.
+
|
- EXTRACT
-
+ | EXTRACT
|
Extracts part of a date from a DATE value.
@@ -92,8 +89,7 @@ ZetaSQL supports the following date functions.
|
- FORMAT_DATE
-
+ | FORMAT_DATE
|
Formats a DATE value according to a specified format string.
@@ -101,8 +97,17 @@ ZetaSQL supports the following date functions.
|
- LAST_DAY
+ | GENERATE_DATE_ARRAY
+ |
+
+ Generates an array of dates in a range.
+ For more information, see Array functions.
+ |
+
+
+
+ LAST_DAY
|
Gets the last day in a specified time period that contains a
@@ -111,27 +116,27 @@ ZetaSQL supports the following date functions.
|
- PARSE_DATE
-
+ | PARSE_DATE
|
Converts a STRING value to a DATE value.
+
|
- UNIX_DATE
-
+ | UNIX_DATE
|
Converts a DATE value to the number of days since 1970-01-01.
+
|
-### `CURRENT_DATE`
+## `CURRENT_DATE`
```sql
CURRENT_DATE()
@@ -218,7 +223,7 @@ SELECT CURRENT_DATE AS the_date;
[date-timezone-definitions]: https://github.com/google/zetasql/blob/master/docs/data-types.md#time_zones
-### `DATE`
+## `DATE`
```sql
DATE(year, month, day)
@@ -273,7 +278,7 @@ SELECT
[date-timezone-definitions]: https://github.com/google/zetasql/blob/master/docs/timestamp_functions.md#timezone_definitions
-### `DATE_ADD`
+## `DATE_ADD`
```sql
DATE_ADD(date_expression, INTERVAL int64_expression date_part)
@@ -312,7 +317,7 @@ SELECT DATE_ADD(DATE '2008-12-25', INTERVAL 5 DAY) AS five_days_later;
*--------------------*/
```
-### `DATE_DIFF`
+## `DATE_DIFF`
```sql
DATE_DIFF(end_date, start_date, granularity)
@@ -430,7 +435,7 @@ SELECT
[ISO-8601-week]: https://en.wikipedia.org/wiki/ISO_week_date
-### `DATE_FROM_UNIX_DATE`
+## `DATE_FROM_UNIX_DATE`
```sql
DATE_FROM_UNIX_DATE(int64_expression)
@@ -456,7 +461,7 @@ SELECT DATE_FROM_UNIX_DATE(14238) AS date_from_epoch;
*-----------------+*/
```
-### `DATE_SUB`
+## `DATE_SUB`
```sql
DATE_SUB(date_expression, INTERVAL int64_expression date_part)
@@ -495,52 +500,84 @@ SELECT DATE_SUB(DATE '2008-12-25', INTERVAL 5 DAY) AS five_days_ago;
*---------------*/
```
-### `DATE_TRUNC`
+## `DATE_TRUNC`
+
+```sql
+DATE_TRUNC(date_value, date_granularity)
+```
+
+```sql
+DATE_TRUNC(datetime_value, datetime_granularity)
+```
```sql
-DATE_TRUNC(date_expression, granularity)
+DATE_TRUNC(timestamp_value, timestamp_granularity[, time_zone])
```
**Description**
-Truncates a `DATE` value at a particular time granularity. The `DATE` value
-is always rounded to the beginning of `granularity`.
+Truncates a `DATE`, `DATETIME`, or `TIMESTAMP` value at a particular
+granularity.
**Definitions**
-+ `date_expression`: The `DATE` value to truncate.
-+ `granularity`: The date part that represents the granularity. If
- you passed in a `DATE` value for the first argument, `granularity` can
- be:
++ `date_value`: A `DATE` value to truncate.
++ `date_granularity`: The truncation granularity for a `DATE` value.
+ [Date granularities][date-trunc-granularity-date] can be used.
++ `datetime_value`: A `DATETIME` value to truncate.
++ `datetime_granularity`: The truncation granularity for a `DATETIME` value.
+ [Date granularities][date-trunc-granularity-date] and
+ [time granularities][date-trunc-granularity-time] can be used.
++ `timestamp_value`: A `TIMESTAMP` value to truncate.
++ `timestamp_granularity`: The truncation granularity for a `TIMESTAMP` value.
+ [Date granularities][date-trunc-granularity-date] and
+ [time granularities][date-trunc-granularity-time] can be used.
++ `time_zone`: A time zone to use with the `TIMESTAMP` value.
+ [Time zone parts][date-time-zone-parts] can be used.
+ Use this argument if you want to use a time zone other than
+ the default time zone, which is implementation defined, as part of the
+ truncate operation.
+
+ Note: When truncating a timestamp to `MINUTE`
+ or `HOUR` parts, this function determines the civil time of the
+ timestamp in the specified (or default) time zone
+ and subtracts the minutes and seconds (when truncating to `HOUR`) or the
+ seconds (when truncating to `MINUTE`) from that timestamp.
+ While this provides intuitive results in most cases, the result is
+ non-intuitive near daylight savings transitions that are not hour-aligned.
+
+
+
+**Date granularity definitions**
+ `DAY`: The day in the Gregorian calendar year that contains the
- `DATE` value.
+ value to truncate.
+ `WEEK`: The first day in the week that contains the
- `DATE` value. Weeks begin on Sundays. `WEEK` is equivalent to
+ value to truncate. Weeks begin on Sundays. `WEEK` is equivalent to
`WEEK(SUNDAY)`.
+ `WEEK(WEEKDAY)`: The first day in the week that contains the
- `DATE` value. Weeks begin on `WEEKDAY`. `WEEKDAY` must be one of the
+ value to truncate. Weeks begin on `WEEKDAY`. `WEEKDAY` must be one of the
following: `SUNDAY`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`,
or `SATURDAY`.
+ `ISOWEEK`: The first day in the [ISO 8601 week][ISO-8601-week] that contains
- the `DATE` value. The ISO week begins on
+ the value to truncate. The ISO week begins on
Monday. The first ISO week of each ISO year contains the first Thursday of the
corresponding Gregorian calendar year.
+ `MONTH`: The first day in the month that contains the
- `DATE` value.
+ value to truncate.
+ `QUARTER`: The first day in the quarter that contains the
- `DATE` value.
+ value to truncate.
+ `YEAR`: The first day in the year that contains the
- `DATE` value.
+ value to truncate.
+ `ISOYEAR`: The first day in the [ISO 8601][ISO-8601] week-numbering year
- that contains the `DATE` value. The ISO year is the
+ that contains the value to truncate. The ISO year is the
Monday of the first week where Thursday belongs to the corresponding
Gregorian calendar year.
@@ -552,9 +589,44 @@ is always rounded to the beginning of `granularity`.
+
+
+**Time granularity definitions**
+
+ + `NANOSECOND`: If used, nothing is truncated from the value.
+
+ + `MICROSECOND`: The nearest lesser than or equal microsecond.
+
+ + `MILLISECOND`: The nearest lesser than or equal millisecond.
+
+ + `SECOND`: The nearest lesser than or equal second.
+
+ + `MINUTE`: The nearest lesser than or equal minute.
+
+ + `HOUR`: The nearest lesser than or equal hour.
+
+
+
+**Time zone part definitions**
+
++ `MINUTE`
++ `HOUR`
++ `DAY`
++ `WEEK`
++ `WEEK(WEEKDAY)`
++ `ISOWEEK`
++ `MONTH`
++ `QUARTER`
++ `YEAR`
++ `ISOYEAR`
+
+**Details**
+
+The resulting value is always rounded to the beginning of `granularity`.
+
**Return Data Type**
-`DATE`
+The same data type as the first argument passed into this function.
**Examples**
@@ -603,7 +675,13 @@ SELECT
*------------------+----------------*/
```
-### `EXTRACT`
+[date-trunc-granularity-date]: #date_trunc_granularity_date
+
+[date-trunc-granularity-time]: #date_trunc_granularity_time
+
+[date-time-zone-parts]: #date_time_zone_parts
+
+## `EXTRACT`
```sql
EXTRACT(part FROM date_expression)
@@ -714,7 +792,7 @@ SELECT
[ISO-8601-week]: https://en.wikipedia.org/wiki/ISO_week_date
-### `FORMAT_DATE`
+## `FORMAT_DATE`
```sql
FORMAT_DATE(format_string, date_expr)
@@ -722,14 +800,17 @@ FORMAT_DATE(format_string, date_expr)
**Description**
-Formats the `date_expr` according to the specified `format_string`.
+Formats a `DATE` value according to a specified format string.
-See [Supported Format Elements For DATE][date-format-elements]
-for a list of format elements that this function supports.
+**Definitions**
+
++ `format_string`: A `STRING` value that contains the
+ [format elements][date-format-elements] to use with `date_expr`.
++ `date_expr`: A `DATE` value that represents the date to format.
**Return Data Type**
-STRING
+`STRING`
**Examples**
@@ -765,7 +846,7 @@ SELECT FORMAT_DATE('%b %Y', DATE '2008-12-25') AS formatted;
[date-format-elements]: https://github.com/google/zetasql/blob/master/docs/format-elements.md#format_elements_date_time
-### `LAST_DAY`
+## `LAST_DAY`
```sql
LAST_DAY(date_expression[, date_part])
@@ -861,7 +942,7 @@ SELECT LAST_DAY(DATE '2008-11-10', WEEK(MONDAY)) AS last_day
[ISO-8601-week]: https://en.wikipedia.org/wiki/ISO_week_date
-### `PARSE_DATE`
+## `PARSE_DATE`
```sql
PARSE_DATE(format_string, date_string)
@@ -869,14 +950,19 @@ PARSE_DATE(format_string, date_string)
**Description**
-Converts a [string representation of date][date-format] to a
-`DATE` object.
+Converts a `STRING` value to a `DATE` value.
+
+**Definitions**
+
++ `format_string`: A `STRING` value that contains the
+ [format elements][date-format-elements] to use with `date_string`.
++ `date_string`: A `STRING` value that represents the date to parse.
+
+**Details**
-`format_string` contains the [format elements][date-format-elements]
-that define how `date_string` is formatted. Each element in
-`date_string` must have a corresponding element in `format_string`. The
-location of each element in `format_string` must match the location of
-each element in `date_string`.
+Each element in `date_string` must have a corresponding element in
+`format_string`. The location of each element in `format_string` must match the
+location of each element in `date_string`.
```sql
-- This works because elements on both sides match.
@@ -894,20 +980,20 @@ SELECT PARSE_DATE('%F', '2000-12-30');
When using `PARSE_DATE`, keep the following in mind:
-+ **Unspecified fields.** Any unspecified field is initialized from `1970-01-01`.
-+ **Case insensitivity.** Names, such as `Monday`, `February`, and so on, are
++ Unspecified fields. Any unspecified field is initialized from `1970-01-01`.
++ Case insensitivity. Names, such as `Monday`, `February`, and so on, are
case insensitive.
-+ **Whitespace.** One or more consecutive white spaces in the format string
++ Whitespace. One or more consecutive white spaces in the format string
matches zero or more consecutive white spaces in the date string. In
addition, leading and trailing white spaces in the date string are always
allowed -- even if they are not in the format string.
-+ **Format precedence.** When two (or more) format elements have overlapping
++ Format precedence. When two (or more) format elements have overlapping
information (for example both `%F` and `%Y` affect the year), the last one
generally overrides any earlier ones.
**Return Data Type**
-DATE
+`DATE`
**Examples**
@@ -939,7 +1025,7 @@ SELECT PARSE_DATE('%Y%m%d', '20081225') AS parsed;
[date-format-elements]: https://github.com/google/zetasql/blob/master/docs/format-elements.md#format_elements_date_time
-### `UNIX_DATE`
+## `UNIX_DATE`
```sql
UNIX_DATE(date_expression)
diff --git a/docs/datetime_functions.md b/docs/datetime_functions.md
index de534e2b7..a8f996a74 100644
--- a/docs/datetime_functions.md
+++ b/docs/datetime_functions.md
@@ -6,7 +6,7 @@
ZetaSQL supports the following datetime functions.
-### Function list
+## Function list
@@ -18,8 +18,7 @@ ZetaSQL supports the following datetime functions.
- CURRENT_DATETIME
-
+ | CURRENT_DATETIME
|
Returns the current date and time as a DATETIME value.
@@ -27,8 +26,7 @@ ZetaSQL supports the following datetime functions.
|
- DATETIME
-
+ | DATETIME
|
Constructs a DATETIME value.
@@ -36,8 +34,7 @@ ZetaSQL supports the following datetime functions.
|
- DATETIME_ADD
-
+ | DATETIME_ADD
|
Adds a specified time interval to a DATETIME value.
@@ -45,8 +42,7 @@ ZetaSQL supports the following datetime functions.
|
- DATETIME_DIFF
-
+ | DATETIME_DIFF
|
Gets the number of unit boundaries between two DATETIME values
@@ -55,8 +51,7 @@ ZetaSQL supports the following datetime functions.
|
- DATETIME_SUB
-
+ | DATETIME_SUB
|
Subtracts a specified time interval from a DATETIME value.
@@ -64,17 +59,19 @@ ZetaSQL supports the following datetime functions.
|
- DATETIME_TRUNC
-
+ | DATETIME_TRUNC
|
- Truncates a DATETIME value.
+
+ Truncates a DATETIME or
+ TIMESTAMP value at a particular
+ granularity.
+
|
- EXTRACT
-
+ | EXTRACT
|
Extracts part of a date and time from a DATETIME value.
@@ -82,8 +79,7 @@ ZetaSQL supports the following datetime functions.
|
- FORMAT_DATETIME
-
+ | FORMAT_DATETIME
|
Formats a DATETIME value according to a specified
@@ -92,8 +88,7 @@ ZetaSQL supports the following datetime functions.
|
- LAST_DAY
-
+ | LAST_DAY
|
Gets the last day in a specified time period that contains a
@@ -102,18 +97,18 @@ ZetaSQL supports the following datetime functions.
|
- PARSE_DATETIME
-
+ | PARSE_DATETIME
|
Converts a STRING value to a DATETIME value.
+
|
-### `CURRENT_DATETIME`
+## `CURRENT_DATETIME`
```sql
CURRENT_DATETIME([time_zone])
@@ -156,7 +151,7 @@ SELECT CURRENT_DATETIME() as now;
[datetime-timezone-definitions]: https://github.com/google/zetasql/blob/master/docs/timestamp_functions.md#timezone_definitions
-### `DATETIME`
+## `DATETIME`
```sql
1. DATETIME(year, month, day, hour, minute, second)
@@ -196,7 +191,7 @@ SELECT
[datetime-timezone-definitions]: https://github.com/google/zetasql/blob/master/docs/timestamp_functions.md#timezone_definitions
-### `DATETIME_ADD`
+## `DATETIME_ADD`
```sql
DATETIME_ADD(datetime_expression, INTERVAL int64_expression part)
@@ -244,7 +239,7 @@ SELECT
*-----------------------------+------------------------*/
```
-### `DATETIME_DIFF`
+## `DATETIME_DIFF`
```sql
DATETIME_DIFF(end_datetime, start_datetime, granularity)
@@ -383,7 +378,7 @@ SELECT
[ISO-8601-week]: https://en.wikipedia.org/wiki/ISO_week_date
-### `DATETIME_SUB`
+## `DATETIME_SUB`
```sql
DATETIME_SUB(datetime_expression, INTERVAL int64_expression part)
@@ -431,64 +426,76 @@ SELECT
*-----------------------------+------------------------*/
```
-### `DATETIME_TRUNC`
+## `DATETIME_TRUNC`
+
+```sql
+DATETIME_TRUNC(datetime_value, datetime_granularity)
+```
```sql
-DATETIME_TRUNC(datetime_expression, granularity)
+DATETIME_TRUNC(timestamp_value, timestamp_granularity[, time_zone])
```
**Description**
-Truncates a `DATETIME` value at a particular time granularity. The `DATETIME`
-value is always rounded to the beginning of `granularity`.
+Truncates a `DATETIME` or `TIMESTAMP` value at a particular granularity.
**Definitions**
-+ `datetime_expression`: The `DATETIME` value to truncate.
-+ `granularity`: The datetime part that represents the granularity. If
- you passed in a `DATETIME` value for the first argument, `granularity` can
- be:
-
- + `NANOSECOND`: If used, nothing is truncated from the value.
-
- + `MICROSECOND`: The nearest lesser than or equal microsecond.
-
- + `MILLISECOND`: The nearest lesser than or equal millisecond.
-
- + `SECOND`: The nearest lesser than or equal second.
-
- + `MINUTE`: The nearest lesser than or equal minute.
-
- + `HOUR`: The nearest lesser than or equal hour.
++ `datetime_value`: A `DATETIME` value to truncate.
++ `datetime_granularity`: The truncation granularity for a `DATETIME` value.
+ [Date granularities][datetime-trunc-granularity-date] and
+ [time granularities][datetime-trunc-granularity-time] can be used.
++ `timestamp_value`: A `TIMESTAMP` value to truncate.
++ `timestamp_granularity`: The truncation granularity for a `TIMESTAMP` value.
+ [Date granularities][datetime-trunc-granularity-date] and
+ [time granularities][datetime-trunc-granularity-time] can be used.
++ `time_zone`: A time zone to use with the `TIMESTAMP` value.
+ [Time zone parts][datetime-time-zone-parts] can be used.
+ Use this argument if you want to use a time zone other than
+ the default time zone, which is implementation defined, as part of the
+ truncate operation.
+
+ Note: When truncating a timestamp to `MINUTE`
+ or `HOUR` parts, this function determines the civil time of the
+ timestamp in the specified (or default) time zone
+ and subtracts the minutes and seconds (when truncating to `HOUR`) or the
+ seconds (when truncating to `MINUTE`) from that timestamp.
+ While this provides intuitive results in most cases, the result is
+ non-intuitive near daylight savings transitions that are not hour-aligned.
+
+
+
+**Date granularity definitions**
+ `DAY`: The day in the Gregorian calendar year that contains the
- `DATETIME` value.
+ value to truncate.
+ `WEEK`: The first day in the week that contains the
- `DATETIME` value. Weeks begin on Sundays. `WEEK` is equivalent to
+ value to truncate. Weeks begin on Sundays. `WEEK` is equivalent to
`WEEK(SUNDAY)`.
+ `WEEK(WEEKDAY)`: The first day in the week that contains the
- `DATETIME` value. Weeks begin on `WEEKDAY`. `WEEKDAY` must be one of the
+ value to truncate. Weeks begin on `WEEKDAY`. `WEEKDAY` must be one of the
following: `SUNDAY`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`,
or `SATURDAY`.
+ `ISOWEEK`: The first day in the [ISO 8601 week][ISO-8601-week] that contains
- the `DATETIME` value. The ISO week begins on
+ the value to truncate. The ISO week begins on
Monday. The first ISO week of each ISO year contains the first Thursday of the
corresponding Gregorian calendar year.
+ `MONTH`: The first day in the month that contains the
- `DATETIME` value.
+ value to truncate.
+ `QUARTER`: The first day in the quarter that contains the
- `DATETIME` value.
+ value to truncate.
+ `YEAR`: The first day in the year that contains the
- `DATETIME` value.
+ value to truncate.
+ `ISOYEAR`: The first day in the [ISO 8601][ISO-8601] week-numbering year
- that contains the `DATETIME` value. The ISO year is the
+ that contains the value to truncate. The ISO year is the
Monday of the first week where Thursday belongs to the corresponding
Gregorian calendar year.
@@ -500,9 +507,44 @@ value is always rounded to the beginning of `granularity`.
+
+
+**Time granularity definitions**
+
+ + `NANOSECOND`: If used, nothing is truncated from the value.
+
+ + `MICROSECOND`: The nearest lesser than or equal microsecond.
+
+ + `MILLISECOND`: The nearest lesser than or equal millisecond.
+
+ + `SECOND`: The nearest lesser than or equal second.
+
+ + `MINUTE`: The nearest lesser than or equal minute.
+
+ + `HOUR`: The nearest lesser than or equal hour.
+
+
+
+**Time zone part definitions**
+
++ `MINUTE`
++ `HOUR`
++ `DAY`
++ `WEEK`
++ `WEEK(WEEKDAY)`
++ `ISOWEEK`
++ `MONTH`
++ `QUARTER`
++ `YEAR`
++ `ISOYEAR`
+
+**Details**
+
+The resulting value is always rounded to the beginning of `granularity`.
+
**Return Data Type**
-`DATETIME`
+The same data type as the first argument passed into this function.
**Examples**
@@ -555,7 +597,13 @@ SELECT
*---------------------+----------------*/
```
-### `EXTRACT`
+[datetime-trunc-granularity-date]: #datetime_trunc_granularity_date
+
+[datetime-trunc-granularity-time]: #datetime_trunc_granularity_time
+
+[datetime-time-zone-parts]: #datetime_time_zone_parts
+
+## `EXTRACT`
```sql
EXTRACT(part FROM datetime_expression)
@@ -680,17 +728,23 @@ FROM table;
[ISO-8601-week]: https://en.wikipedia.org/wiki/ISO_week_date
-### `FORMAT_DATETIME`
+## `FORMAT_DATETIME`
```sql
-FORMAT_DATETIME(format_string, datetime_expression)
+FORMAT_DATETIME(format_string, datetime_expr)
```
**Description**
-Formats a `DATETIME` object according to the specified `format_string`. See
-[Supported Format Elements For DATETIME][datetime-format-elements]
-for a list of format elements that this function supports.
+Formats a `DATETIME` value according to a specified format string.
+
+**Definitions**
+
++ `format_string`: A `STRING` value that contains the
+ [format elements][datetime-format-elements] to use with
+ `datetime_expr`.
++ `datetime_expr`: A `DATETIME` value that represents the date and time to
+ format.
**Return Data Type**
@@ -736,7 +790,7 @@ SELECT
[datetime-format-elements]: https://github.com/google/zetasql/blob/master/docs/format-elements.md#format_elements_date_time
-### `LAST_DAY`
+## `LAST_DAY`
```sql
LAST_DAY(datetime_expression[, date_part])
@@ -832,7 +886,7 @@ SELECT LAST_DAY(DATETIME '2008-11-10 15:30:00', WEEK(MONDAY)) AS last_day
[ISO-8601-week]: https://en.wikipedia.org/wiki/ISO_week_date
-### `PARSE_DATETIME`
+## `PARSE_DATETIME`
```sql
PARSE_DATETIME(format_string, datetime_string)
@@ -840,14 +894,20 @@ PARSE_DATETIME(format_string, datetime_string)
**Description**
-Converts a [string representation of a datetime][datetime-format] to a
-`DATETIME` object.
+Converts a `STRING` value to a `DATETIME` value.
+
+**Definitions**
+
++ `format_string`: A `STRING` value that contains the
+ [format elements][datetime-format-elements] to use with `datetime_string`.
++ `datetime_string`: A `STRING` value that represents the date and time to
+ parse.
+
+**Details**
-`format_string` contains the [format elements][datetime-format-elements]
-that define how `datetime_string` is formatted. Each element in
-`datetime_string` must have a corresponding element in `format_string`. The
-location of each element in `format_string` must match the location of
-each element in `datetime_string`.
+Each element in `datetime_string` must have a corresponding element in
+`format_string`. The location of each element in `format_string` must match the
+location of each element in `datetime_string`.
```sql
-- This works because elements on both sides match.
@@ -865,23 +925,23 @@ SELECT PARSE_DATETIME("%c", "Thu Dec 25 07:30:00 2008");
`PARSE_DATETIME` parses `string` according to the following rules:
-+ **Unspecified fields.** Any unspecified field is initialized from
++ Unspecified fields. Any unspecified field is initialized from
`1970-01-01 00:00:00.0`. For example, if the year is unspecified then it
defaults to `1970`.
-+ **Case insensitivity.** Names, such as `Monday` and `February`,
++ Case insensitivity. Names, such as `Monday` and `February`,
are case insensitive.
-+ **Whitespace.** One or more consecutive white spaces in the format string
++ Whitespace. One or more consecutive white spaces in the format string
matches zero or more consecutive white spaces in the
`DATETIME` string. Leading and trailing
white spaces in the `DATETIME` string are always
allowed, even if they are not in the format string.
-+ **Format precedence.** When two or more format elements have overlapping
++ Format precedence. When two or more format elements have overlapping
information, the last one generally overrides any earlier ones, with some
exceptions. For example, both `%F` and `%Y` affect the year, so the earlier
element overrides the later. See the descriptions
of `%s`, `%C`, and `%y` in
[Supported Format Elements For DATETIME][datetime-format-elements].
-+ **Format divergence.** `%p` can be used with `am`, `AM`, `pm`, and `PM`.
++ Format divergence. `%p` can be used with `am`, `AM`, `pm`, and `PM`.
**Return Data Type**
diff --git a/docs/debugging_functions.md b/docs/debugging_functions.md
index c171e00e5..1164035ef 100644
--- a/docs/debugging_functions.md
+++ b/docs/debugging_functions.md
@@ -6,7 +6,7 @@
ZetaSQL supports the following debugging functions.
-### Function list
+## Function list
@@ -18,8 +18,7 @@ ZetaSQL supports the following debugging functions.
- ERROR
-
+ | ERROR
|
Produces an error with a custom error message.
@@ -27,8 +26,7 @@ ZetaSQL supports the following debugging functions.
|
- IFERROR
-
+ | IFERROR
|
Evaluates a try expression, and if an evaluation error is produced, returns
@@ -37,8 +35,7 @@ ZetaSQL supports the following debugging functions.
|
- ISERROR
-
+ | ISERROR
|
Evaluates a try expression, and if an evaluation error is produced, returns
@@ -47,8 +44,7 @@ ZetaSQL supports the following debugging functions.
|
- NULLIFERROR
-
+ | NULLIFERROR
|
Evaluates a try expression, and if an evaluation error is produced, returns
@@ -59,7 +55,7 @@ ZetaSQL supports the following debugging functions.
|
-### `ERROR`
+## `ERROR`
```sql
ERROR(error_message)
@@ -134,7 +130,7 @@ WHERE IF(x > 0, true, ERROR(FORMAT('Error: x must be positive but is %t', x)));
-- Error: x must be positive but is -1
```
-### `IFERROR`
+## `IFERROR`
```sql
IFERROR(try_expression, catch_expression)
@@ -278,7 +274,7 @@ SELECT IFERROR(ERROR('a'), ERROR('b')) AS result
[supertype]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#supertypes
-### `ISERROR`
+## `ISERROR`
```sql
ISERROR(try_expression)
@@ -383,7 +379,7 @@ SELECT ISERROR((SELECT e FROM UNNEST([1, 2]) AS e)) AS is_error
*----------*/
```
-### `NULLIFERROR`
+## `NULLIFERROR`
```sql
NULLIFERROR(try_expression)
diff --git a/docs/format-elements.md b/docs/format-elements.md
index bc6484b7e..dfdd32926 100644
--- a/docs/format-elements.md
+++ b/docs/format-elements.md
@@ -620,7 +620,7 @@ format_model:
The format clause can be used in some [`CAST` functions][cast-functions]. You
use a format clause to provide instructions for how to conduct a
cast. For example, you could
-instruct a cast to convert a sequence of bytes to a BASE64-encoded string
+instruct a cast to convert a sequence of bytes to a base64-encoded string
instead of a UTF-8-encoded string.
The format clause includes a format model. The format model can contain
@@ -675,7 +675,7 @@ result is `NULL`. Format elements are case-insensitive.
BASE64M |
Converts a sequence of bytes into a
- BASE64-encoded string based on
+ base64-encoded string based on
rfc 2045
for MIME. Generates a newline character ("\n") every 76 characters.
|
@@ -779,7 +779,7 @@ if the `BASE64` or `BASE64M` format element is used.
bytes. X represents one of these numbers: 2, 8, 16, 32, 64. An error
is returned if the input contains characters that are not part of the
BASEX encoding alphabet, except whitespace characters if the
- format element is BASE64.
+ format element is BASE64
.
Input as BASE8: '00410473'
@@ -789,12 +789,12 @@ if the `BASE64` or `BASE64M` format element is used.
|
BASE64M |
- Converts a BASE64-encoded string to
+ Converts a base64-encoded string to
bytes. If the input contains characters that are not whitespace and not
- part of the BASE64 encoding alphabet defined at
+ part of the base64 encoding alphabet defined at
rfc 2045,
- an error is returned. BASE64M and BASE64 decoding have the same
- behavior.
+ an error is returned. BASE64M and BASE64
+ decoding have the same behavior.
|
Input: '3q2+7w=='
@@ -843,6 +843,8 @@ if the `BASE64` or `BASE64M` format element is used.
```sql
SELECT CAST('Hello' AS BYTES FORMAT 'ASCII') AS string_to_bytes
+-- Displays the bytes output value (b'\x48\x65\x6c\x6c\x6f').
+
/*-------------------------*
| string_to_bytes |
+-------------------------+
diff --git a/docs/functions-and-operators.md b/docs/functions-and-operators.md
index 306412075..8050445c2 100644
--- a/docs/functions-and-operators.md
+++ b/docs/functions-and-operators.md
@@ -72,7 +72,6 @@ statement.
| ARRAY |
Array position. Must be used with OFFSET or ORDINAL —see
Array Functions
-
. |
Binary |
@@ -329,30 +328,34 @@ statement.
+For example, the logical expression:
+
+`x OR y AND z`
+
+is interpreted as:
+
+`( x OR ( y AND z ) )`
+
Operators with the same precedence are left associative. This means that those
operators are grouped together starting from the left and moving right. For
example, the expression:
`x AND y AND z`
-is interpreted as
+is interpreted as:
`( ( x AND y ) AND z )`
The expression:
-```
-x * y / z
-```
+`x * y / z`
is interpreted as:
-```
-( ( x * y ) / z )
-```
+`( ( x * y ) / z )`
All comparison operators have the same priority, but comparison operators are
-not associative. Therefore, parentheses are required in order to resolve
+not associative. Therefore, parentheses are required to resolve
ambiguity. For example:
`(x < y) IS FALSE`
@@ -370,70 +373,60 @@ ambiguity. For example:
Field access operator
-
|
Gets the value of a field. |
Array subscript operator
-
|
Gets a value from an array at a specific position. |
Struct subscript operator
-
|
Gets the value of a field at a selected position in a struct. |
JSON subscript operator
-
|
Gets a value of an array element or field in a JSON expression. |
Protocol buffer map subscript operator
-
|
Gets the value in a protocol buffer map for a given key. |
Array elements field access operator
-
|
Traverses through the levels of a nested data type inside an array. |
Arithmetic operators
-
|
Performs arithmetic operations. |
Date arithmetics operators
-
|
Performs arithmetic operations on dates. |
Datetime subtraction
-
|
Computes the difference between two datetimes as an interval. |
Interval arithmetic operators
-
|
Adds an interval to a datetime or subtracts an interval from a datetime.
@@ -442,14 +435,12 @@ ambiguity. For example:
|
Bitwise operators
-
|
Performs bit manipulation. |
Logical operators
-
|
Tests for the truth of some condition and produces TRUE ,
@@ -458,8 +449,60 @@ ambiguity. For example:
|
- Comparison operators
+ | Graph concatenation operator
+ |
+
+ Combines multiple graph paths into one and preserves the original order of
+ the nodes and edges.
+ |
+
+
+
+ Graph logical operators
+ |
+
+ Tests for the truth of a condition in a graph and produces either
+ TRUE or FALSE .
+ |
+
+
+
+ Graph predicates
+ |
+
+ Tests for the truth of a condition for a graph element and produces
+ TRUE , FALSE , or NULL .
+ |
+
+
+
+ IS DESTINATION predicate
+ |
+ In a graph, checks to see if a node is or isn't the destination of an edge. |
+
+
+
+ IS SOURCE predicate
+ |
+ In a graph, checks to see if a node is or isn't the source of an edge. |
+
+
+
+ PROPERTY_EXISTS predicate
+ |
+ In a graph, checks to see if a property exists for an element. |
+
+
+
+ SAME predicate
+ |
+
+ In a graph, determines if all graph elements in a list bind to the same node or edge.
+ |
+
+
+ Comparison operators
|
Compares operands and produces the results of the comparison as a
@@ -469,21 +512,18 @@ ambiguity. For example:
|
EXISTS operator
-
|
Checks if a subquery produces one or more rows. |
IN operator
-
|
Checks for an equal value in a set of values. |
IS operators
-
|
Checks for the truth of a condition and produces either TRUE or
@@ -493,42 +533,36 @@ ambiguity. For example:
|
IS DISTINCT FROM operator
-
|
Checks if values are considered to be distinct from each other. |
LIKE operator
-
|
Checks if values are like or not like one another. |
Quantified LIKE operator
-
|
Checks a search value for matches against several patterns. |
NEW operator
-
|
Creates a protocol buffer. |
Concatenation operator
-
|
Combines multiple values into one. |
WITH expression
-
|
Creates variables for re-use and produces a result expression. |
@@ -553,6 +587,7 @@ Input values:
+ `STRUCT`
+ `PROTO`
+ `JSON`
++ `GRAPH_ELEMENT`
Note: If the field to access is within a `STRUCT`, you can use the
[struct subscript operator][struct-subscript-operator] to access the field by
@@ -567,6 +602,8 @@ a field by position is useful when fields are un-named or have ambiguous names.
the protocol buffer, an error is thrown.
+ For `JSON`: `JSON`. If a field is not found in a JSON value, a SQL `NULL` is
returned.
++ For `GRAPH_ELEMENT`: SQL data type of `fieldname`. If a field (property) is
+ not found in the graph element, an error is produced.
**Example**
@@ -1747,6 +1784,355 @@ SELECT entry FROM entry_table WHERE entry IS NULL
[three-valued-logic]: https://en.wikipedia.org/wiki/Three-valued_logic
+### Graph concatenation operator
+
+
+```sql
+graph_path || graph_path [ || ... ]
+```
+
+**Description**
+
+Combines multiple graph paths into one and preserves the original order of the
+nodes and edges.
+
+Arguments:
+
++ `graph_path`: A `GRAPH_PATH` value that represents a graph path to
+ concatenate.
+
+**Details**
+
+This operator produces an error if the last node in the first path isn't the
+same as the first node in the second path.
+
+```sql
+-- This successfully produces the concatenated path called `full_path`.
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid:Account),
+ q=(mid)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+```
+
+```sql
+-- This produces an error because the first node of the path to be concatenated
+-- (mid2) is not equal to the last node of the previous path (mid1).
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid1:Account),
+ q=(mid2:Account)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+```
+
+The first node in each subsequent path is removed from the
+concatenated path.
+
+```sql
+-- The concatenated path called `full_path` contains these elements:
+-- src, t1, mid, t2, dst.
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid:Account),
+ q=(mid)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+```
+
+If any `graph_path` is `NULL`, produces `NULL`.
+
+**Example**
+
+In the following query, the paths called `p` and `q` are concatenated. Notice that
+`mid` is used at the end of the first path and at the beginning of the
+second path. Also notice that the duplicate `mid` is removed from the
+concatenated path called `full_path`:
+
+```sql
+GRAPH FinGraph
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid:Account),
+ q = (mid)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+RETURN
+ JSON_QUERY(TO_JSON(full_path)[0], '$.labels') AS element_a,
+ JSON_QUERY(TO_JSON(full_path)[1], '$.labels') AS element_b,
+ JSON_QUERY(TO_JSON(full_path)[2], '$.labels') AS element_c,
+ JSON_QUERY(TO_JSON(full_path)[3], '$.labels') AS element_d,
+ JSON_QUERY(TO_JSON(full_path)[4], '$.labels') AS element_e,
+ JSON_QUERY(TO_JSON(full_path)[5], '$.labels') AS element_f
+
+/*-------------------------------------------------------------------------------------*
+ | element_a | element_b | element_c | element_d | element_e | element_f |
+ +-------------------------------------------------------------------------------------+
+ | ["Account"] | ["Transfers"] | ["Account"] | ["Transfers"] | ["Account"] | |
+ | ... | ... | ... | ... | ... | ... |
+ *-------------------------------------------------------------------------------------*/
+```
+
+The following query produces an error because the last node for `p` must
+be the first node for `q`:
+
+```sql
+-- Error: `mid1` and `mid2` are not equal.
+GRAPH FinGraph
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid1:Account),
+ q=(mid2:Account)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+RETURN TO_JSON(full_path) AS results
+```
+
+The following query produces an error because the path called `p` is `NULL`:
+
+```sql
+-- Error: a graph path is NULL.
+GRAPH FinGraph
+MATCH
+ p=NULL,
+ q=(mid:Account)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+RETURN TO_JSON(full_path) AS results
+```
+
+### Graph logical operators
+
+
+ZetaSQL supports the following logical operators in
+[element pattern label expressions][element-pattern-definition]:
+
+
+
+
+ Name |
+ Syntax |
+ Description |
+
+
+
+
+ NOT |
+ !X |
+
+ Returns TRUE if X is not included, otherwise,
+ returns FALSE .
+ |
+
+
+ OR |
+ X | Y |
+
+ Returns TRUE if either X or Y is
+ included, otherwise, returns FALSE .
+ |
+
+
+ AND |
+ X & Y |
+
+ Returns TRUE if both X and Y are
+ included, otherwise, returns FALSE .
+ |
+
+
+
+
+[element-pattern-definition]: https://github.com/google/zetasql/blob/master/docs/graph-patterns.md#element_pattern_definition
+
+### Graph predicates
+
+
+ZetaSQL supports the following graph-specific predicates in
+graph expressions. A predicate can produce `TRUE`, `FALSE`, or `NULL`.
+
++ [`PROPERTY_EXISTS` predicate][property-exists-predicate]
++ [`IS SOURCE` predicate][is-source-predicate]
++ [`IS DESTINATION` predicate][is-destination-predicate]
++ [`SAME` predicate][same-predicate]
+
+[property-exists-predicate]: #property_exists_predicate
+
+[is-source-predicate]: #is_source_predicate
+
+[is-destination-predicate]: #is_destination_predicate
+
+[same-predicate]: #same_predicate
+
+### `IS DESTINATION` predicate
+
+
+```sql
+node IS [ NOT ] DESTINATION [ OF ] edge
+```
+
+**Description**
+
+In a graph, checks to see if a node is or isn't the destination of an edge.
+Can produce `TRUE`, `FALSE`, or `NULL`.
+
+Arguments:
+
++ `node`: The graph pattern variable for the node element.
++ `edge`: The graph pattern variable for the edge element.
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[transfer:Transfers]-(b:Account)
+WHERE a IS DESTINATION of transfer
+RETURN a.id AS a_id, b.id AS b_id
+
+/*-------------+
+ | a_id | b_id |
+ +-------------+
+ | 16 | 7 |
+ | 16 | 7 |
+ | 20 | 16 |
+ | 7 | 20 |
+ | 16 | 20 |
+ +-------------*/
+```
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[transfer:Transfers]-(b:Account)
+WHERE b IS DESTINATION of transfer
+RETURN a.id AS a_id, b.id AS b_id
+
+/*-------------+
+ | a_id | b_id |
+ +-------------+
+ | 7 | 16 |
+ | 7 | 16 |
+ | 16 | 20 |
+ | 20 | 7 |
+ | 20 | 16 |
+ +-------------*/
+```
+
+### `IS SOURCE` predicate
+
+
+```sql
+node IS [ NOT ] SOURCE [ OF ] edge
+```
+
+**Description**
+
+In a graph, checks to see if a node is or isn't the source of an edge.
+Can produce `TRUE`, `FALSE`, or `NULL`.
+
+Arguments:
+
++ `node`: The graph pattern variable for the node element.
++ `edge`: The graph pattern variable for the edge element.
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[transfer:Transfers]-(b:Account)
+WHERE a IS SOURCE of transfer
+RETURN a.id AS a_id, b.id AS b_id
+
+/*-------------+
+ | a_id | b_id |
+ +-------------+
+ | 20 | 7 |
+ | 7 | 16 |
+ | 7 | 16 |
+ | 20 | 16 |
+ | 16 | 20 |
+ +-------------*/
+```
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[transfer:Transfers]-(b:Account)
+WHERE b IS SOURCE of transfer
+RETURN a.id AS a_id, b.id AS b_id
+
+/*-------------+
+ | a_id | b_id |
+ +-------------+
+ | 7 | 20 |
+ | 16 | 7 |
+ | 16 | 7 |
+ | 16 | 20 |
+ | 20 | 16 |
+ +-------------*/
+```
+
+### `PROPERTY_EXISTS` predicate
+
+
+```sql
+PROPERTY_EXISTS(element, element_property)
+```
+
+**Description**
+
+In a graph, checks to see if a property exists for an element.
+Can produce `TRUE`, `FALSE`, or `NULL`.
+
+Arguments:
+
++ `element`: The graph pattern variable for a node or edge element.
++ `element_property`: The name of the property to look for in `element`.
+ The property name must refer to a property in the graph. If the property
+ does not exist in the graph, an error is produced. The property name is
+ resolved in a case-insensitive manner.
+
+**Example**
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person|Account WHERE PROPERTY_EXISTS(n, name))
+RETURN n.name
+
+/*------+
+ | name |
+ +------+
+ | Alex |
+ | Dana |
+ | Lee |
+ +------*/
+```
+
+### `SAME` predicate
+
+
+```sql
+SAME (element, element[, element])
+```
+
+**Description**
+
+In a graph, determines if all graph elements in a list bind to the same node or
+edge. Can produce `TRUE`, `FALSE`, or `NULL`.
+
+Arguments:
+
++ `element`: The graph pattern variable for a node or edge element.
+
+**Example**
+
+The following query checks to see if `src` and `dest` are not the same account.
+
+```sql
+GRAPH FinGraph
+MATCH (src:Account)<-[transfer:Transfers]-(dest:Account)
+WHERE NOT SAME(src, dest)
+RETURN src.id AS source_id, dest.id AS destination_id
+
+/*----------------------------+
+ | source_id | destination_id |
+ +----------------------------+
+ | 7 | 20 |
+ | 16 | 7 |
+ | 16 | 7 |
+ | 16 | 20 |
+ | 20 | 16 |
+ +----------------------------*/
+```
+
### Comparison operators
@@ -3033,7 +3419,7 @@ NEW Universe {
radius_miles: 432,690
age: 4,603,000,000
}
- constellations [{
+ constellations: [{
name: "Libra"
index: 0
}, {
@@ -3093,6 +3479,12 @@ The concatenation operator combines multiple values into one.
+Note: The concatenation operator is translated into a nested
+[`CONCAT`][concat] function call. For example, `'A' || 'B' || 'C'` becomes
+`CONCAT('A', CONCAT('B', 'C'))`.
+
+[concat]: #concat
+
### `WITH` expression
@@ -3218,7 +3610,6 @@ tuning.
CASE expr
-
|
Compares the given expression to each successive WHEN clause
@@ -3228,7 +3619,6 @@ tuning.
|
CASE
-
|
Evaluates the condition of each successive WHEN clause and
@@ -3239,7 +3629,6 @@ tuning.
|
COALESCE
-
|
Produces the value of the first non-NULL expression, if any,
@@ -3249,7 +3638,6 @@ tuning.
|
IF
-
|
If an expression evaluates to TRUE , produces a specified
@@ -3259,7 +3647,6 @@ tuning.
|
IFNULL
-
|
If an expression evaluates to NULL , produces a specified
@@ -3269,7 +3656,6 @@ tuning.
|
NULLIF
-
|
Produces NULL if the first expression that matches another
@@ -3279,7 +3665,6 @@ tuning.
|
NULLIFZERO
-
|
Produces NULL if an expression is 0 ,
@@ -3289,7 +3674,6 @@ tuning.
|
ZEROIFNULL
-
|
Produces 0 if an expression is NULL , otherwise
@@ -3705,7 +4089,6 @@ functions work, see [AEAD encryption concepts][aead-encryption-concepts].
|
AEAD.DECRYPT_BYTES
-
|
Uses the matching key from a keyset to decrypt a
@@ -3715,7 +4098,6 @@ functions work, see [AEAD encryption concepts][aead-encryption-concepts].
|
AEAD.DECRYPT_STRING
-
|
Uses the matching key from a keyset to decrypt a BYTES
@@ -3725,7 +4107,6 @@ functions work, see [AEAD encryption concepts][aead-encryption-concepts].
|
AEAD.ENCRYPT
-
|
Encrypts STRING plaintext, using the primary cryptographic key
@@ -3735,7 +4116,6 @@ functions work, see [AEAD encryption concepts][aead-encryption-concepts].
|
DETERMINISTIC_DECRYPT_BYTES
-
|
Uses the matching key from a keyset to decrypt a BYTES
@@ -3745,7 +4125,6 @@ functions work, see [AEAD encryption concepts][aead-encryption-concepts].
|
DETERMINISTIC_DECRYPT_STRING
-
|
Uses the matching key from a keyset to decrypt a BYTES
@@ -3755,7 +4134,6 @@ functions work, see [AEAD encryption concepts][aead-encryption-concepts].
|
DETERMINISTIC_ENCRYPT
-
|
Encrypts STRING plaintext, using the primary cryptographic key
@@ -3765,7 +4143,6 @@ functions work, see [AEAD encryption concepts][aead-encryption-concepts].
|
KEYS.ADD_KEY_FROM_RAW_BYTES
-
|
Adds a key to a keyset, and return the new keyset as a serialized
@@ -3775,7 +4152,6 @@ functions work, see [AEAD encryption concepts][aead-encryption-concepts].
|
KEYS.KEYSET_FROM_JSON
-
|
Converts a STRING JSON keyset to a serialized
@@ -3785,7 +4161,6 @@ functions work, see [AEAD encryption concepts][aead-encryption-concepts].
|
KEYS.KEYSET_LENGTH
-
|
Gets the number of keys in the provided keyset.
@@ -3794,7 +4169,6 @@ functions work, see [AEAD encryption concepts][aead-encryption-concepts].
|
KEYS.KEYSET_TO_JSON
-
|
Gets a JSON STRING representation of a keyset.
@@ -3803,7 +4177,6 @@ functions work, see [AEAD encryption concepts][aead-encryption-concepts].
|
KEYS.NEW_KEYSET
-
|
Gets a serialized keyset containing a new key based on the key type.
@@ -3812,7 +4185,6 @@ functions work, see [AEAD encryption concepts][aead-encryption-concepts].
|
KEYS.ROTATE_KEYSET
-
|
Adds a new primary cryptographic key to a keyset, based on the key type.
@@ -4557,7 +4929,6 @@ To learn about the syntax for aggregate function calls, see
|
ANY_VALUE
-
|
Gets an expression for some row.
@@ -4565,62 +4936,125 @@ To learn about the syntax for aggregate function calls, see
|
- ARRAY_AGG
+ | APPROX_COUNT_DISTINCT
+ |
+
+ Gets the approximate result for COUNT(DISTINCT expression) .
+ For more information, see Approximate aggregate functions.
+
+ |
+
+
+
+ APPROX_QUANTILES
+ |
+
+ Gets the approximate quantile boundaries.
+ For more information, see Approximate aggregate functions.
+
+ |
+
+
+ APPROX_TOP_COUNT
+ |
+
+ Gets the approximate top elements and their approximate count.
+ For more information, see Approximate aggregate functions.
+
+ |
+
+
+
+ APPROX_TOP_SUM
+ |
+
+ Gets the approximate top elements and sum, based on the approximate sum
+ of an assigned weight.
+ For more information, see Approximate aggregate functions.
+
+ |
+
+
+
+ ARRAY_AGG
|
Gets an array of values.
+
|
ARRAY_CONCAT_AGG
-
|
Concatenates arrays and returns a single array as a result.
+
|
AVG
-
|
Gets the average of non-NULL values.
+
|
- BIT_AND
+ | AVG (Differential Privacy)
+ |
+
+ DIFFERENTIAL_PRIVACY -supported AVG .
+ Gets the differentially-private average of non-NULL ,
+ non-NaN values in a query with a
+ DIFFERENTIAL_PRIVACY clause.
+
For more information, see Differential privacy functions.
+
+ |
+
+
+ BIT_AND
|
Performs a bitwise AND operation on an expression.
+
|
BIT_OR
-
|
Performs a bitwise OR operation on an expression.
+
|
BIT_XOR
-
|
Performs a bitwise XOR operation on an expression.
+
|
- COUNT
+ | CORR
+ |
+
+ Computes the Pearson coefficient of correlation of a set of number pairs.
+ For more information, see Statistical aggregate functions.
+ |
+
+
+
+ COUNT
|
Gets the number of rows in the input, or the number of rows with an
@@ -4628,18 +5062,53 @@ To learn about the syntax for aggregate function calls, see
|
+
+ COUNT (Differential Privacy)
+ |
+
+ DIFFERENTIAL_PRIVACY -supported COUNT .
+ Signature 1: Gets the differentially-private count of rows in a query with a
+ DIFFERENTIAL_PRIVACY clause.
+
+
+ Signature 2: Gets the differentially-private count of rows with a
+ non-NULL expression in a query with a
+ DIFFERENTIAL_PRIVACY clause.
+
For more information, see Differential privacy functions.
+
+ |
+
+
COUNTIF
+ |
+
+ Gets the number of TRUE values for an expression.
+ |
+
+
+ COVAR_POP
|
- Gets the count of TRUE values for an expression.
+ Computes the population covariance of a set of number pairs.
+ For more information, see Statistical aggregate functions.
+
|
- GROUPING
+ | COVAR_SAMP
+ |
+
+ Computes the sample covariance of a set of number pairs.
+ For more information, see Statistical aggregate functions.
+
+ |
+
+
+ GROUPING
|
Checks if a groupable value in the GROUP BY clause is
@@ -4649,7 +5118,6 @@ To learn about the syntax for aggregate function calls, see
|
LOGICAL_AND
-
|
Gets the logical AND of all non-NULL expressions.
@@ -4658,7 +5126,6 @@ To learn about the syntax for aggregate function calls, see
|
LOGICAL_OR
-
|
Gets the logical OR of all non-NULL expressions.
@@ -4667,16 +5134,15 @@ To learn about the syntax for aggregate function calls, see
|
MAX
-
|
Gets the maximum non-NULL value.
+
|
MIN
-
|
Gets the minimum non-NULL value.
@@ -4684,21 +5150,139 @@ To learn about the syntax for aggregate function calls, see
|
- STRING_AGG
+ | PERCENTILE_CONT (Differential Privacy)
+ |
+
+ DIFFERENTIAL_PRIVACY -supported PERCENTILE_CONT .
+ Computes a differentially-private percentile across privacy unit columns
+ in a query with a DIFFERENTIAL_PRIVACY clause.
+
For more information, see Differential privacy functions.
+
+ |
+
+
+
+ ST_EXTENT
+ |
+
+ Gets the bounding box for a group of GEOGRAPHY values.
+ For more information, see Geography functions.
+
+ |
+
+
+
+ ST_UNION_AGG
+ |
+
+ Aggregates over GEOGRAPHY values and gets their
+ point set union.
+ For more information, see Geography functions.
+
+ |
+
+
+
+ STDDEV
+ |
+
+ An alias of the STDDEV_SAMP function.
+ For more information, see Statistical aggregate functions.
+
+ |
+
+
+
+ STDDEV_POP
+ |
+
+ Computes the population (biased) standard deviation of the values.
+ For more information, see Statistical aggregate functions.
+
+ |
+
+
+
+ STDDEV_SAMP
+ |
+
+ Computes the sample (unbiased) standard deviation of the values.
+ For more information, see Statistical aggregate functions.
+
+ |
+
+
+ STRING_AGG
|
Concatenates non-NULL STRING or
BYTES values.
+
|
SUM
-
|
Gets the sum of non-NULL values.
+
+ |
+
+
+
+ SUM (Differential Privacy)
+ |
+
+ DIFFERENTIAL_PRIVACY -supported SUM .
+ Gets the differentially-private sum of non-NULL ,
+ non-NaN values in a query with a
+ DIFFERENTIAL_PRIVACY clause.
+
For more information, see Differential privacy functions.
+
+ |
+
+
+
+ VAR_POP
+ |
+
+ Computes the population (biased) variance of the values.
+ For more information, see Statistical aggregate functions.
+
+ |
+
+
+
+ VAR_POP (Differential Privacy)
+ |
+
+ DIFFERENTIAL_PRIVACY -supported VAR_POP (Differential Privacy).
+ Computes the differentially-private population (biased) variance of values
+ in a query with a DIFFERENTIAL_PRIVACY clause.
+
For more information, see Differential privacy functions.
+
+ |
+
+
+
+ VAR_SAMP
+ |
+
+ Computes the sample (unbiased) variance of the values.
+ For more information, see Statistical aggregate functions.
+
+ |
+
+
+
+ VARIANCE
+ |
+
+ An alias of VAR_SAMP .
+ For more information, see Statistical aggregate functions.
+
|
@@ -5395,15 +5979,11 @@ SELECT BIT_XOR(DISTINCT x) AS bit_xor FROM UNNEST([1234, 5678, 1234]) AS x;
### `COUNT`
-1.
-
```sql
COUNT(*)
-[OVER over_clause]
+[ OVER over_clause ]
```
-2.
-
```sql
COUNT(
[ DISTINCT ]
@@ -5425,16 +6005,26 @@ window_specification:
**Description**
-1. Returns the number of rows in the input.
-2. Returns the number of rows with `expression` evaluated to any value other
- than `NULL`.
+Gets the number of rows in the input or the number of rows with an
+expression evaluated to any value other than `NULL`.
-To learn more about the optional aggregate clauses that you can pass
-into this function, see
-[Aggregate function calls][aggregate-function-calls].
+**Definitions**
-This function can be used with the
-[`AGGREGATION_THRESHOLD` clause][agg-threshold-clause].
++ `*`: Use this value to get the number of all rows in the input.
++ `expression`: A value of any data type that represents the expression to
+ evaluate. If `DISTINCT` is present,
+ `expression` can only be a data type that is
+ [groupable][groupable-data-types].
++ `DISTINCT`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `HAVING { MAX | MIN }`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `OVER`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `over_clause`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `window_specification`: To learn more, see
+ [Window function calls][window-function-calls].
@@ -5442,10 +6032,9 @@ This function can be used with the
[agg-threshold-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#agg_threshold_clause
-
+[window-function-calls]: https://github.com/google/zetasql/blob/master/docs/window-function-calls.md
-To learn more about the `OVER` clause and how to use it, see
-[Window function calls][window-function-calls].
+
@@ -5453,22 +6042,34 @@ To learn more about the `OVER` clause and how to use it, see
-This function with DISTINCT supports specifying [collation][collation].
+**Details**
+
+To count the number of distinct values of an expression for which a
+certain condition is satisfied, you can use the following recipe:
+
+```sql
+COUNT(DISTINCT IF(condition, expression, NULL))
+```
+
+`IF` returns the value of `expression` if `condition` is `TRUE`, or
+`NULL` otherwise. The surrounding `COUNT(DISTINCT ...)` ignores the `NULL`
+values, so it counts only the distinct values of `expression` for which
+`condition` is `TRUE`.
+
+To count the number of non-distinct values of an expression for which a
+certain condition is satisfied, consider using the
+[`COUNTIF`][countif] function.
+
+This function with DISTINCT
supports specifying [collation][collation].
[collation]: https://github.com/google/zetasql/blob/master/docs/collation-concepts.md#collate_about
`COUNT` can be used with differential privacy. For more information, see
[Differentially private aggregate functions][dp-functions].
-**Supported Argument Types**
-
-`expression` can be any data type. If
-`DISTINCT` is present, `expression` can only be a data type that is
-[groupable][agg-data-type-properties].
-
-**Return Data Types**
+**Return type**
-INT64
+`INT64`
**Examples**
@@ -5523,19 +6124,7 @@ FROM UNNEST([1, 4, NULL, 4, 5]) AS x;
*------+------------+---------*/
```
-If you want to count the number of distinct values of an expression for which a
-certain condition is satisfied, this is one recipe that you can use:
-
-```sql
-COUNT(DISTINCT IF(condition, expression, NULL))
-```
-
-Here, `IF` will return the value of `expression` if `condition` is `TRUE`, or
-`NULL` otherwise. The surrounding `COUNT(DISTINCT ...)` will ignore the `NULL`
-values, so it will count only the distinct values of `expression` for which
-`condition` is `TRUE`.
-
-For example, to count the number of distinct positive values of `x`:
+The following query counts the number of distinct positive values of `x`:
```sql
SELECT COUNT(DISTINCT IF(x > 0, x, NULL)) AS distinct_positive
@@ -5548,8 +6137,8 @@ FROM UNNEST([1, -2, 4, 1, -5, 4, 1, 3, -6, 1]) AS x;
*-------------------*/
```
-Or to count the number of distinct dates on which a certain kind of event
-occurred:
+The following query counts the number of distinct dates on which a certain kind
+of event occurred:
```sql
WITH Events AS (
@@ -5577,7 +6166,33 @@ FROM Events;
*------------------------------*/
```
-[agg-data-type-properties]: https://github.com/google/zetasql/blob/master/docs/data-types.md#data_type_properties
+The following query counts the number of distinct `id`s that exist in both
+the `customers` and `vendor` tables:
+
+```sql
+WITH
+ customers AS (
+ SELECT 1934 AS id, 'a' AS team UNION ALL
+ SELECT 2991, 'b' UNION ALL
+ SELECT 3988, 'c'),
+ vendors AS (
+ SELECT 1934 AS id, 'd' AS team UNION ALL
+ SELECT 2991, 'e' UNION ALL
+ SELECT 4366, 'f')
+SELECT
+ COUNT(DISTINCT IF(id IN (SELECT id FROM customers), id, NULL)) AS result
+FROM vendors;
+
+/*--------*
+ | result |
+ +--------+
+ | 2 |
+ *--------*/
+```
+
+[countif]: https://github.com/google/zetasql/blob/master/docs/aggregate_functions.md#countif
+
+[groupable-data-types]: https://github.com/google/zetasql/blob/master/docs/data-types.md#groupable_data_types
[dp-functions]: #aggregate-dp-functions
@@ -5604,30 +6219,21 @@ window_specification:
**Description**
-Returns the count of `TRUE` values for `expression`. Returns `0` if there are
-zero input rows, or if `expression` evaluates to `FALSE` or `NULL` for all rows.
-
-Since `expression` must be a `BOOL`, the form `COUNTIF(DISTINCT ...)` is
-generally not useful: there is only one distinct value of `TRUE`. So
-`COUNTIF(DISTINCT ...)` will return 1 if `expression` evaluates to `TRUE` for
-one or more input rows, or 0 otherwise.
-Usually when someone wants to combine `COUNTIF` and `DISTINCT`, they
-want to count the number of distinct values of an expression for which a certain
-condition is satisfied. One recipe to achieve this is the following:
-
-```sql
-COUNT(DISTINCT IF(condition, expression, NULL))
-```
-
-Note that this uses `COUNT`, not `COUNTIF`; the `IF` part has been moved inside.
-To learn more, see the examples for [`COUNT`](#count).
+Gets the number of `TRUE` values for an expression.
-To learn more about the optional aggregate clauses that you can pass
-into this function, see
-[Aggregate function calls][aggregate-function-calls].
+**Definitions**
-This function can be used with the
-[`AGGREGATION_THRESHOLD` clause][agg-threshold-clause].
++ `expression`: A `BOOL` value that represents the expression to evaluate.
++ `DISTINCT`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `HAVING { MAX | MIN }`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `OVER`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `over_clause`: To learn more, see
+ [Aggregate function calls][aggregate-function-calls].
++ `window_specification`: To learn more, see
+ [Window function calls][window-function-calls].
@@ -5635,10 +6241,9 @@ This function can be used with the
[agg-threshold-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#agg_threshold_clause
-
+[window-function-calls]: https://github.com/google/zetasql/blob/master/docs/window-function-calls.md
-To learn more about the `OVER` clause and how to use it, see
-[Window function calls][window-function-calls].
+
@@ -5646,13 +6251,15 @@ To learn more about the `OVER` clause and how to use it, see
-**Supported Argument Types**
+**Details**
-BOOL
+The function signature `COUNTIF(DISTINCT ...)` is generally not useful. If you
+would like to use `DISTINCT`, use `COUNT` with `DISTINCT IF`. For more
+information, see the [`COUNT`][count] function.
-**Return Data Types**
+**Return type**
-INT64
+`INT64`
**Examples**
@@ -5688,6 +6295,8 @@ FROM UNNEST([5, -2, 3, 6, -10, NULL, -7, 4, 0]) AS x;
*------+--------------*/
```
+[count]: https://github.com/google/zetasql/blob/master/docs/aggregate_functions.md#count
+
### `GROUPING`
```sql
@@ -6484,38 +7093,38 @@ sketches. If you would like to specify precision with sketches, see:
APPROX_COUNT_DISTINCT
-
|
Gets the approximate result for COUNT(DISTINCT expression) .
+
|
APPROX_QUANTILES
-
|
Gets the approximate quantile boundaries.
+
|
APPROX_TOP_COUNT
-
|
Gets the approximate top elements and their approximate count.
+
|
APPROX_TOP_SUM
-
|
Gets the approximate top elements and sum, based on the approximate sum
of an assigned weight.
+
|
@@ -6743,9 +7352,9 @@ APPROX_TOP_SUM(
**Description**
-Returns the approximate top elements of `expression`, based on the sum of an
-assigned `weight`. The `number` parameter specifies the number of elements
-returned.
+Returns the approximate top elements of `expression`, ordered by the sum of the
+`weight` values provided for each unique value of `expression`. The `number`
+parameter specifies the number of elements returned.
If the `weight` input is negative or `NaN`, this function returns an error.
@@ -6865,7 +7474,6 @@ ZetaSQL supports the following array functions.
ARRAY
-
|
Produces an array with one element for each row in a subquery.
@@ -6873,8 +7481,17 @@ ZetaSQL supports the following array functions.
|
- ARRAY_AVG
+ | ARRAY_AGG
+ |
+
+ Gets an array of values.
+ For more information, see Aggregate functions.
+
+ |
+
+
+ ARRAY_AVG
|
Gets the average of non-NULL values in an array.
@@ -6883,7 +7500,6 @@ ZetaSQL supports the following array functions.
|
ARRAY_CONCAT
-
|
Concatenates one or more arrays with the same element type into a
@@ -6892,8 +7508,17 @@ ZetaSQL supports the following array functions.
|
- ARRAY_FILTER
+ | ARRAY_CONCAT_AGG
+ |
+
+ Concatenates arrays and returns a single array as a result.
+ For more information, see Aggregate functions.
+
+ |
+
+
+ ARRAY_FILTER
|
Takes an array, filters out unwanted elements, and returns the results
@@ -6903,7 +7528,6 @@ ZetaSQL supports the following array functions.
|
ARRAY_FIRST
-
|
Gets the first element in an array.
@@ -6912,7 +7536,6 @@ ZetaSQL supports the following array functions.
|
ARRAY_INCLUDES
-
|
Checks if there is an element in the array that is
@@ -6922,7 +7545,6 @@ ZetaSQL supports the following array functions.
|
ARRAY_INCLUDES_ALL
-
|
Checks if all search values are in an array.
@@ -6931,7 +7553,6 @@ ZetaSQL supports the following array functions.
|
ARRAY_INCLUDES_ANY
-
|
Checks if any search values are in an array.
@@ -6940,7 +7561,6 @@ ZetaSQL supports the following array functions.
|
ARRAY_IS_DISTINCT
-
|
Checks if an array contains no repeated elements.
@@ -6949,7 +7569,6 @@ ZetaSQL supports the following array functions.
|
ARRAY_LAST
-
|
Gets the last element in an array.
@@ -6958,7 +7577,6 @@ ZetaSQL supports the following array functions.
|
ARRAY_LENGTH
-
|
Gets the number of elements in an array.
@@ -6967,7 +7585,6 @@ ZetaSQL supports the following array functions.
|
ARRAY_MAX
-
|
Gets the maximum non-NULL value in an array.
@@ -6976,7 +7593,6 @@ ZetaSQL supports the following array functions.
|
ARRAY_MIN
-
|
Gets the minimum non-NULL value in an array.
@@ -6985,7 +7601,6 @@ ZetaSQL supports the following array functions.
|
ARRAY_REVERSE
-
|
Reverses the order of elements in an array.
@@ -6994,7 +7609,6 @@ ZetaSQL supports the following array functions.
|
ARRAY_SLICE
-
|
Produces an array containing zero or more consecutive elements from an
@@ -7004,7 +7618,6 @@ ZetaSQL supports the following array functions.
|
ARRAY_SUM
-
|
Gets the sum of non-NULL values in an array.
@@ -7013,17 +7626,16 @@ ZetaSQL supports the following array functions.
|
ARRAY_TO_STRING
-
|
Produces a concatenation of the elements in an array as a
STRING value.
+
|
ARRAY_TRANSFORM
-
|
Transforms the elements of an array, and returns the results in a new
@@ -7033,7 +7645,6 @@ ZetaSQL supports the following array functions.
|
ARRAY_ZIP
-
|
Combines elements from two to four arrays into one array.
@@ -7042,7 +7653,6 @@ ZetaSQL supports the following array functions.
|
FLATTEN
-
|
Flattens arrays of nested data to create a single flat array.
@@ -7051,7 +7661,6 @@ ZetaSQL supports the following array functions.
|
GENERATE_ARRAY
-
|
Generates an array of values in a range.
@@ -7060,19 +7669,124 @@ ZetaSQL supports the following array functions.
|
GENERATE_DATE_ARRAY
-
|
Generates an array of dates in a range.
+
|
- GENERATE_TIMESTAMP_ARRAY
+ | GENERATE_RANGE_ARRAY
+ |
+
+ Splits a range into an array of subranges.
+ For more information, see Range functions.
+
+ |
+
+
+ GENERATE_TIMESTAMP_ARRAY
|
Generates an array of timestamps in a range.
+
+ |
+
+
+
+ JSON_ARRAY
+ |
+
+ Creates a JSON array.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ JSON_ARRAY_APPEND
+ |
+
+ Appends JSON data to the end of a JSON array.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ JSON_ARRAY_INSERT
+ |
+
+ Inserts JSON data into a JSON array.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ JSON_EXTRACT_ARRAY
+ |
+
+ (Deprecated)
+ Extracts a JSON array and converts it to
+ a SQL ARRAY<JSON-formatted STRING>
+ or
+ ARRAY<JSON>
+
+ value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ JSON_EXTRACT_STRING_ARRAY
+ |
+
+ (Deprecated)
+ Extracts a JSON array of scalar values and converts it to a SQL
+ ARRAY<STRING> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ JSON_QUERY_ARRAY
+ |
+
+ Extracts a JSON array and converts it to
+ a SQL ARRAY<JSON-formatted STRING>
+ or
+ ARRAY<JSON>
+
+ value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ JSON_VALUE_ARRAY
+ |
+
+ Extracts a JSON array of scalar values and converts it to a SQL
+ ARRAY<STRING> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ RANGE_BUCKET
+ |
+
+ Scans through a sorted array and returns the 0-based position
+ of a point's upper bound.
+ For more information, see Mathematical functions.
+
|
@@ -8844,6 +9558,8 @@ FROM
*--------------------------------------------------------------------------*/
```
+### Supplemental materials
+
### OFFSET and ORDINAL
For information about using `OFFSET` and `ORDINAL` with arrays, see
@@ -8874,50 +9590,79 @@ ZetaSQL supports the following bit functions.
- BIT_CAST_TO_INT32
+ | BIT_AND
+ |
+
+ Performs a bitwise AND operation on an expression.
+ For more information, see Aggregate functions.
+
+ |
+
+
+ BIT_CAST_TO_INT32
|
Cast bits to an INT32 value.
+
|
BIT_CAST_TO_INT64
-
|
Cast bits to an INT64 value.
+
|
BIT_CAST_TO_UINT32
-
|
Cast bits to an UINT32 value.
+
|
BIT_CAST_TO_UINT64
-
|
Cast bits to an UINT64 value.
+
|
BIT_COUNT
-
|
Gets the number of bits that are set in an input expression.
|
+
+ BIT_OR
+ |
+
+ Performs a bitwise OR operation on an expression.
+ For more information, see Aggregate functions.
+
+ |
+
+
+
+ BIT_XOR
+ |
+
+ Performs a bitwise XOR operation on an expression.
+ For more information, see Aggregate functions.
+
+ |
+
+
@@ -9114,8 +9859,79 @@ learn more about implicit and explicit conversion [here][conversion-rules].
- CAST
+ | ARRAY_TO_STRING
+ |
+
+ Produces a concatenation of the elements in an array as a
+ STRING value.
+ For more information, see Array functions.
+
+ |
+
+
+
+ BIT_CAST_TO_INT32
+ |
+
+ Cast bits to an INT32 value.
+ For more information, see Bit functions.
+
+ |
+
+
+
+ BIT_CAST_TO_INT64
+ |
+
+ Cast bits to an INT64 value.
+ For more information, see Bit functions.
+
+ |
+
+
+
+ BIT_CAST_TO_UINT32
+ |
+
+ Cast bits to an UINT32 value.
+ For more information, see Bit functions.
+
+ |
+
+
+
+ BIT_CAST_TO_UINT64
+ |
+
+ Cast bits to an UINT64 value.
+ For more information, see Bit functions.
+
+ |
+
+
+
+ BOOL
+ |
+
+ Converts a JSON boolean to a SQL BOOL value.
+ For more information, see JSON functions.
+ |
+
+
+
+ BOOL_ARRAY
+ |
+
+ Converts a JSON array of booleans to a
+ SQL ARRAY<BOOL> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ CAST
|
Convert the results of an expression to the given type.
@@ -9123,8 +9939,309 @@ learn more about implicit and explicit conversion [here][conversion-rules].
|
- PARSE_BIGNUMERIC
+ | CHR
+ |
+
+ Converts a Unicode code point to a character.
+ For more information, see String functions.
+
+ |
+
+
+
+ CODE_POINTS_TO_BYTES
+ |
+
+ Converts an array of extended ASCII code points to a
+ BYTES value.
+ For more information, see String aggregate functions.
+
+ |
+
+
+
+ CODE_POINTS_TO_STRING
+ |
+
+ Converts an array of extended ASCII code points to a
+ STRING value.
+ For more information, see String aggregate functions.
+
+ |
+
+
+
+ DATE_FROM_UNIX_DATE
+ |
+
+ Interprets an INT64 expression as the number of days
+ since 1970-01-01.
+ For more information, see Date functions.
+ |
+
+
+
+ FROM_BASE32
+ |
+
+ Converts a base32-encoded STRING value into a
+ BYTES value.
+ For more information, see String functions.
+
+ |
+
+
+
+ FROM_BASE64
+ |
+
+ Converts a base64-encoded STRING value into a
+ BYTES value.
+ For more information, see String functions.
+
+ |
+
+
+
+ FROM_HEX
+ |
+
+ Converts a hexadecimal-encoded STRING value into a
+ BYTES value.
+ For more information, see String functions.
+
+ |
+
+
+
+ FROM_PROTO
+ |
+
+ Converts a protocol buffer value into ZetaSQL value.
+ For more information, see Protocol buffer functions.
+
+ |
+
+
+
+ INT32
+ |
+
+ Converts a JSON number to a SQL INT32 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ INT32_ARRAY
+ |
+
+ Converts a JSON array of numbers to a SQL ARRAY<INT32> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ INT64
+ |
+
+ Converts a JSON number to a SQL INT64 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ INT64_ARRAY
+ |
+
+ Converts a JSON array of numbers to a
+ SQL ARRAY<INT64> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_BOOL
+ |
+
+ Attempts to convert a JSON value to a SQL BOOL value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_BOOL_ARRAY
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<BOOL> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+
+
+ LAX_DOUBLE
+
+
+ |
+
+ Attempts to convert a JSON value to a
+ SQL DOUBLE value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+
+
+ LAX_DOUBLE_ARRAY
+
+
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<DOUBLE> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+
+
+ LAX_FLOAT
+
+
+ |
+
+ Attempts to convert a JSON value to a
+ SQL FLOAT value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+
+
+ LAX_FLOAT_ARRAY
+
+
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<FLOAT> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_INT32
+ |
+
+ Attempts to convert a JSON value to a SQL INT32 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_INT32_ARRAY
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<INT32> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_INT64
+ |
+
+ Attempts to convert a JSON value to a SQL INT64 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_INT64_ARRAY
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<INT64> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_STRING
+ |
+
+ Attempts to convert a JSON value to a SQL STRING value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_STRING_ARRAY
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<STRING> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_UINT32
+ |
+
+ Attempts to convert a JSON value to a SQL UINT32 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_UINT64
+ |
+
+ Attempts to convert a JSON value to a SQL UINT64 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_UINT64_ARRAY
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<UINT64> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ PARSE_BIGNUMERIC
|
Converts a STRING value to a BIGNUMERIC value.
@@ -9132,8 +10249,38 @@ learn more about implicit and explicit conversion [here][conversion-rules].
|
- PARSE_NUMERIC
+ | PARSE_DATE
+ |
+
+ Converts a STRING value to a DATE value.
+ For more information, see Date functions.
+
+ |
+
+
+ PARSE_DATETIME
+ |
+
+ Converts a STRING value to a DATETIME value.
+ For more information, see Datetime functions.
+
+ |
+
+
+
+ PARSE_JSON
+ |
+
+ Converts a JSON-formatted STRING value to a
+ JSON value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ PARSE_NUMERIC
|
Converts a STRING value to a NUMERIC value.
@@ -9141,8 +10288,27 @@ learn more about implicit and explicit conversion [here][conversion-rules].
|
- SAFE_CAST
+ | PARSE_TIME
+ |
+
+ Converts a STRING value to a TIME value.
+ For more information, see Time functions.
+
+ |
+
+
+
+ PARSE_TIMESTAMP
+ |
+
+ Converts a STRING value to a TIMESTAMP value.
+ For more information, see Timestamp functions.
+
+ |
+
+
+ SAFE_CAST
|
Similar to the CAST function, but returns NULL
@@ -9150,6 +10316,240 @@ learn more about implicit and explicit conversion [here][conversion-rules].
|
+
+ SAFE_CONVERT_BYTES_TO_STRING
+ |
+
+ Converts a BYTES value to a STRING value and
+ replaces any invalid UTF-8 characters with the Unicode replacement character,
+ U+FFFD .
+ For more information, see String functions.
+
+ |
+
+
+
+ STRING (JSON)
+ |
+
+ Converts a JSON string to a SQL STRING value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ STRING_ARRAY
+ |
+
+ Converts a JSON array of strings to a SQL ARRAY<STRING>
+ value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ STRING (Timestamp)
+ |
+
+ Converts a TIMESTAMP value to a STRING value.
+ For more information, see Timestamp functions.
+
+ |
+
+
+
+ TIMESTAMP_MICROS
+ |
+
+ Converts the number of microseconds since
+ 1970-01-01 00:00:00 UTC to a TIMESTAMP .
+ For more information, see Timestamp functions.
+
+ |
+
+
+
+ TIMESTAMP_MILLIS
+ |
+
+ Converts the number of milliseconds since
+ 1970-01-01 00:00:00 UTC to a TIMESTAMP .
+ For more information, see Timestamp functions.
+
+ |
+
+
+
+ TIMESTAMP_SECONDS
+ |
+
+ Converts the number of seconds since
+ 1970-01-01 00:00:00 UTC to a TIMESTAMP .
+ For more information, see Timestamp functions.
+
+ |
+
+
+
+ TO_BASE32
+ |
+
+ Converts a BYTES value to a
+ base32-encoded STRING value.
+ For more information, see String functions.
+
+ |
+
+
+
+ TO_BASE64
+ |
+
+ Converts a BYTES value to a
+ base64-encoded STRING value.
+ For more information, see String functions.
+
+ |
+
+
+
+ TO_CODE_POINTS
+ |
+
+ Converts a STRING or BYTES value into an array of
+ extended ASCII code points.
+ For more information, see String functions.
+
+ |
+
+
+
+ TO_HEX
+ |
+
+ Converts a BYTES value to a
+ hexadecimal STRING value.
+ For more information, see String functions.
+
+ |
+
+
+
+ TO_JSON
+ |
+
+ Converts a SQL value to a JSON value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ TO_JSON_STRING
+ |
+
+ Converts a SQL value to a JSON-formatted STRING value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ TO_PROTO
+ |
+
+ Converts a ZetaSQL value into a protocol buffer value.
+ For more information, see Protocol buffer functions.
+
+ |
+
+
+
+ UINT32
+ |
+
+ Converts a JSON number to a SQL UINT32 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ UINT32_ARRAY
+ |
+
+ Converts a JSON array of numbers to a
+ SQL ARRAY<UINT32> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ UINT64
+ |
+
+ Converts a JSON number to a SQL UINT64 value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ UINT64_ARRAY
+ |
+
+ Converts a JSON array of numbers to a SQL ARRAY<UINT64> value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ UNIX_DATE
+ |
+
+ Converts a DATE value to the number of days since 1970-01-01.
+ For more information, see Date functions.
+
+ |
+
+
+
+ UNIX_MICROS
+ |
+
+ Converts a TIMESTAMP value to the number of microseconds since
+ 1970-01-01 00:00:00 UTC.
+ For more information, see Timestamp functions.
+
+ |
+
+
+
+ UNIX_MILLIS
+ |
+
+ Converts a TIMESTAMP value to the number of milliseconds
+ since 1970-01-01 00:00:00 UTC.
+ For more information, see Timestamp functions.
+
+ |
+
+
+
+ UNIX_SECONDS
+ |
+
+ Converts a TIMESTAMP value to the number of seconds since
+ 1970-01-01 00:00:00 UTC.
+ For more information, see Timestamp functions.
+
+ |
+
+
@@ -10998,136 +12398,8 @@ are replaced with the unicode replacement character, `U+FFFD`.
[formatting-syntax]: https://github.com/google/zetasql/blob/master/docs/format-elements.md#formatting_syntax
-### Other conversion functions
-
-
-You can learn more about these conversion functions elsewhere in the
-documentation:
-
-
-
-Conversion function | From | To
-------- | -------- | -------
-[ARRAY_TO_STRING][ARRAY_STRING] | ARRAY | STRING
-[BIT_CAST_TO_INT32][BIT_I32] | UINT32 | INT32
-[BIT_CAST_TO_INT64][BIT_I64] | UINT64 | INT64
-[BIT_CAST_TO_UINT32][BIT_U32] | INT32 | UINT32
-[BIT_CAST_TO_UINT64][BIT_U64] | INT64 | UINT64
-[BOOL][JSON_TO_BOOL] | JSON | BOOL
-[DATE][T_DATE] | Various data types | DATE
-[DATE_FROM_UNIX_DATE][T_DATE_FROM_UNIX_DATE] | INT64 | DATE
-[DATETIME][T_DATETIME] | Various data types | DATETIME
-[DOUBLE][JSON_TO_DOUBLE] | JSON | DOUBLE
-[FROM_BASE32][F_B32] | STRING | BYTEs
-[FROM_BASE64][F_B64] | STRING | BYTES
-[FROM_HEX][F_HEX] | STRING | BYTES
-[FROM_PROTO][F_PROTO] | PROTO value | Most data types
-[INT64][JSON_TO_INT64] | JSON | INT64
-[PARSE_DATE][P_DATE] | STRING | DATE
-[PARSE_DATETIME][P_DATETIME] | STRING | DATETIME
-[PARSE_JSON][P_JSON] | STRING | JSON
-[PARSE_TIME][P_TIME] | STRING | TIME
-[PARSE_TIMESTAMP][P_TIMESTAMP] | STRING | TIMESTAMP
-[SAFE_CONVERT_BYTES_TO_STRING][SC_BTS] | BYTES | STRING
-[STRING][STRING_TIMESTAMP] | TIMESTAMP | STRING
-[STRING][JSON_TO_STRING] | JSON | STRING
-[TIME][T_TIME] | Various data types | TIME
-[TIMESTAMP][T_TIMESTAMP] | Various data types | TIMESTAMP
-[TIMESTAMP_FROM_UNIX_MICROS][T_TIMESTAMP_FROM_UNIX_MICROS] | INT64 | TIMESTAMP
-[TIMESTAMP_FROM_UNIX_MILLIS][T_TIMESTAMP_FROM_UNIX_MILLIS] | INT64 | TIMESTAMP
-[TIMESTAMP_FROM_UNIX_SECONDS][T_TIMESTAMP_FROM_UNIX_SECONDS] | INT64 | TIMESTAMP
-[TIMESTAMP_MICROS][T_TIMESTAMP_MICROS] | INT64 | TIMESTAMP
-[TIMESTAMP_MILLIS][T_TIMESTAMP_MILLIS] | INT64 | TIMESTAMP
-[TIMESTAMP_SECONDS][T_TIMESTAMP_SECONDS] | INT64 | TIMESTAMP
-[TO_BASE32][T_B32] | BYTES | STRING
-[TO_BASE64][T_B64] | BYTES | STRING
-[TO_HEX][T_HEX] | BYTES | STRING
-[TO_JSON][T_JSON] | All data types | JSON
-[TO_JSON_STRING][T_JSON_STRING] | All data types | STRING
-[TO_PROTO][T_PROTO] | Most data types | PROTO value
-
-
-
-
-
[conversion-rules]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md
-[ARRAY_STRING]: #array_to_string
-
-[BIT_I32]: #bit_cast_to_int32
-
-[BIT_U32]: #bit_cast_to_uint32
-
-[BIT_I64]: #bit_cast_to_int64
-
-[BIT_U64]: #bit_cast_to_uint64
-
-[F_B32]: #from_base32
-
-[F_B64]: #from_base64
-
-[F_HEX]: #from_hex
-
-[F_PROTO]: #from_proto
-
-[P_DATE]: #parse_date
-
-[P_DATETIME]: #parse_datetime
-
-[P_JSON]: #parse_json
-
-[P_TIME]: #parse_time
-
-[P_TIMESTAMP]: #parse_timestamp
-
-[SC_BTS]: #safe_convert_bytes_to_string
-
-[STRING_TIMESTAMP]: #string
-
-[T_B32]: #to_base32
-
-[T_B64]: #to_base64
-
-[T_HEX]: #to_hex
-
-[T_JSON]: #to_json
-
-[T_JSON_STRING]: #to_json_string
-
-[T_PROTO]: #to_proto
-
-[T_DATE]: #date
-
-[T_DATETIME]: #datetime
-
-[T_TIMESTAMP]: #timestamp
-
-[T_TIME]: #time
-
-[JSON_TO_BOOL]: #bool_for_json
-
-[JSON_TO_STRING]: #string_for_json
-
-[JSON_TO_INT64]: #int64_for_json
-
-[JSON_TO_DOUBLE]: #double_for_json
-
-[T_DATE_FROM_UNIX_DATE]: #date_from_unix_date
-
-[T_TIMESTAMP_FROM_UNIX_MICROS]: #timestamp_from_unix_micros
-
-[T_TIMESTAMP_FROM_UNIX_MILLIS]: #timestamp_from_unix_millis
-
-[T_TIMESTAMP_FROM_UNIX_SECONDS]: #timestamp_from_unix_seconds
-
-[T_TIMESTAMP_MICROS]: #timestamp_micros
-
-[T_TIMESTAMP_MILLIS]: #timestamp_millis
-
-[T_TIMESTAMP_SECONDS]: #timestamp_seconds
-
-
-
## Date functions
ZetaSQL supports the following date functions.
@@ -11145,7 +12417,6 @@ ZetaSQL supports the following date functions.
CURRENT_DATE
-
|
Returns the current date as a DATE value.
@@ -11154,7 +12425,6 @@ ZetaSQL supports the following date functions.
|
DATE
-
|
Constructs a DATE value.
@@ -11163,7 +12433,6 @@ ZetaSQL supports the following date functions.
|
DATE_ADD
-
|
Adds a specified time interval to a DATE value.
@@ -11172,7 +12441,6 @@ ZetaSQL supports the following date functions.
|
DATE_DIFF
-
|
Gets the number of unit boundaries between two DATE values
@@ -11182,17 +12450,16 @@ ZetaSQL supports the following date functions.
|
DATE_FROM_UNIX_DATE
-
|
Interprets an INT64 expression as the number of days
since 1970-01-01.
+
|
DATE_SUB
-
|
Subtracts a specified time interval from a DATE value.
@@ -11201,16 +12468,18 @@ ZetaSQL supports the following date functions.
|
DATE_TRUNC
-
|
- Truncates a DATE value.
+
+ Truncates a DATE , DATETIME , or
+ TIMESTAMP value at a particular
+ granularity.
+
|
EXTRACT
-
|
Extracts part of a date from a DATE value.
@@ -11219,7 +12488,6 @@ ZetaSQL supports the following date functions.
|
FORMAT_DATE
-
|
Formats a DATE value according to a specified format string.
@@ -11227,8 +12495,17 @@ ZetaSQL supports the following date functions.
|
- LAST_DAY
+ | GENERATE_DATE_ARRAY
+ |
+
+ Generates an array of dates in a range.
+ For more information, see Array functions.
+
+ |
+
+
+ LAST_DAY
|
Gets the last day in a specified time period that contains a
@@ -11238,19 +12515,19 @@ ZetaSQL supports the following date functions.
|
PARSE_DATE
-
|
Converts a STRING value to a DATE value.
+
|
UNIX_DATE
-
|
Converts a DATE value to the number of days since 1970-01-01.
+
|
@@ -11624,49 +12901,81 @@ SELECT DATE_SUB(DATE '2008-12-25', INTERVAL 5 DAY) AS five_days_ago;
### `DATE_TRUNC`
```sql
-DATE_TRUNC(date_expression, granularity)
+DATE_TRUNC(date_value, date_granularity)
+```
+
+```sql
+DATE_TRUNC(datetime_value, datetime_granularity)
+```
+
+```sql
+DATE_TRUNC(timestamp_value, timestamp_granularity[, time_zone])
```
**Description**
-Truncates a `DATE` value at a particular time granularity. The `DATE` value
-is always rounded to the beginning of `granularity`.
+Truncates a `DATE`, `DATETIME`, or `TIMESTAMP` value at a particular
+granularity.
**Definitions**
-+ `date_expression`: The `DATE` value to truncate.
-+ `granularity`: The date part that represents the granularity. If
- you passed in a `DATE` value for the first argument, `granularity` can
- be:
++ `date_value`: A `DATE` value to truncate.
++ `date_granularity`: The truncation granularity for a `DATE` value.
+ [Date granularities][date-trunc-granularity-date] can be used.
++ `datetime_value`: A `DATETIME` value to truncate.
++ `datetime_granularity`: The truncation granularity for a `DATETIME` value.
+ [Date granularities][date-trunc-granularity-date] and
+ [time granularities][date-trunc-granularity-time] can be used.
++ `timestamp_value`: A `TIMESTAMP` value to truncate.
++ `timestamp_granularity`: The truncation granularity for a `TIMESTAMP` value.
+ [Date granularities][date-trunc-granularity-date] and
+ [time granularities][date-trunc-granularity-time] can be used.
++ `time_zone`: A time zone to use with the `TIMESTAMP` value.
+ [Time zone parts][date-time-zone-parts] can be used.
+ Use this argument if you want to use a time zone other than
+ the default time zone, which is implementation defined, as part of the
+ truncate operation.
+
+ Note: When truncating a timestamp to `MINUTE`
+ or `HOUR` parts, this function determines the civil time of the
+ timestamp in the specified (or default) time zone
+ and subtracts the minutes and seconds (when truncating to `HOUR`) or the
+ seconds (when truncating to `MINUTE`) from that timestamp.
+ While this provides intuitive results in most cases, the result is
+ non-intuitive near daylight savings transitions that are not hour-aligned.
+
+
+
+**Date granularity definitions**
+ `DAY`: The day in the Gregorian calendar year that contains the
- `DATE` value.
+ value to truncate.
+ `WEEK`: The first day in the week that contains the
- `DATE` value. Weeks begin on Sundays. `WEEK` is equivalent to
+ value to truncate. Weeks begin on Sundays. `WEEK` is equivalent to
`WEEK(SUNDAY)`.
+ `WEEK(WEEKDAY)`: The first day in the week that contains the
- `DATE` value. Weeks begin on `WEEKDAY`. `WEEKDAY` must be one of the
+ value to truncate. Weeks begin on `WEEKDAY`. `WEEKDAY` must be one of the
following: `SUNDAY`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`,
or `SATURDAY`.
+ `ISOWEEK`: The first day in the [ISO 8601 week][ISO-8601-week] that contains
- the `DATE` value. The ISO week begins on
+ the value to truncate. The ISO week begins on
Monday. The first ISO week of each ISO year contains the first Thursday of the
corresponding Gregorian calendar year.
+ `MONTH`: The first day in the month that contains the
- `DATE` value.
+ value to truncate.
+ `QUARTER`: The first day in the quarter that contains the
- `DATE` value.
+ value to truncate.
+ `YEAR`: The first day in the year that contains the
- `DATE` value.
+ value to truncate.
+ `ISOYEAR`: The first day in the [ISO 8601][ISO-8601] week-numbering year
- that contains the `DATE` value. The ISO year is the
+ that contains the value to truncate. The ISO year is the
Monday of the first week where Thursday belongs to the corresponding
Gregorian calendar year.
@@ -11678,9 +12987,44 @@ is always rounded to the beginning of `granularity`.
+
+
+**Time granularity definitions**
+
+ + `NANOSECOND`: If used, nothing is truncated from the value.
+
+ + `MICROSECOND`: The nearest lesser than or equal microsecond.
+
+ + `MILLISECOND`: The nearest lesser than or equal millisecond.
+
+ + `SECOND`: The nearest lesser than or equal second.
+
+ + `MINUTE`: The nearest lesser than or equal minute.
+
+ + `HOUR`: The nearest lesser than or equal hour.
+
+
+
+**Time zone part definitions**
+
++ `MINUTE`
++ `HOUR`
++ `DAY`
++ `WEEK`
++ `WEEK(WEEKDAY)`
++ `ISOWEEK`
++ `MONTH`
++ `QUARTER`
++ `YEAR`
++ `ISOYEAR`
+
+**Details**
+
+The resulting value is always rounded to the beginning of `granularity`.
+
**Return Data Type**
-`DATE`
+The same data type as the first argument passed into this function.
**Examples**
@@ -11729,6 +13073,12 @@ SELECT
*------------------+----------------*/
```
+[date-trunc-granularity-date]: #date_trunc_granularity_date
+
+[date-trunc-granularity-time]: #date_trunc_granularity_time
+
+[date-time-zone-parts]: #date_time_zone_parts
+
### `EXTRACT`
```sql
@@ -11848,14 +13198,17 @@ FORMAT_DATE(format_string, date_expr)
**Description**
-Formats the `date_expr` according to the specified `format_string`.
+Formats a `DATE` value according to a specified format string.
+
+**Definitions**
-See [Supported Format Elements For DATE][date-format-elements]
-for a list of format elements that this function supports.
++ `format_string`: A `STRING` value that contains the
+ [format elements][date-format-elements] to use with `date_expr`.
++ `date_expr`: A `DATE` value that represents the date to format.
**Return Data Type**
-STRING
+`STRING`
**Examples**
@@ -11995,14 +13348,19 @@ PARSE_DATE(format_string, date_string)
**Description**
-Converts a [string representation of date][date-format] to a
-`DATE` object.
+Converts a `STRING` value to a `DATE` value.
-`format_string` contains the [format elements][date-format-elements]
-that define how `date_string` is formatted. Each element in
-`date_string` must have a corresponding element in `format_string`. The
-location of each element in `format_string` must match the location of
-each element in `date_string`.
+**Definitions**
+
++ `format_string`: A `STRING` value that contains the
+ [format elements][date-format-elements] to use with `date_string`.
++ `date_string`: A `STRING` value that represents the date to parse.
+
+**Details**
+
+Each element in `date_string` must have a corresponding element in
+`format_string`. The location of each element in `format_string` must match the
+location of each element in `date_string`.
```sql
-- This works because elements on both sides match.
@@ -12020,20 +13378,20 @@ SELECT PARSE_DATE('%F', '2000-12-30');
When using `PARSE_DATE`, keep the following in mind:
-+ **Unspecified fields.** Any unspecified field is initialized from `1970-01-01`.
-+ **Case insensitivity.** Names, such as `Monday`, `February`, and so on, are
++ Unspecified fields. Any unspecified field is initialized from `1970-01-01`.
++ Case insensitivity. Names, such as `Monday`, `February`, and so on, are
case insensitive.
-+ **Whitespace.** One or more consecutive white spaces in the format string
++ Whitespace. One or more consecutive white spaces in the format string
matches zero or more consecutive white spaces in the date string. In
addition, leading and trailing white spaces in the date string are always
allowed -- even if they are not in the format string.
-+ **Format precedence.** When two (or more) format elements have overlapping
++ Format precedence. When two (or more) format elements have overlapping
information (for example both `%F` and `%Y` affect the year), the last one
generally overrides any earlier ones.
**Return Data Type**
-DATE
+`DATE`
**Examples**
@@ -12108,7 +13466,6 @@ ZetaSQL supports the following datetime functions.
CURRENT_DATETIME
-
|
Returns the current date and time as a DATETIME value.
@@ -12117,7 +13474,6 @@ ZetaSQL supports the following datetime functions.
|
DATETIME
-
|
Constructs a DATETIME value.
@@ -12126,7 +13482,6 @@ ZetaSQL supports the following datetime functions.
|
DATETIME_ADD
-
|
Adds a specified time interval to a DATETIME value.
@@ -12135,7 +13490,6 @@ ZetaSQL supports the following datetime functions.
|
DATETIME_DIFF
-
|
Gets the number of unit boundaries between two DATETIME values
@@ -12145,7 +13499,6 @@ ZetaSQL supports the following datetime functions.
|
DATETIME_SUB
-
|
Subtracts a specified time interval from a DATETIME value.
@@ -12154,16 +13507,18 @@ ZetaSQL supports the following datetime functions.
|
DATETIME_TRUNC
-
|
- Truncates a DATETIME value.
+
+ Truncates a DATETIME or
+ TIMESTAMP value at a particular
+ granularity.
+
|
EXTRACT
-
|
Extracts part of a date and time from a DATETIME value.
@@ -12172,7 +13527,6 @@ ZetaSQL supports the following datetime functions.
|
FORMAT_DATETIME
-
|
Formats a DATETIME value according to a specified
@@ -12182,7 +13536,6 @@ ZetaSQL supports the following datetime functions.
|
LAST_DAY
-
|
Gets the last day in a specified time period that contains a
@@ -12192,10 +13545,10 @@ ZetaSQL supports the following datetime functions.
|
PARSE_DATETIME
-
|
Converts a STRING value to a DATETIME value.
+
|
@@ -12523,61 +13876,73 @@ SELECT
### `DATETIME_TRUNC`
```sql
-DATETIME_TRUNC(datetime_expression, granularity)
+DATETIME_TRUNC(datetime_value, datetime_granularity)
+```
+
+```sql
+DATETIME_TRUNC(timestamp_value, timestamp_granularity[, time_zone])
```
**Description**
-Truncates a `DATETIME` value at a particular time granularity. The `DATETIME`
-value is always rounded to the beginning of `granularity`.
+Truncates a `DATETIME` or `TIMESTAMP` value at a particular granularity.
**Definitions**
-+ `datetime_expression`: The `DATETIME` value to truncate.
-+ `granularity`: The datetime part that represents the granularity. If
- you passed in a `DATETIME` value for the first argument, `granularity` can
- be:
-
- + `NANOSECOND`: If used, nothing is truncated from the value.
-
- + `MICROSECOND`: The nearest lesser than or equal microsecond.
-
- + `MILLISECOND`: The nearest lesser than or equal millisecond.
++ `datetime_value`: A `DATETIME` value to truncate.
++ `datetime_granularity`: The truncation granularity for a `DATETIME` value.
+ [Date granularities][datetime-trunc-granularity-date] and
+ [time granularities][datetime-trunc-granularity-time] can be used.
++ `timestamp_value`: A `TIMESTAMP` value to truncate.
++ `timestamp_granularity`: The truncation granularity for a `TIMESTAMP` value.
+ [Date granularities][datetime-trunc-granularity-date] and
+ [time granularities][datetime-trunc-granularity-time] can be used.
++ `time_zone`: A time zone to use with the `TIMESTAMP` value.
+ [Time zone parts][datetime-time-zone-parts] can be used.
+ Use this argument if you want to use a time zone other than
+ the default time zone, which is implementation defined, as part of the
+ truncate operation.
- + `SECOND`: The nearest lesser than or equal second.
+ Note: When truncating a timestamp to `MINUTE`
+ or `HOUR` parts, this function determines the civil time of the
+ timestamp in the specified (or default) time zone
+ and subtracts the minutes and seconds (when truncating to `HOUR`) or the
+ seconds (when truncating to `MINUTE`) from that timestamp.
+ While this provides intuitive results in most cases, the result is
+ non-intuitive near daylight savings transitions that are not hour-aligned.
- + `MINUTE`: The nearest lesser than or equal minute.
+
- + `HOUR`: The nearest lesser than or equal hour.
+**Date granularity definitions**
+ `DAY`: The day in the Gregorian calendar year that contains the
- `DATETIME` value.
+ value to truncate.
+ `WEEK`: The first day in the week that contains the
- `DATETIME` value. Weeks begin on Sundays. `WEEK` is equivalent to
+ value to truncate. Weeks begin on Sundays. `WEEK` is equivalent to
`WEEK(SUNDAY)`.
+ `WEEK(WEEKDAY)`: The first day in the week that contains the
- `DATETIME` value. Weeks begin on `WEEKDAY`. `WEEKDAY` must be one of the
+ value to truncate. Weeks begin on `WEEKDAY`. `WEEKDAY` must be one of the
following: `SUNDAY`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`,
or `SATURDAY`.
+ `ISOWEEK`: The first day in the [ISO 8601 week][ISO-8601-week] that contains
- the `DATETIME` value. The ISO week begins on
+ the value to truncate. The ISO week begins on
Monday. The first ISO week of each ISO year contains the first Thursday of the
corresponding Gregorian calendar year.
+ `MONTH`: The first day in the month that contains the
- `DATETIME` value.
+ value to truncate.
+ `QUARTER`: The first day in the quarter that contains the
- `DATETIME` value.
+ value to truncate.
+ `YEAR`: The first day in the year that contains the
- `DATETIME` value.
+ value to truncate.
+ `ISOYEAR`: The first day in the [ISO 8601][ISO-8601] week-numbering year
- that contains the `DATETIME` value. The ISO year is the
+ that contains the value to truncate. The ISO year is the
Monday of the first week where Thursday belongs to the corresponding
Gregorian calendar year.
@@ -12589,9 +13954,44 @@ value is always rounded to the beginning of `granularity`.
+
+
+**Time granularity definitions**
+
+ + `NANOSECOND`: If used, nothing is truncated from the value.
+
+ + `MICROSECOND`: The nearest lesser than or equal microsecond.
+
+ + `MILLISECOND`: The nearest lesser than or equal millisecond.
+
+ + `SECOND`: The nearest lesser than or equal second.
+
+ + `MINUTE`: The nearest lesser than or equal minute.
+
+ + `HOUR`: The nearest lesser than or equal hour.
+
+
+
+**Time zone part definitions**
+
++ `MINUTE`
++ `HOUR`
++ `DAY`
++ `WEEK`
++ `WEEK(WEEKDAY)`
++ `ISOWEEK`
++ `MONTH`
++ `QUARTER`
++ `YEAR`
++ `ISOYEAR`
+
+**Details**
+
+The resulting value is always rounded to the beginning of `granularity`.
+
**Return Data Type**
-`DATETIME`
+The same data type as the first argument passed into this function.
**Examples**
@@ -12644,6 +14044,12 @@ SELECT
*---------------------+----------------*/
```
+[datetime-trunc-granularity-date]: #datetime_trunc_granularity_date
+
+[datetime-trunc-granularity-time]: #datetime_trunc_granularity_time
+
+[datetime-time-zone-parts]: #datetime_time_zone_parts
+
### `EXTRACT`
```sql
@@ -12772,14 +14178,20 @@ FROM table;
### `FORMAT_DATETIME`
```sql
-FORMAT_DATETIME(format_string, datetime_expression)
+FORMAT_DATETIME(format_string, datetime_expr)
```
**Description**
-Formats a `DATETIME` object according to the specified `format_string`. See
-[Supported Format Elements For DATETIME][datetime-format-elements]
-for a list of format elements that this function supports.
+Formats a `DATETIME` value according to a specified format string.
+
+**Definitions**
+
++ `format_string`: A `STRING` value that contains the
+ [format elements][datetime-format-elements] to use with
+ `datetime_expr`.
++ `datetime_expr`: A `DATETIME` value that represents the date and time to
+ format.
**Return Data Type**
@@ -12929,14 +14341,20 @@ PARSE_DATETIME(format_string, datetime_string)
**Description**
-Converts a [string representation of a datetime][datetime-format] to a
-`DATETIME` object.
+Converts a `STRING` value to a `DATETIME` value.
+
+**Definitions**
+
++ `format_string`: A `STRING` value that contains the
+ [format elements][datetime-format-elements] to use with `datetime_string`.
++ `datetime_string`: A `STRING` value that represents the date and time to
+ parse.
+
+**Details**
-`format_string` contains the [format elements][datetime-format-elements]
-that define how `datetime_string` is formatted. Each element in
-`datetime_string` must have a corresponding element in `format_string`. The
-location of each element in `format_string` must match the location of
-each element in `datetime_string`.
+Each element in `datetime_string` must have a corresponding element in
+`format_string`. The location of each element in `format_string` must match the
+location of each element in `datetime_string`.
```sql
-- This works because elements on both sides match.
@@ -12954,23 +14372,23 @@ SELECT PARSE_DATETIME("%c", "Thu Dec 25 07:30:00 2008");
`PARSE_DATETIME` parses `string` according to the following rules:
-+ **Unspecified fields.** Any unspecified field is initialized from
++ Unspecified fields. Any unspecified field is initialized from
`1970-01-01 00:00:00.0`. For example, if the year is unspecified then it
defaults to `1970`.
-+ **Case insensitivity.** Names, such as `Monday` and `February`,
++ Case insensitivity. Names, such as `Monday` and `February`,
are case insensitive.
-+ **Whitespace.** One or more consecutive white spaces in the format string
++ Whitespace. One or more consecutive white spaces in the format string
matches zero or more consecutive white spaces in the
`DATETIME` string. Leading and trailing
white spaces in the `DATETIME` string are always
allowed, even if they are not in the format string.
-+ **Format precedence.** When two or more format elements have overlapping
++ Format precedence. When two or more format elements have overlapping
information, the last one generally overrides any earlier ones, with some
exceptions. For example, both `%F` and `%Y` affect the year, so the earlier
element overrides the later. See the descriptions
of `%s`, `%C`, and `%y` in
[Supported Format Elements For DATETIME][datetime-format-elements].
-+ **Format divergence.** `%p` can be used with `am`, `AM`, `pm`, and `PM`.
++ Format divergence. `%p` can be used with `am`, `AM`, `pm`, and `PM`.
**Return Data Type**
@@ -13039,7 +14457,6 @@ ZetaSQL supports the following debugging functions.
ERROR
-
|
Produces an error with a custom error message.
@@ -13048,7 +14465,6 @@ ZetaSQL supports the following debugging functions.
|
IFERROR
-
|
Evaluates a try expression, and if an evaluation error is produced, returns
@@ -13058,7 +14474,6 @@ ZetaSQL supports the following debugging functions.
|
ISERROR
-
|
Evaluates a try expression, and if an evaluation error is produced, returns
@@ -13068,7 +14483,6 @@ ZetaSQL supports the following debugging functions.
|
NULLIFERROR
-
|
Evaluates a try expression, and if an evaluation error is produced, returns
@@ -13523,20 +14937,19 @@ determine the optimal privacy parameters for your dataset and organization.
|
- AVG
-
+ | AVG (Differential Privacy)
|
DIFFERENTIAL_PRIVACY -supported AVG .
Gets the differentially-private average of non-NULL ,
non-NaN values in a query with a
DIFFERENTIAL_PRIVACY clause.
+
|
- COUNT
-
+ | COUNT (Differential Privacy)
|
DIFFERENTIAL_PRIVACY -supported COUNT .
@@ -13547,46 +14960,46 @@ determine the optimal privacy parameters for your dataset and organization.
Signature 2: Gets the differentially-private count of rows with a
non-NULL expression in a query with a
DIFFERENTIAL_PRIVACY clause.
+
|
- PERCENTILE_CONT
-
+ | PERCENTILE_CONT (Differential Privacy)
|
DIFFERENTIAL_PRIVACY -supported PERCENTILE_CONT .
Computes a differentially-private percentile across privacy unit columns
in a query with a DIFFERENTIAL_PRIVACY clause.
+
|
- SUM
-
+ | SUM (Differential Privacy)
|
DIFFERENTIAL_PRIVACY -supported SUM .
Gets the differentially-private sum of non-NULL ,
non-NaN values in a query with a
DIFFERENTIAL_PRIVACY clause.
+
|
- VAR_POP
-
+ | VAR_POP (Differential Privacy)
|
- DIFFERENTIAL_PRIVACY -supported VAR_POP .
+ DIFFERENTIAL_PRIVACY -supported VAR_POP .
Computes the differentially-private population (biased) variance of values
in a query with a DIFFERENTIAL_PRIVACY clause.
+
|
ANON_AVG
-
|
Deprecated.
@@ -13598,7 +15011,6 @@ determine the optimal privacy parameters for your dataset and organization.
|
ANON_COUNT
-
|
Deprecated.
@@ -13616,7 +15028,6 @@ determine the optimal privacy parameters for your dataset and organization.
|
ANON_PERCENTILE_CONT
-
|
Deprecated.
@@ -13627,7 +15038,6 @@ determine the optimal privacy parameters for your dataset and organization.
|
ANON_QUANTILES
-
|
Deprecated.
@@ -13638,7 +15048,6 @@ determine the optimal privacy parameters for your dataset and organization.
|
ANON_STDDEV_POP
-
|
Deprecated.
@@ -13649,7 +15058,6 @@ determine the optimal privacy parameters for your dataset and organization.
|
ANON_SUM
-
|
Deprecated.
@@ -13661,7 +15069,6 @@ determine the optimal privacy parameters for your dataset and organization.
|
ANON_VAR_POP
-
|
Deprecated.
@@ -13706,9 +15113,514 @@ and can support the following arguments:
**Examples**
-The following differentially private query gets the average number of each item
-requested per professor. Smaller aggregations might not be included. This query
-references a table called [`professors`][dp-example-tables].
+The following differentially private query gets the average number of each item
+requested per professor. Smaller aggregations might not be included. This query
+references a table called [`professors`][dp-example-tables].
+
+```sql
+-- With noise, using the epsilon parameter.
+SELECT
+ WITH DIFFERENTIAL_PRIVACY
+ OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
+ item,
+ AVG(quantity, contribution_bounds_per_group => (0,100)) average_quantity
+FROM professors
+GROUP BY item;
+
+-- These results will change each time you run the query.
+-- Smaller aggregations might be removed.
+/*----------+------------------*
+ | item | average_quantity |
+ +----------+------------------+
+ | pencil | 38.5038356810269 |
+ | pen | 13.4725028762032 |
+ *----------+------------------*/
+```
+
+```sql
+-- Without noise, using the epsilon parameter.
+-- (this un-noised version is for demonstration only)
+SELECT
+ WITH DIFFERENTIAL_PRIVACY
+ OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
+ item,
+ AVG(quantity) average_quantity
+FROM professors
+GROUP BY item;
+
+-- These results will not change when you run the query.
+/*----------+------------------*
+ | item | average_quantity |
+ +----------+------------------+
+ | scissors | 8 |
+ | pencil | 40 |
+ | pen | 18.5 |
+ *----------+------------------*/
+```
+
+The following differentially private query gets the average number of each item
+requested per professor. Smaller aggregations might not be included. This query
+references a view called [`view_on_professors`][dp-example-views].
+
+```sql
+-- With noise, using the epsilon parameter.
+SELECT
+ WITH DIFFERENTIAL_PRIVACY
+ OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
+ item,
+ AVG(quantity, contribution_bounds_per_group=>(0, 100)) average_quantity
+FROM {{USERNAME}}.view_on_professors
+GROUP BY item;
+
+-- These results will change each time you run the query.
+-- Smaller aggregations might be removed.
+/*----------+------------------*
+ | item | average_quantity |
+ +----------+------------------+
+ | pencil | 38.5038356810269 |
+ | pen | 13.4725028762032 |
+ *----------+------------------*/
+```
+
+```sql
+-- Without noise, using the epsilon parameter.
+-- (this un-noised version is for demonstration only)
+SELECT
+ WITH DIFFERENTIAL_PRIVACY
+ OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1)
+ item,
+ AVG(quantity) average_quantity
+FROM {{USERNAME}}.view_on_professors
+GROUP BY item;
+
+-- These results will not change when you run the query.
+/*----------+------------------*
+ | item | average_quantity |
+ +----------+------------------+
+ | scissors | 8 |
+ | pencil | 40 |
+ | pen | 18.5 |
+ *----------+------------------*/
+```
+
+Note: For more information about when and when not to use
+noise, see [Remove noise][dp-noise].
+
+[dp-example-tables]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_tables
+
+[dp-noise]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#eliminate_noise
+
+[dp-clamped-named]: #dp_clamped_named
+
+[dp-syntax]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_clause
+
+[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+
+### `COUNT` (`DIFFERENTIAL_PRIVACY`)
+
+
++ [Signature 1](#dp_count_signature1): Returns the number of rows in a
+ differentially private `FROM` clause.
++ [Signature 2](#dp_count_signature2): Returns the number of non-`NULL`
+ values in an expression.
+
+#### Signature 1
+
+
+```sql
+WITH DIFFERENTIAL_PRIVACY ...
+ COUNT(
+ *,
+ [ contribution_bounds_per_group => (lower_bound, upper_bound) ]
+ )
+```
+
+**Description**
+
+Returns the number of rows in the
+[differentially private][dp-from-clause] `FROM` clause. The final result
+is an aggregation across a privacy unit column.
+
+This function must be used with the [`DIFFERENTIAL_PRIVACY` clause][dp-syntax]
+and can support the following argument:
+
++ `contribution_bounds_per_group`: A named argument with a
+ [contribution bound][dp-clamped-named].
+ Performs clamping for each group separately before performing intermediate
+ grouping on the privacy unit column.
+
+**Return type**
+
+`INT64`
+
+**Examples**
+
+The following differentially private query counts the number of requests for
+each item. This query references a table called
+[`professors`][dp-example-tables].
+
+```sql
+-- With noise, using the epsilon parameter.
+SELECT
+ WITH DIFFERENTIAL_PRIVACY
+ OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
+ item,
+ COUNT(*, contribution_bounds_per_group=>(0, 100)) times_requested
+FROM professors
+GROUP BY item;
+
+-- These results will change each time you run the query.
+-- Smaller aggregations might be removed.
+/*----------+-----------------*
+ | item | times_requested |
+ +----------+-----------------+
+ | pencil | 5 |
+ | pen | 2 |
+ *----------+-----------------*/
+```
+
+```sql
+-- Without noise, using the epsilon parameter.
+-- (this un-noised version is for demonstration only)
+SELECT
+ WITH DIFFERENTIAL_PRIVACY
+ OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
+ item,
+ COUNT(*, contribution_bounds_per_group=>(0, 100)) times_requested
+FROM professors
+GROUP BY item;
+
+-- These results will not change when you run the query.
+/*----------+-----------------*
+ | item | times_requested |
+ +----------+-----------------+
+ | scissors | 1 |
+ | pencil | 4 |
+ | pen | 3 |
+ *----------+-----------------*/
+```
+
+The following differentially private query counts the number of requests for
+each item. This query references a view called
+[`view_on_professors`][dp-example-views].
+
+```sql
+-- With noise, using the epsilon parameter.
+SELECT
+ WITH DIFFERENTIAL_PRIVACY
+ OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
+ item,
+ COUNT(*, contribution_bounds_per_group=>(0, 100)) times_requested
+FROM {{USERNAME}}.view_on_professors
+GROUP BY item;
+
+-- These results will change each time you run the query.
+-- Smaller aggregations might be removed.
+/*----------+-----------------*
+ | item | times_requested |
+ +----------+-----------------+
+ | pencil | 5 |
+ | pen | 2 |
+ *----------+-----------------*/
+```
+
+```sql
+-- Without noise, using the epsilon parameter.
+-- (this un-noised version is for demonstration only)
+SELECT
+ WITH DIFFERENTIAL_PRIVACY
+ OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1)
+ item,
+ COUNT(*, contribution_bounds_per_group=>(0, 100)) times_requested
+FROM {{USERNAME}}.view_on_professors
+GROUP BY item;
+
+-- These results will not change when you run the query.
+/*----------+-----------------*
+ | item | times_requested |
+ +----------+-----------------+
+ | scissors | 1 |
+ | pencil | 4 |
+ | pen | 3 |
+ *----------+-----------------*/
+```
+
+Note: For more information about when and when not to use
+noise, see [Remove noise][dp-noise].
+
+#### Signature 2
+
+
+```sql
+WITH DIFFERENTIAL_PRIVACY ...
+ COUNT(
+ expression,
+ [contribution_bounds_per_group => (lower_bound, upper_bound)]
+ )
+```
+
+**Description**
+
+Returns the number of non-`NULL` expression values. The final result is an
+aggregation across a privacy unit column.
+
+This function must be used with the [`DIFFERENTIAL_PRIVACY` clause][dp-syntax]
+and can support these arguments:
+
++ `expression`: The input expression. This expression can be any
+ numeric input type, such as `INT64`.
++ `contribution_bounds_per_group`: A named argument with a
+ [contribution bound][dp-clamped-named].
+ Performs clamping for each group separately before performing intermediate
+ grouping on the privacy unit column.
+
+**Return type**
+
+`INT64`
+
+**Examples**
+
+The following differentially private query counts the number of requests made
+for each type of item. This query references a table called
+[`professors`][dp-example-tables].
+
+```sql
+-- With noise, using the epsilon parameter.
+SELECT
+ WITH DIFFERENTIAL_PRIVACY
+ OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
+ item,
+ COUNT(item, contribution_bounds_per_group => (0,100)) times_requested
+FROM professors
+GROUP BY item;
+
+-- These results will change each time you run the query.
+-- Smaller aggregations might be removed.
+/*----------+-----------------*
+ | item | times_requested |
+ +----------+-----------------+
+ | pencil | 5 |
+ | pen | 2 |
+ *----------+-----------------*/
+```
+
+```sql
+-- Without noise, using the epsilon parameter.
+-- (this un-noised version is for demonstration only)
+SELECT
+ WITH DIFFERENTIAL_PRIVACY
+ OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
+ item,
+ COUNT(item, contribution_bounds_per_group => (0,100)) times_requested
+FROM professors
+GROUP BY item;
+
+-- These results will not change when you run the query.
+/*----------+-----------------*
+ | item | times_requested |
+ +----------+-----------------+
+ | scissors | 1 |
+ | pencil | 4 |
+ | pen | 3 |
+ *----------+-----------------*/
+```
+
+The following differentially private query counts the number of requests made
+for each type of item. This query references a view called
+[`view_on_professors`][dp-example-views].
+
+```sql
+-- With noise
+SELECT
+ WITH DIFFERENTIAL_PRIVACY
+ OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
+ item,
+ COUNT(item, contribution_bounds_per_group=>(0, 100)) times_requested
+FROM {{USERNAME}}.view_on_professors
+GROUP BY item;
+
+-- These results will change each time you run the query.
+-- Smaller aggregations might be removed.
+/*----------+-----------------*
+ | item | times_requested |
+ +----------+-----------------+
+ | pencil | 5 |
+ | pen | 2 |
+ *----------+-----------------*/
+```
+
+```sql
+--Without noise (this un-noised version is for demonstration only)
+SELECT
+ WITH DIFFERENTIAL_PRIVACY
+ OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1)
+ item,
+ COUNT(item, contribution_bounds_per_group=>(0, 100)) times_requested
+FROM {{USERNAME}}.view_on_professors
+GROUP BY item;
+
+-- These results will not change when you run the query.
+/*----------+-----------------*
+ | item | times_requested |
+ +----------+-----------------+
+ | scissors | 1 |
+ | pencil | 4 |
+ | pen | 3 |
+ *----------+-----------------*/
+```
+
+Note: For more information about when and when not to use
+noise, see [Remove noise][dp-noise].
+
+[dp-clamp-implicit]: #dp_implicit_clamping
+
+[dp-from-clause]: https://github.com/google/zetasql/blob/master/docs/differential-privacy.md#dp_from
+
+[dp-example-tables]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_tables
+
+[dp-noise]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#eliminate_noise
+
+[dp-syntax]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_clause
+
+[dp-clamped-named]: #dp_clamped_named
+
+[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+
+### `PERCENTILE_CONT` (`DIFFERENTIAL_PRIVACY`)
+
+
+```sql
+WITH DIFFERENTIAL_PRIVACY ...
+ PERCENTILE_CONT(
+ expression,
+ percentile,
+ contribution_bounds_per_row => (lower_bound, upper_bound)
+ )
+```
+
+**Description**
+
+Takes an expression and computes a percentile for it. The final result is an
+aggregation across privacy unit columns.
+
+This function must be used with the [`DIFFERENTIAL_PRIVACY` clause][dp-syntax]
+and can support these arguments:
+
++ `expression`: The input expression. This can be most numeric input types,
+ such as `INT64`. `NULL` values are always ignored.
++ `percentile`: The percentile to compute. The percentile must be a literal in
+ the range `[0, 1]`.
++ `contribution_bounds_per_row`: A named argument with a
+ [contribution bound][dp-clamped-named].
+ Performs clamping for each row separately before performing intermediate
+ grouping on the privacy unit column.
+
+`NUMERIC` and `BIGNUMERIC` arguments are not allowed.
+ If you need them, cast them as the
+`DOUBLE` data type first.
+
+**Return type**
+
+`DOUBLE`
+
+**Examples**
+
+The following differentially private query gets the percentile of items
+requested. Smaller aggregations might not be included. This query references a
+table called [`professors`][dp-example-tables].
+
+```sql
+-- With noise, using the epsilon parameter.
+SELECT
+ WITH DIFFERENTIAL_PRIVACY
+ OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
+ item,
+ PERCENTILE_CONT(quantity, 0.5, contribution_bounds_per_row => (0,100)) percentile_requested
+FROM professors
+GROUP BY item;
+
+-- These results will change each time you run the query.
+-- Smaller aggregations might be removed.
+ /*----------+----------------------*
+ | item | percentile_requested |
+ +----------+----------------------+
+ | pencil | 72.00011444091797 |
+ | scissors | 8.000175476074219 |
+ | pen | 23.001075744628906 |
+ *----------+----------------------*/
+```
+
+The following differentially private query gets the percentile of items
+requested. Smaller aggregations might not be included. This query references a
+view called [`view_on_professors`][dp-example-views].
+
+```sql
+-- With noise, using the epsilon parameter.
+SELECT
+ WITH DIFFERENTIAL_PRIVACY
+ OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
+ item,
+ PERCENTILE_CONT(quantity, 0.5, contribution_bounds_per_row=>(0, 100)) percentile_requested
+FROM {{USERNAME}}.view_on_professors
+GROUP BY item;
+
+-- These results will change each time you run the query.
+-- Smaller aggregations might be removed.
+/*----------+----------------------*
+ | item | percentile_requested |
+ +----------+----------------------+
+ | pencil | 72.00011444091797 |
+ | scissors | 8.000175476074219 |
+ | pen | 23.001075744628906 |
+ *----------+----------------------*/
+```
+
+[dp-example-tables]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_tables
+
+[dp-clamped-named]: #dp_clamped_named
+
+[dp-syntax]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_clause
+
+[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+
+### `SUM` (`DIFFERENTIAL_PRIVACY`)
+
+
+```sql
+WITH DIFFERENTIAL_PRIVACY ...
+ SUM(
+ expression,
+ [ contribution_bounds_per_group => (lower_bound, upper_bound) ]
+ )
+```
+
+**Description**
+
+Returns the sum of non-`NULL`, non-`NaN` values in the expression. The final
+result is an aggregation across privacy unit columns.
+
+This function must be used with the [`DIFFERENTIAL_PRIVACY` clause][dp-syntax]
+and can support these arguments:
+
++ `expression`: The input expression. This can be any numeric input type,
+ such as `INT64`. `NULL` values are always ignored.
++ `contribution_bounds_per_group`: A named argument with a
+ [contribution bound][dp-clamped-named]. Performs clamping for each group
+ separately before performing intermediate grouping on the privacy unit column.
+
+**Return type**
+
+One of the following [supertypes][dp-supertype]:
+
++ `INT64`
++ `UINT64`
++ `DOUBLE`
+
+**Examples**
+
+The following differentially private query gets the sum of items requested.
+Smaller aggregations might not be included. This query references a table called
+[`professors`][dp-example-tables].
```sql
-- With noise, using the epsilon parameter.
@@ -13716,18 +15628,18 @@ SELECT
WITH DIFFERENTIAL_PRIVACY
OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
item,
- AVG(quantity, contribution_bounds_per_group => (0,100)) average_quantity
+ SUM(quantity, contribution_bounds_per_group => (0,100)) quantity
FROM professors
GROUP BY item;
-- These results will change each time you run the query.
-- Smaller aggregations might be removed.
-/*----------+------------------*
- | item | average_quantity |
- +----------+------------------+
- | pencil | 38.5038356810269 |
- | pen | 13.4725028762032 |
- *----------+------------------*/
+/*----------+-----------*
+ | item | quantity |
+ +----------+-----------+
+ | pencil | 143 |
+ | pen | 59 |
+ *----------+-----------*/
```
```sql
@@ -13737,23 +15649,23 @@ SELECT
WITH DIFFERENTIAL_PRIVACY
OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
item,
- AVG(quantity) average_quantity
+ SUM(quantity) quantity
FROM professors
GROUP BY item;
-- These results will not change when you run the query.
-/*----------+------------------*
- | item | average_quantity |
- +----------+------------------+
- | scissors | 8 |
- | pencil | 40 |
- | pen | 18.5 |
- *----------+------------------*/
+/*----------+----------*
+ | item | quantity |
+ +----------+----------+
+ | scissors | 8 |
+ | pencil | 144 |
+ | pen | 58 |
+ *----------+----------*/
```
-The following differentially private query gets the average number of each item
-requested per professor. Smaller aggregations might not be included. This query
-references a view called [`view_on_professors`][dp-example-views].
+The following differentially private query gets the sum of items requested.
+Smaller aggregations might not be included. This query references a view called
+[`view_on_professors`][dp-example-views].
```sql
-- With noise, using the epsilon parameter.
@@ -13761,18 +15673,18 @@ SELECT
WITH DIFFERENTIAL_PRIVACY
OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
item,
- AVG(quantity, contribution_bounds_per_group=>(0, 100)) average_quantity
+ SUM(quantity, contribution_bounds_per_group=>(0, 100)) quantity
FROM {{USERNAME}}.view_on_professors
GROUP BY item;
-- These results will change each time you run the query.
-- Smaller aggregations might be removed.
-/*----------+------------------*
- | item | average_quantity |
- +----------+------------------+
- | pencil | 38.5038356810269 |
- | pen | 13.4725028762032 |
- *----------+------------------*/
+/*----------+-----------*
+ | item | quantity |
+ +----------+-----------+
+ | pencil | 143 |
+ | pen | 59 |
+ *----------+-----------*/
```
```sql
@@ -13782,190 +15694,248 @@ SELECT
WITH DIFFERENTIAL_PRIVACY
OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1)
item,
- AVG(quantity) average_quantity
+ SUM(quantity) quantity
FROM {{USERNAME}}.view_on_professors
GROUP BY item;
-- These results will not change when you run the query.
-/*----------+------------------*
- | item | average_quantity |
- +----------+------------------+
- | scissors | 8 |
- | pencil | 40 |
- | pen | 18.5 |
- *----------+------------------*/
+/*----------+----------*
+ | item | quantity |
+ +----------+----------+
+ | scissors | 8 |
+ | pencil | 144 |
+ | pen | 58 |
+ *----------+----------*/
```
Note: For more information about when and when not to use
-noise, see [Remove noise][dp-noise].
+noise, see [Remove noise][dp-noise].
[dp-example-tables]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_tables
[dp-noise]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#eliminate_noise
+[dp-supertype]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#supertypes
+
[dp-clamped-named]: #dp_clamped_named
[dp-syntax]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_clause
[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
-### `COUNT` (`DIFFERENTIAL_PRIVACY`)
-
-
-+ [Signature 1](#dp_count_signature1): Returns the number of rows in a
- differentially private `FROM` clause.
-+ [Signature 2](#dp_count_signature2): Returns the number of non-`NULL`
- values in an expression.
-
-#### Signature 1
-
+### `VAR_POP` (`DIFFERENTIAL_PRIVACY`)
+
```sql
WITH DIFFERENTIAL_PRIVACY ...
- COUNT(
- *,
- [ contribution_bounds_per_group => (lower_bound, upper_bound) ]
+ VAR_POP(
+ expression,
+ [ contribution_bounds_per_row => (lower_bound, upper_bound) ]
)
```
**Description**
-Returns the number of rows in the
-[differentially private][dp-from-clause] `FROM` clause. The final result
-is an aggregation across a privacy unit column.
+Takes an expression and computes the population (biased) variance of the values
+in the expression. The final result is an aggregation across
+privacy unit columns between `0` and `+Inf`. You can
+[clamp the input values][dp-clamp-explicit] explicitly, otherwise input values
+are clamped implicitly. Clamping is performed per individual user value.
-This function must be used with the [`DIFFERENTIAL_PRIVACY` clause][dp-syntax]
-and can support the following argument:
+This function must be used with the `DIFFERENTIAL_PRIVACY` clause and
+can support these arguments:
-+ `contribution_bounds_per_group`: A named argument with a
++ `expression`: The input expression. This can be any numeric input type,
+ such as `INT64`. `NULL`s are always ignored.
++ `contribution_bounds_per_row`: A named argument with a
[contribution bound][dp-clamped-named].
- Performs clamping for each group separately before performing intermediate
- grouping on the privacy unit column.
+ Performs clamping for each row separately before performing intermediate
+ grouping on individual user values.
+
+`NUMERIC` and `BIGNUMERIC` arguments are not allowed.
+ If you need them, cast them as the
+`DOUBLE` data type first.
**Return type**
-`INT64`
+`DOUBLE`
**Examples**
-The following differentially private query counts the number of requests for
-each item. This query references a table called
+The following differentially private query gets the
+population (biased) variance of items requested. Smaller aggregations may not
+be included. This query references a table called
[`professors`][dp-example-tables].
```sql
--- With noise, using the epsilon parameter.
+-- With noise
SELECT
WITH DIFFERENTIAL_PRIVACY
OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
item,
- COUNT(*, contribution_bounds_per_group=>(0, 100)) times_requested
+ VAR_POP(quantity, contribution_bounds_per_row => (0,100)) pop_variance
FROM professors
GROUP BY item;
-- These results will change each time you run the query.
--- Smaller aggregations might be removed.
+-- Smaller aggregations may be removed.
/*----------+-----------------*
- | item | times_requested |
+ | item | pop_variance |
+----------+-----------------+
- | pencil | 5 |
- | pen | 2 |
+ | pencil | 642 |
+ | pen | 2.6666666666665 |
+ | scissors | 2500 |
*----------+-----------------*/
```
+The following differentially private query gets the
+population (biased) variance of items requested. Smaller aggregations might not
+be included. This query references a view called
+[`view_on_professors`][dp-example-views].
+
```sql
--- Without noise, using the epsilon parameter.
--- (this un-noised version is for demonstration only)
+-- With noise
SELECT
WITH DIFFERENTIAL_PRIVACY
- OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
+ OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
item,
- COUNT(*, contribution_bounds_per_group=>(0, 100)) times_requested
-FROM professors
+ VAR_POP(quantity, contribution_bounds_per_row=>(0, 100)) pop_variance
+FROM {{USERNAME}}.view_on_professors
GROUP BY item;
--- These results will not change when you run the query.
+-- These results will change each time you run the query.
+-- Smaller aggregations might be removed.
/*----------+-----------------*
- | item | times_requested |
+ | item | pop_variance |
+----------+-----------------+
- | scissors | 1 |
- | pencil | 4 |
- | pen | 3 |
+ | pencil | 642 |
+ | pen | 2.6666666666665 |
+ | scissors | 2500 |
*----------+-----------------*/
```
-The following differentially private query counts the number of requests for
-each item. This query references a view called
-[`view_on_professors`][dp-example-views].
+[dp-clamp-explicit]: #dp_explicit_clamping
+
+[dp-example-tables]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_tables
+
+[dp-clamped-named]: #dp_clamped_named
+
+[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+
+### `ANON_AVG` (DEPRECATED)
+
+
+Warning: This function has been deprecated. Use
+`AVG` (differential privacy) instead.
+
+```sql
+WITH ANONYMIZATION ...
+ ANON_AVG(expression [clamped_between_clause])
+
+clamped_between_clause:
+ CLAMPED BETWEEN lower_bound AND upper_bound
+```
+
+**Description**
+
+Returns the average of non-`NULL`, non-`NaN` values in the expression.
+This function first computes the average per privacy unit column, and then
+computes the final result by averaging these averages.
+
+This function must be used with the `ANONYMIZATION` clause and
+can support these arguments:
+
++ `expression`: The input expression. This can be any numeric input type,
+ such as `INT64`.
++ `clamped_between_clause`: Perform [clamping][dp-clamp-between] per
+ privacy unit column averages.
+
+**Return type**
+
+`DOUBLE`
+
+**Examples**
+
+The following differentially private query gets the average number of each item
+requested per professor. Smaller aggregations might not be included. This query
+references a view called [`view_on_professors`][dp-example-views].
```sql
-- With noise, using the epsilon parameter.
SELECT
- WITH DIFFERENTIAL_PRIVACY
+ WITH ANONYMIZATION
OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
item,
- COUNT(*, contribution_bounds_per_group=>(0, 100)) times_requested
+ ANON_AVG(quantity CLAMPED BETWEEN 0 AND 100) average_quantity
FROM {{USERNAME}}.view_on_professors
GROUP BY item;
-- These results will change each time you run the query.
-- Smaller aggregations might be removed.
-/*----------+-----------------*
- | item | times_requested |
- +----------+-----------------+
- | pencil | 5 |
- | pen | 2 |
- *----------+-----------------*/
+/*----------+------------------*
+ | item | average_quantity |
+ +----------+------------------+
+ | pencil | 38.5038356810269 |
+ | pen | 13.4725028762032 |
+ *----------+------------------*/
```
```sql
-- Without noise, using the epsilon parameter.
-- (this un-noised version is for demonstration only)
SELECT
- WITH DIFFERENTIAL_PRIVACY
+ WITH ANONYMIZATION
OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1)
item,
- COUNT(*, contribution_bounds_per_group=>(0, 100)) times_requested
+ ANON_AVG(quantity) average_quantity
FROM {{USERNAME}}.view_on_professors
GROUP BY item;
-- These results will not change when you run the query.
-/*----------+-----------------*
- | item | times_requested |
- +----------+-----------------+
- | scissors | 1 |
- | pencil | 4 |
- | pen | 3 |
- *----------+-----------------*/
+/*----------+------------------*
+ | item | average_quantity |
+ +----------+------------------+
+ | scissors | 8 |
+ | pencil | 40 |
+ | pen | 18.5 |
+ *----------+------------------*/
```
-Note: For more information about when and when not to use
-noise, see [Remove noise][dp-noise].
+Note: For more information about when and when not to use
+noise, see [Remove noise][dp-noise].
-#### Signature 2
-
+[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+
+[dp-noise]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#eliminate_noise
+
+[dp-clamp-between]: #dp_clamp_between
+
+### `ANON_COUNT` (DEPRECATED)
+
+
+Warning: This function has been deprecated. Use
+`COUNT` (differential privacy) instead.
+
++ [Signature 1](#anon_count_signature1)
++ [Signature 2](#anon_count_signature2)
+
+#### Signature 1
+
```sql
-WITH DIFFERENTIAL_PRIVACY ...
- COUNT(
- expression,
- [contribution_bounds_per_group => (lower_bound, upper_bound)]
- )
+WITH ANONYMIZATION ...
+ ANON_COUNT(*)
```
**Description**
-Returns the number of non-`NULL` expression values. The final result is an
-aggregation across a privacy unit column.
-
-This function must be used with the [`DIFFERENTIAL_PRIVACY` clause][dp-syntax]
-and can support these arguments:
+Returns the number of rows in the
+[differentially private][dp-from-clause] `FROM` clause. The final result
+is an aggregation across privacy unit columns.
+[Input values are clamped implicitly][dp-clamp-implicit]. Clamping is
+performed per privacy unit column.
-+ `expression`: The input expression. This expression can be any
- numeric input type, such as `INT64`.
-+ `contribution_bounds_per_group`: A named argument with a
- [contribution bound][dp-clamped-named].
- Performs clamping per each group separately before performing intermediate
- grouping on the privacy unit column.
+This function must be used with the `ANONYMIZATION` clause.
**Return type**
@@ -13973,18 +15943,18 @@ and can support these arguments:
**Examples**
-The following differentially private query counts the number of requests made
-for each type of item. This query references a table called
-[`professors`][dp-example-tables].
+The following differentially private query counts the number of requests for
+each item. This query references a view called
+[`view_on_professors`][dp-example-views].
```sql
-- With noise, using the epsilon parameter.
SELECT
- WITH DIFFERENTIAL_PRIVACY
- OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
+ WITH ANONYMIZATION
+ OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
item,
- COUNT(item, contribution_bounds_per_group => (0,100)) times_requested
-FROM professors
+ ANON_COUNT(*) times_requested
+FROM {{USERNAME}}.view_on_professors
GROUP BY item;
-- These results will change each time you run the query.
@@ -14001,11 +15971,11 @@ GROUP BY item;
-- Without noise, using the epsilon parameter.
-- (this un-noised version is for demonstration only)
SELECT
- WITH DIFFERENTIAL_PRIVACY
- OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
+ WITH ANONYMIZATION
+ OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1)
item,
- COUNT(item, contribution_bounds_per_group => (0,100)) times_requested
-FROM professors
+ ANON_COUNT(*) times_requested
+FROM {{USERNAME}}.view_on_professors
GROUP BY item;
-- These results will not change when you run the query.
@@ -14018,6 +15988,36 @@ GROUP BY item;
*----------+-----------------*/
```
+Note: You can learn more about when and when not to use
+noise [here][dp-noise].
+
+#### Signature 2
+
+
+```sql
+WITH ANONYMIZATION ...
+ ANON_COUNT(expression [CLAMPED BETWEEN lower_bound AND upper_bound])
+```
+
+**Description**
+
+Returns the number of non-`NULL` expression values. The final result is an
+aggregation across privacy unit columns.
+
+This function must be used with the `ANONYMIZATION` clause and
+can support these arguments:
+
++ `expression`: The input expression. This can be any numeric input type,
+ such as `INT64`.
++ `CLAMPED BETWEEN` clause:
+ Perform [clamping][dp-clamp-between] per privacy unit column.
+
+**Return type**
+
+`INT64`
+
+**Examples**
+
The following differentially private query counts the number of requests made
for each type of item. This query references a view called
[`view_on_professors`][dp-example-views].
@@ -14025,10 +16025,10 @@ for each type of item. This query references a view called
```sql
-- With noise
SELECT
- WITH DIFFERENTIAL_PRIVACY
+ WITH ANONYMIZATION
OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
item,
- COUNT(item, contribution_bounds_per_group=>(0, 100)) times_requested
+ ANON_COUNT(item CLAMPED BETWEEN 0 AND 100) times_requested
FROM {{USERNAME}}.view_on_professors
GROUP BY item;
@@ -14045,10 +16045,10 @@ GROUP BY item;
```sql
--Without noise (this un-noised version is for demonstration only)
SELECT
- WITH DIFFERENTIAL_PRIVACY
+ WITH ANONYMIZATION
OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1)
item,
- COUNT(item, contribution_bounds_per_group=>(0, 100)) times_requested
+ ANON_COUNT(item CLAMPED BETWEEN 0 AND 100) times_requested
FROM {{USERNAME}}.view_on_professors
GROUP BY item;
@@ -14062,33 +16062,28 @@ GROUP BY item;
*----------+-----------------*/
```
-Note: For more information about when and when not to use
-noise, see [Remove noise][dp-noise].
+Note: You can learn more about when and when not to use
+noise [here][dp-noise].
[dp-clamp-implicit]: #dp_implicit_clamping
-[dp-from-clause]: https://github.com/google/zetasql/blob/master/docs/differential-privacy.md#dp_from
+[dp-from-clause]: https://github.com/google/zetasql/blob/master/docs/differential-privacy.md#dp_from_rules
-[dp-example-tables]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_tables
+[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
[dp-noise]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#eliminate_noise
-[dp-syntax]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_clause
-
-[dp-clamped-named]: #dp_clamped_named
+[dp-clamp-between]: #dp_clamp_between
-[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+### `ANON_PERCENTILE_CONT` (DEPRECATED)
+
-### `PERCENTILE_CONT` (`DIFFERENTIAL_PRIVACY`)
-
+Warning: This function has been deprecated. Use
+`PERCENTILE_CONT` (differential privacy) instead.
```sql
-WITH DIFFERENTIAL_PRIVACY ...
- PERCENTILE_CONT(
- expression,
- percentile,
- contribution_bounds_per_row => (lower_bound, upper_bound)
- )
+WITH ANONYMIZATION ...
+ ANON_PERCENTILE_CONT(expression, percentile [CLAMPED BETWEEN lower_bound AND upper_bound])
```
**Description**
@@ -14096,17 +16091,15 @@ WITH DIFFERENTIAL_PRIVACY ...
Takes an expression and computes a percentile for it. The final result is an
aggregation across privacy unit columns.
-This function must be used with the [`DIFFERENTIAL_PRIVACY` clause][dp-syntax]
-and can support these arguments:
+This function must be used with the `ANONYMIZATION` clause and
+can support these arguments:
+ `expression`: The input expression. This can be most numeric input types,
- such as `INT64`. `NULL` values are always ignored.
+ such as `INT64`. `NULL`s are always ignored.
+ `percentile`: The percentile to compute. The percentile must be a literal in
- the range `[0, 1]`.
-+ `contribution_bounds_per_row`: A named argument with a
- [contribution bounds][dp-clamped-named].
- Performs clamping for each row separately before performing intermediate
- grouping on the privacy unit column.
+  the range [0, 1].
++ `CLAMPED BETWEEN` clause:
+ Perform [clamping][dp-clamp-between] per privacy unit column.
`NUMERIC` and `BIGNUMERIC` arguments are not allowed.
If you need them, cast them as the
@@ -14120,71 +16113,173 @@ and can support these arguments:
The following differentially private query gets the percentile of items
requested. Smaller aggregations might not be included. This query references a
-view called [`professors`][dp-example-tables].
+view called [`view_on_professors`][dp-example-views].
```sql
-- With noise, using the epsilon parameter.
SELECT
- WITH DIFFERENTIAL_PRIVACY
- OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
+ WITH ANONYMIZATION
+ OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
item,
- PERCENTILE_CONT(quantity, 0.5, contribution_bounds_per_row => (0,100)) percentile_requested
-FROM professors
+ ANON_PERCENTILE_CONT(quantity, 0.5 CLAMPED BETWEEN 0 AND 100) percentile_requested
+FROM {{USERNAME}}.view_on_professors
+GROUP BY item;
+
+-- These results will change each time you run the query.
+-- Smaller aggregations might be removed.
+/*----------+----------------------*
+ | item | percentile_requested |
+ +----------+----------------------+
+ | pencil | 72.00011444091797 |
+ | scissors | 8.000175476074219 |
+ | pen | 23.001075744628906 |
+ *----------+----------------------*/
+```
+
+[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+
+[dp-clamp-between]: #dp_clamp_between
+
+### `ANON_QUANTILES` (DEPRECATED)
+
+
+Warning: This function has been deprecated. Use
+`QUANTILES` (differential privacy) instead.
+
+```sql
+WITH ANONYMIZATION ...
+ ANON_QUANTILES(expression, number CLAMPED BETWEEN lower_bound AND upper_bound)
+```
+
+**Description**
+
+Returns an array of differentially private quantile boundaries for values in
+`expression`. The first element in the return value is the
+minimum quantile boundary and the last element is the maximum quantile boundary.
+The returned results are aggregations across privacy unit columns.
+
+This function must be used with the `ANONYMIZATION` clause and
+can support these arguments:
+
++ `expression`: The input expression. This can be most numeric input types,
+ such as `INT64`. `NULL`s are always ignored.
++ `number`: The number of quantiles to create. This must be an `INT64`.
++ `CLAMPED BETWEEN` clause:
+ Perform [clamping][dp-clamp-between] per privacy unit column.
+
+`NUMERIC` and `BIGNUMERIC` arguments are not allowed.
+ If you need them, cast them as the
+`DOUBLE` data type first.
+
+**Return type**
+
+`ARRAY`<`DOUBLE`>
+
+**Examples**
+
+The following differentially private query gets the five quantile boundaries of
+the four quartiles of the number of items requested. Smaller aggregations
+might not be included. This query references a view called
+[`view_on_professors`][dp-example-views].
+
+```sql
+-- With noise, using the epsilon parameter.
+SELECT
+ WITH ANONYMIZATION
+ OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
+ item,
+ ANON_QUANTILES(quantity, 4 CLAMPED BETWEEN 0 AND 100) quantiles_requested
+FROM {{USERNAME}}.view_on_professors
GROUP BY item;
--- These results will change each time you run the query.
--- Smaller aggregations might be removed.
- /*----------+----------------------*
- | item | percentile_requested |
- +----------+----------------------+
- | pencil | 72.00011444091797 |
- | scissors | 8.000175476074219 |
- | pen | 23.001075744628906 |
- *----------+----------------------*/
-```
+-- These results will change each time you run the query.
+-- Smaller aggregations might be removed.
+/*----------+----------------------------------------------------------------------*
+ | item | quantiles_requested |
+ +----------+----------------------------------------------------------------------+
+ | pen | [6.409375,20.647684733072918,41.40625,67.30848524305556,99.80078125] |
+ | pencil | [6.849259,44.010416666666664,62.64204,65.83806818181819,98.59375] |
+ *----------+----------------------------------------------------------------------*/
+```
+
+[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+
+[dp-clamp-between]: #dp_clamp_between
+
+### `ANON_STDDEV_POP` (DEPRECATED)
+
+
+Warning: This function has been deprecated. Use
+`STDDEV_POP` (differential privacy) instead.
+
+```sql
+WITH ANONYMIZATION ...
+ ANON_STDDEV_POP(expression [CLAMPED BETWEEN lower_bound AND upper_bound])
+```
+
+**Description**
+
+Takes an expression and computes the population (biased) standard deviation of
+the values in the expression. The final result is an aggregation across
+privacy unit columns between `0` and `+Inf`.
+
+This function must be used with the `ANONYMIZATION` clause and
+can support these arguments:
+
++ `expression`: The input expression. This can be most numeric input types,
+ such as `INT64`. `NULL`s are always ignored.
++ `CLAMPED BETWEEN` clause:
+  Perform [clamping][dp-clamp-between] per individual entity value.
+
+`NUMERIC` and `BIGNUMERIC` arguments are not allowed.
+ If you need them, cast them as the
+`DOUBLE` data type first.
+
+**Return type**
-The following differentially private query gets the percentile of items
-requested. Smaller aggregations might not be included. This query references a
-view called [`view_on_professors`][dp-example-views].
+`DOUBLE`
+
+**Examples**
+
+The following differentially private query gets the
+population (biased) standard deviation of items requested. Smaller aggregations
+might not be included. This query references a view called
+[`view_on_professors`][dp-example-views].
```sql
-- With noise, using the epsilon parameter.
SELECT
- WITH DIFFERENTIAL_PRIVACY
+ WITH ANONYMIZATION
OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
item,
- PERCENTILE_CONT(quantity, 0.5, contribution_bounds_per_row=>(0, 100)) percentile_requested
+ ANON_STDDEV_POP(quantity CLAMPED BETWEEN 0 AND 100) pop_standard_deviation
FROM {{USERNAME}}.view_on_professors
GROUP BY item;
-- These results will change each time you run the query.
-- Smaller aggregations might be removed.
-/*----------+----------------------*
- | item | percentile_requested |
- +----------+----------------------+
- | pencil | 72.00011444091797 |
- | scissors | 8.000175476074219 |
- | pen | 23.001075744628906 |
- *----------+----------------------*/
+/*----------+------------------------*
+ | item | pop_standard_deviation |
+ +----------+------------------------+
+ | pencil | 25.350871122442054 |
+ | scissors | 50 |
+ | pen | 2 |
+ *----------+------------------------*/
```
-[dp-example-tables]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_tables
-
-[dp-clamped-named]: #dp_clamped_named
+[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
-[dp-syntax]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_clause
+[dp-clamp-between]: #dp_clamp_between
-[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+### `ANON_SUM` (DEPRECATED)
+
-### `SUM` (`DIFFERENTIAL_PRIVACY`)
-
+Warning: This function has been deprecated. Use
+`SUM` (differential privacy) instead.
```sql
-WITH DIFFERENTIAL_PRIVACY ...
- SUM(
- expression,
- [ contribution_bounds_per_group => (lower_bound, upper_bound) ]
- )
+WITH ANONYMIZATION ...
+ ANON_SUM(expression [CLAMPED BETWEEN lower_bound AND upper_bound])
```
**Description**
@@ -14192,14 +16287,13 @@ WITH DIFFERENTIAL_PRIVACY ...
Returns the sum of non-`NULL`, non-`NaN` values in the expression. The final
result is an aggregation across privacy unit columns.
-This function must be used with the [`DIFFERENTIAL_PRIVACY` clause][dp-syntax]
-and can support these arguments:
+This function must be used with the `ANONYMIZATION` clause and
+can support these arguments:
+ `expression`: The input expression. This can be any numeric input type,
- such as `INT64`. `NULL` values are always ignored.
-+ `contribution_bounds_per_group`: A named argument with a
- [contribution bound][dp-clamped-named]. Performs clamping for each group
- separately before performing intermediate grouping on the privacy unit column.
+ such as `INT64`.
++ `CLAMPED BETWEEN` clause:
+ Perform [clamping][dp-clamp-between] per privacy unit column.
**Return type**
@@ -14213,16 +16307,16 @@ One of the following [supertypes][dp-supertype]:
The following differentially private query gets the sum of items requested.
Smaller aggregations might not be included. This query references a view called
-[`professors`][dp-example-tables].
+[`view_on_professors`][dp-example-views].
```sql
-- With noise, using the epsilon parameter.
SELECT
- WITH DIFFERENTIAL_PRIVACY
- OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
+ WITH ANONYMIZATION
+ OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
item,
- SUM(quantity, contribution_bounds_per_group => (0,100)) quantity
-FROM professors
+ ANON_SUM(quantity CLAMPED BETWEEN 0 AND 100) quantity
+FROM {{USERNAME}}.view_on_professors
GROUP BY item;
-- These results will change each time you run the query.
@@ -14239,11 +16333,11 @@ GROUP BY item;
-- Without noise, using the epsilon parameter.
-- (this un-noised version is for demonstration only)
SELECT
- WITH DIFFERENTIAL_PRIVACY
- OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
+ WITH ANONYMIZATION
+ OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1)
item,
- SUM(quantity) quantity
-FROM professors
+ ANON_SUM(quantity) quantity
+FROM {{USERNAME}}.view_on_professors
GROUP BY item;
-- These results will not change when you run the query.
@@ -14256,1059 +16350,1125 @@ GROUP BY item;
*----------+----------*/
```
-The following differentially private query gets the sum of items requested.
-Smaller aggregations might not be included. This query references a view called
+Note: You can learn more about when and when not to use
+noise [here][dp-noise].
+
+[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+
+[dp-noise]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#eliminate_noise
+
+[dp-supertype]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#supertypes
+
+[dp-clamp-between]: #dp_clamp_between
+
+### `ANON_VAR_POP` (DEPRECATED)
+
+
+Warning: This function has been deprecated. Use
+`VAR_POP` (differential privacy) instead.
+
+```sql
+WITH ANONYMIZATION ...
+ ANON_VAR_POP(expression [CLAMPED BETWEEN lower_bound AND upper_bound])
+```
+
+**Description**
+
+Takes an expression and computes the population (biased) variance of the values
+in the expression. The final result is an aggregation across
+privacy unit columns between `0` and `+Inf`. You can
+[clamp the input values][dp-clamp-explicit] explicitly; otherwise, input values
+are clamped implicitly. Clamping is performed per individual entity value.
+
+This function must be used with the `ANONYMIZATION` clause and
+can support these arguments:
+
++ `expression`: The input expression. This can be any numeric input type,
+ such as `INT64`. `NULL`s are always ignored.
++ `CLAMPED BETWEEN` clause:
+  Perform [clamping][dp-clamp-between] per individual entity value.
+
+`NUMERIC` and `BIGNUMERIC` arguments are not allowed.
+ If you need them, cast them as the
+`DOUBLE` data type first.
+
+**Return type**
+
+`DOUBLE`
+
+**Examples**
+
+The following differentially private query gets the
+population (biased) variance of items requested. Smaller aggregations might not
+be included. This query references a view called
[`view_on_professors`][dp-example-views].
```sql
-- With noise, using the epsilon parameter.
SELECT
- WITH DIFFERENTIAL_PRIVACY
+ WITH ANONYMIZATION
OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
item,
- SUM(quantity, contribution_bounds_per_group=>(0, 100)) quantity
+ ANON_VAR_POP(quantity CLAMPED BETWEEN 0 AND 100) pop_variance
FROM {{USERNAME}}.view_on_professors
GROUP BY item;
-- These results will change each time you run the query.
-- Smaller aggregations might be removed.
-/*----------+-----------*
- | item | quantity |
- +----------+-----------+
- | pencil | 143 |
- | pen | 59 |
- *----------+-----------*/
+/*----------+-----------------*
+ | item | pop_variance |
+ +----------+-----------------+
+ | pencil | 642 |
+ | pen | 2.6666666666665 |
+ | scissors | 2500 |
+ *----------+-----------------*/
```
+[dp-clamp-explicit]: #dp_explicit_clamping
+
+[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+
+[dp-clamp-between]: #dp_clamp_between
+
+### Supplemental materials
+
+### Clamp values in a differentially private aggregate function
+
+
+In [differentially private queries][dp-syntax],
+aggregation clamping is used to limit the contribution of outliers. You can
+clamp explicitly or implicitly as follows:
+
++ [Clamp explicitly in the `DIFFERENTIAL_PRIVACY` clause][dp-clamped-named].
++ [Clamp implicitly in the `DIFFERENTIAL_PRIVACY` clause][dp-clamped-named-imp].
+
+#### Implicitly clamp values
+
+
+If you don't include the contribution bounds named argument with the
+`DIFFERENTIAL_PRIVACY` clause, clamping is implicit, which
+means bounds are derived from the data itself in a differentially private way.
+
+Implicit bounding works best when computed using large datasets. For more
+information, see
+[Implicit bounding limitations for small datasets][implicit-limits].
+
+**Details**
+
+In differentially private aggregate functions, explicit clamping is optional.
+If you don't include this clause, clamping is implicit,
+which means bounds are derived from the data itself in a differentially
+private way. The process is somewhat random, so aggregations with identical
+ranges can have different bounds.
+
+Implicit bounds are determined for each aggregation. So if some
+aggregations have a wide range of values, and others have a narrow range of
+values, implicit bounding can identify different bounds for different
+aggregations as appropriate. Implicit bounds might be an advantage or a
+disadvantage depending on your use case. Different bounds for different
+aggregations can result in lower error. Different bounds also means that
+different aggregations have different levels of uncertainty, which might not be
+directly comparable. [Explicit bounds][dp-clamped-named], on the other hand,
+apply uniformly to all aggregations and should be derived from public
+information.
+
+When clamping is implicit, part of the total epsilon is spent picking bounds.
+This leaves less epsilon for aggregations, so these aggregations are noisier.
+
+**Example**
+
+The following anonymized query clamps each aggregate contribution for each
+differential privacy ID and within a range derived from the data itself.
+As long as all or most values fall within this range, your results
+will be accurate. This query references a view called
+[`view_on_professors`][dp-example-views].
+
```sql
--- Without noise, using the epsilon parameter.
--- (this un-noised version is for demonstration only)
-SELECT
- WITH DIFFERENTIAL_PRIVACY
- OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1)
- item,
- SUM(quantity) quantity
-FROM {{USERNAME}}.view_on_professors
+--Without noise (this un-noised version is for demonstration only)
+SELECT WITH DIFFERENTIAL_PRIVACY
+ OPTIONS (
+ epsilon = 1e20,
+ delta = .01,
+ privacy_unit_column=id
+ )
+ item,
+ AVG(quantity) average_quantity
+FROM view_on_professors
GROUP BY item;
--- These results will not change when you run the query.
-/*----------+----------*
- | item | quantity |
- +----------+----------+
- | scissors | 8 |
- | pencil | 144 |
- | pen | 58 |
- *----------+----------*/
+/*----------+------------------*
+ | item | average_quantity |
+ +----------+------------------+
+ | scissors | 8 |
+ | pencil | 72 |
+ | pen | 18.5 |
+ *----------+------------------*/
+```
+
+The following anonymized query clamps each aggregate contribution for each
+differential privacy ID and within a range derived from the data itself.
+As long as all or most values fall within this range, your results
+will be accurate. This query references a view called
+[`view_on_professors`][dp-example-views].
+
+```sql
+--Without noise (this un-noised version is for demonstration only)
+SELECT WITH DIFFERENTIAL_PRIVACY
+ OPTIONS (
+ epsilon = 1e20,
+ delta = .01,
+ max_groups_contributed = 1
+ )
+ item,
+ AVG(quantity) AS average_quantity
+FROM view_on_professors
+GROUP BY item;
+
+/*----------+------------------*
+ | item | average_quantity |
+ +----------+------------------+
+ | scissors | 8 |
+ | pencil | 72 |
+ | pen | 18.5 |
+ *----------+------------------*/
+```
+
+#### Explicitly clamp values
+
+
+```sql
+contribution_bounds_per_group => (lower_bound,upper_bound)
+```
+
+```sql
+contribution_bounds_per_row => (lower_bound,upper_bound)
+```
+
+Use the contribution bounds named argument to explicitly clamp
+values per group or per row between a lower and upper bound in a
+`DIFFERENTIAL_PRIVACY` clause.
+
+Input values:
+
++ `contribution_bounds_per_row`: Contributions per privacy unit are clamped
+ on a per-row (per-record) basis. This means the following:
+ + Upper and lower bounds are applied to column values in individual
+ rows produced by the input subquery independently.
+ + The maximum possible contribution per privacy unit (and per grouping set)
+ is the product of the per-row contribution limit and `max_groups_contributed`
+ differential privacy parameter.
++ `contribution_bounds_per_group`: Contributions per privacy unit are clamped
+ on a unique set of entity-specified `GROUP BY` keys. The upper and lower
+ bounds are applied to values per group after the values are aggregated per
+ privacy unit.
++ `lower_bound`: Numeric literal that represents the smallest value to
+ include in an aggregation.
++ `upper_bound`: Numeric literal that represents the largest value to
+ include in an aggregation.
+
+`NUMERIC` and `BIGNUMERIC` arguments are not allowed.
+
+**Details**
+
+In differentially private aggregate functions, clamping explicitly clamps the
+total contribution from each privacy unit column to within a specified
+range.
+
+Explicit bounds are uniformly applied to all aggregations. So even if some
+aggregations have a wide range of values, and others have a narrow range of
+values, the same bounds are applied to all of them. On the other hand, when
+[implicit bounds][dp-clamped-named-imp] are inferred from the data, the bounds
+applied to each aggregation can be different.
+
+Explicit bounds should be chosen to reflect public information.
+For example, bounding ages between 0 and 100 reflects public information
+because the age of most people generally falls within this range.
+
+Important: The results of the query reveal the explicit bounds. Do not use
+explicit bounds based on the entity data; explicit bounds should be based on
+public information.
+
+**Examples**
+
+The following anonymized query clamps each aggregate contribution for each
+differential privacy ID and within a specified range (`0` and `100`).
+As long as all or most values fall within this range, your results
+will be accurate. This query references a view called
+[`view_on_professors`][dp-example-views].
+
+```sql
+--Without noise (this un-noised version is for demonstration only)
+SELECT WITH DIFFERENTIAL_PRIVACY
+ OPTIONS (
+ epsilon = 1e20,
+ delta = .01,
+ privacy_unit_column=id
+ )
+ item,
+ AVG(quantity, contribution_bounds_per_group=>(0,100)) AS average_quantity
+FROM view_on_professors
+GROUP BY item;
+
+/*----------+------------------*
+ | item | average_quantity |
+ +----------+------------------+
+ | scissors | 8 |
+ | pencil | 40 |
+ | pen | 18.5 |
+ *----------+------------------*/
+```
+
+Notice what happens when most or all values fall outside of the clamped range.
+To get accurate results, ensure that the difference between the upper and lower
+bound is as small as possible, and that most inputs are between the upper and
+lower bound.
+
+```sql {.bad}
+--Without noise (this un-noised version is for demonstration only)
+SELECT WITH DIFFERENTIAL_PRIVACY
+ OPTIONS (
+ epsilon = 1e20,
+ delta = .01,
+ privacy_unit_column=id
+ )
+ item,
+ AVG(quantity, contribution_bounds_per_group=>(50,100)) AS average_quantity
+FROM view_on_professors
+GROUP BY item;
+
+/*----------+------------------*
+ | item | average_quantity |
+ +----------+------------------+
+ | scissors | 54 |
+ | pencil | 58 |
+ | pen | 51 |
+ *----------+------------------*/
+```
+
+The following differentially private query clamps each aggregate contribution
+for each privacy unit column and within a specified range (`0` and `100`).
+As long as all or most values fall within this range, your results will be
+accurate. This query references a view called
+[`view_on_professors`][dp-example-views].
+
+```sql
+--Without noise (this un-noised version is for demonstration only)
+SELECT WITH DIFFERENTIAL_PRIVACY
+ OPTIONS (
+ epsilon = 1e20,
+ delta = .01,
+ max_groups_contributed = 1
+ )
+ item,
+ AVG(quantity, contribution_bounds_per_group=>(0,100)) AS average_quantity
+FROM view_on_professors
+GROUP BY item;
+
+/*----------+------------------*
+ | item | average_quantity |
+ +----------+------------------+
+ | scissors | 8 |
+ | pencil | 40 |
+ | pen | 18.5 |
+ *----------+------------------*/
+```
+
+Notice what happens when most or all values fall outside of the clamped range.
+To get accurate results, ensure that the difference between the upper and lower
+bound is as small as possible, and that most inputs are between the upper and
+lower bound.
+
+```sql {.bad}
+--Without noise (this un-noised version is for demonstration only)
+SELECT WITH DIFFERENTIAL_PRIVACY
+ OPTIONS (
+ epsilon = 1e20,
+ delta = .01,
+ max_groups_contributed = 1
+ )
+ item,
+ AVG(quantity, contribution_bounds_per_group=>(50,100)) AS average_quantity
+FROM view_on_professors
+GROUP BY item;
+
+/*----------+------------------*
+ | item | average_quantity |
+ +----------+------------------+
+ | scissors | 54 |
+ | pencil | 58 |
+ | pen | 51 |
+ *----------+------------------*/
```
Note: For more information about when and when not to use
-noise, see [Use differential privacy][dp-noise].
+noise, see [Remove noise][dp-noise].
-[dp-example-tables]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_tables
+[dp-syntax]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_clause
+
+[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
[dp-noise]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#eliminate_noise
-[dp-supertype]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#supertypes
+[implicit-limits]: https://github.com/google/zetasql/blob/master/docs/differential-privacy.md#implicit_limits
[dp-clamped-named]: #dp_clamped_named
+[dp-clamped-named-imp]: #dp_clamped_named_implicit
+
+[dp-guide]: https://github.com/google/zetasql/blob/master/docs/differential-privacy.md
+
[dp-syntax]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_clause
-[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+[agg-function-calls]: https://github.com/google/zetasql/blob/master/docs/aggregate-function-calls.md
-### `VAR_POP` (`DIFFERENTIAL_PRIVACY`)
-
+## GQL functions
-```sql
-WITH DIFFERENTIAL_PRIVACY ...
- VAR_POP(
- expression,
- [ contribution_bounds_per_row => (lower_bound, upper_bound) ]
- )
-```
+ZetaSQL supports the following GQL functions:
+
+### Function list
+
+
+
+
+ Name |
+ Summary |
+
+
+
+
+
+
+
+ DESTINATION_NODE_ID
+
+
+ |
+ Gets a unique identifier of a graph edge's destination node. |
+
+
+
+
+
+ EDGES
+
+
+ |
+
+ Gets the edges in a graph path. The resulting array retains the
+ original order in the graph path.
+ |
+
+
+
+
+
+ ELEMENT_ID
+
+
+ |
+ Gets a graph element's unique identifier. |
+
+
+
+
+
+ IS_ACYCLIC
-**Description**
+
+ |
+ Checks if a graph path has a repeating node. |
+
-Takes an expression and computes the population (biased) variance of the values
-in the expression. The final result is an aggregation across
-privacy unit columns between `0` and `+Inf`. You can
-[clamp the input values][dp-clamp-explicit] explicitly, otherwise input values
-are clamped implicitly. Clamping is performed per individual user values.
+
+
+
+ IS_TRAIL
-This function must be used with the `DIFFERENTIAL_PRIVACY` clause and
-can support these arguments:
+
+ |
+ Checks if a graph path has a repeating edge. |
+
-+ `expression`: The input expression. This can be any numeric input type,
- such as `INT64`. `NULL`s are always ignored.
-+ `contribution_bounds_per_row`: A named argument with a
- [contribution bound][dp-clamped-named].
- Performs clamping for each row separately before performing intermediate
- grouping on individual user values.
+
+
+
+ LABELS
-`NUMERIC` and `BIGNUMERIC` arguments are not allowed.
- If you need them, cast them as the
-`DOUBLE` data type first.
+
+ |
+ Gets the labels associated with a graph element. |
+
-**Return type**
+
+
+
+ NODES
-`DOUBLE`
+
+ |
+
+ Gets the nodes in a graph path. The resulting array retains the
+ original order in the graph path.
+ |
+
-**Examples**
+
+
+
+ PATH
-The following differentially private query gets the
-population (biased) variance of items requested. Smaller aggregations may not
-be included. This query references a view called
-[`professors`][dp-example-tables].
+
+ |
+ Creates a graph path from a list of graph elements. |
+
-```sql
--- With noise
-SELECT
- WITH DIFFERENTIAL_PRIVACY
- OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1, privacy_unit_column=id)
- item,
- VAR_POP(quantity, contribution_bounds_per_row => (0,100)) pop_variance
-FROM professors
-GROUP BY item;
+
+
+
+ PATH_FIRST
--- These results will change each time you run the query.
--- Smaller aggregations may be removed.
-/*----------+-----------------*
- | item | pop_variance |
- +----------+-----------------+
- | pencil | 642 |
- | pen | 2.6666666666665 |
- | scissors | 2500 |
- *----------+-----------------*/
-```
+
+ |
+ Gets the first node in a graph path. |
+
-The following differentially private query gets the
-population (biased) variance of items requested. Smaller aggregations might not
-be included. This query references a view called
-[`view_on_professors`][dp-example-views].
+
+
+
+ PATH_LAST
-```sql
--- With noise
-SELECT
- WITH DIFFERENTIAL_PRIVACY
- OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
- item,
- VAR_POP(quantity, contribution_bounds_per_row=>(0, 100)) pop_variance
-FROM {{USERNAME}}.view_on_professors
-GROUP BY item;
+
+ |
+ Gets the last node in a graph path. |
+
--- These results will change each time you run the query.
--- Smaller aggregations might be removed.
-/*----------+-----------------*
- | item | pop_variance |
- +----------+-----------------+
- | pencil | 642 |
- | pen | 2.6666666666665 |
- | scissors | 2500 |
- *----------+-----------------*/
-```
+
+
+
+ PATH_LENGTH
-[dp-clamp-explicit]: #dp_explicit_clamping
+
+ |
+ Gets the number of edges in a graph path. |
+
-[dp-example-tables]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_tables
+
+
+
+ PROPERTY_NAMES
-[dp-clamped-named]: #dp_clamped_named
+
+ |
+ Gets the property names associated with a graph element. |
+
-[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+
+
+
+ SOURCE_NODE_ID
-### `ANON_AVG` (DEPRECATED)
-
+
+ |
+ Gets a unique identifier of a graph edge's source node. |
+
-Warning: This function has been deprecated. Use
-`AVG` (differential privacy) instead.
+
+
-```sql
-WITH ANONYMIZATION ...
- ANON_AVG(expression [clamped_between_clause])
+### `DESTINATION_NODE_ID`
-clamped_between_clause:
- CLAMPED BETWEEN lower_bound AND upper_bound
+```sql
+DESTINATION_NODE_ID(edge_element)
```
**Description**
-Returns the average of non-`NULL`, non-`NaN` values in the expression.
-This function first computes the average per privacy unit column, and then
-computes the final result by averaging these averages.
+Gets a unique identifier of a graph edge's destination node. The unique identifier is only valid for the scope of the query where it is obtained.
-This function must be used with the `ANONYMIZATION` clause and
-can support these arguments:
+**Arguments**
-+ `expression`: The input expression. This can be any numeric input type,
- such as `INT64`.
-+ `clamped_between_clause`: Perform [clamping][dp-clamp-between] per
- privacy unit column averages.
++ `edge_element`: A `GRAPH_ELEMENT` value that represents an edge.
+
+**Details**
+
+Returns `NULL` if `edge_element` is `NULL`.
**Return type**
-`DOUBLE`
+`STRING`
**Examples**
-The following differentially private query gets the average number of each item
-requested per professor. Smaller aggregations might not be included. This query
-references a view called [`view_on_professors`][dp-example-views].
-
```sql
--- With noise, using the epsilon parameter.
-SELECT
- WITH ANONYMIZATION
- OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
- item,
- ANON_AVG(quantity CLAMPED BETWEEN 0 AND 100) average_quantity
-FROM {{USERNAME}}.view_on_professors
-GROUP BY item;
+GRAPH FinGraph
+MATCH (:Person)-[o:Owns]->(a:Account)
+RETURN a.id AS account_id, DESTINATION_NODE_ID(o) AS destination_node_id
--- These results will change each time you run the query.
--- Smaller aggregations might be removed.
-/*----------+------------------*
- | item | average_quantity |
- +----------+------------------+
- | pencil | 38.5038356810269 |
- | pen | 13.4725028762032 |
- *----------+------------------*/
+/*------------------------------------------+
+ |account_id | destination_node_id |
+ +-----------|------------------------------+
+ | 7 | mUZpbkdyYXBoLkFjY291bnQAeJEO |
+ | 16 | mUZpbkdyYXBoLkFjY291bnQAeJEg |
+ | 20 | mUZpbkdyYXBoLkFjY291bnQAeJEo |
+ +------------------------------------------*/
```
-```sql
--- Without noise, using the epsilon parameter.
--- (this un-noised version is for demonstration only)
-SELECT
- WITH ANONYMIZATION
- OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1)
- item,
- ANON_AVG(quantity) average_quantity
-FROM {{USERNAME}}.view_on_professors
-GROUP BY item;
+Note that the actual identifiers obtained may be different from what's shown above.
--- These results will not change when you run the query.
-/*----------+------------------*
- | item | average_quantity |
- +----------+------------------+
- | scissors | 8 |
- | pencil | 40 |
- | pen | 18.5 |
- *----------+------------------*/
+### `EDGES`
+
+```sql
+EDGES(graph_path)
```
-Note: You can learn more about when and when not to use
-noise [here][dp-noise].
+**Description**
-[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+Gets the edges in a graph path. The resulting array retains the
+original order in the graph path.
-[dp-noise]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#eliminate_noise
+**Definitions**
-[dp-clamp-between]: #dp_clamp_between
++ `graph_path`: A `GRAPH_PATH` value that represents a graph path.
-### `ANON_COUNT` (DEPRECATED)
-
+**Details**
-Warning: This function has been deprecated. Use
-`COUNT` (differential privacy) instead.
+If `graph_path` is `NULL`, returns `NULL`.
-+ [Signature 1](#anon_count_signature1)
-+ [Signature 2](#anon_count_signature2)
+**Return type**
-#### Signature 1
-
+`ARRAY`
+
+**Examples**
```sql
-WITH ANONYMIZATION ...
- ANON_COUNT(*)
+GRAPH FinGraph
+MATCH p=(src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET es = EDGES(p)
+RETURN
+ LABELS(es[0]) AS labels,
+ es[0].to_id AS to_account;
+
+/*----------------------------*
+ | labels | to_account |
+ +----------------------------+
+ | ["Transfers"] | 7 |
+ | ["Transfers"] | 7 |
+ | ["Transfers"] | 16 |
+ | ["Transfers"] | 16 |
+ | ["Transfers"] | 16 |
+ | ["Transfers"] | 20 |
+ | ["Transfers"] | 20 |
+ *----------------------------*/
+```
+
+### `ELEMENT_ID`
+
+```sql
+ELEMENT_ID(element)
```
**Description**
-Returns the number of rows in the
-[differentially private][dp-from-clause] `FROM` clause. The final result
-is an aggregation across privacy unit columns.
-[Input values are clamped implicitly][dp-clamp-implicit]. Clamping is
-performed per privacy unit column.
+Gets a graph element's unique identifier. The unique identifier is only valid for the scope of the query where it is obtained.
-This function must be used with the `ANONYMIZATION` clause.
+**Arguments**
-**Return type**
++ `element`: A `GRAPH_ELEMENT` value.
-`INT64`
+**Details**
-**Examples**
+Returns `NULL` if `element` is `NULL`.
-The following differentially private query counts the number of requests for
-each item. This query references a view called
-[`view_on_professors`][dp-example-views].
+**Return type**
-```sql
--- With noise, using the epsilon parameter.
-SELECT
- WITH ANONYMIZATION
- OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
- item,
- ANON_COUNT(*) times_requested
-FROM {{USERNAME}}.view_on_professors
-GROUP BY item;
+`STRING`
--- These results will change each time you run the query.
--- Smaller aggregations might be removed.
-/*----------+-----------------*
- | item | times_requested |
- +----------+-----------------+
- | pencil | 5 |
- | pen | 2 |
- *----------+-----------------*/
-```
+**Examples**
```sql
--- Without noise, using the epsilon parameter.
--- (this un-noised version is for demonstration only)
-SELECT
- WITH ANONYMIZATION
- OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1)
- item,
- ANON_COUNT(*) times_requested
-FROM {{USERNAME}}.view_on_professors
-GROUP BY item;
+GRAPH FinGraph
+MATCH (p:Person)-[o:Owns]->(:Account)
+RETURN p.name AS name, ELEMENT_ID(p) AS node_element_id, ELEMENT_ID(o) AS edge_element_id
--- These results will not change when you run the query.
-/*----------+-----------------*
- | item | times_requested |
- +----------+-----------------+
- | scissors | 1 |
- | pencil | 4 |
- | pen | 3 |
- *----------+-----------------*/
+/*--------------------------------------------------------------------------------------------------------------------------------------------+
+ | name | node_element_id              | edge_element_id                                                                                      |
+ +------|------------------------------|------------------------------------------------------------------------------------------------------+
+ | Alex | mUZpbkdyYXBoLlBlcnNvbgB4kQI= | mUZpbkdyYXBoLlBlcnNvbk93bkFjY291bnQAeJECkQ6ZRmluR3JhcGguUGVyc29uAHiRAplGaW5HcmFwaC5BY2NvdW50AHiRDg== |
+ | Dana | mUZpbkdyYXBoLlBlcnNvbgB4kQQ= | mUZpbkdyYXBoLlBlcnNvbk93bkFjY291bnQAeJEGkSCZRmluR3JhcGguUGVyc29uAHiRBplGaW5HcmFwaC5BY2NvdW50AHiRIA== |
+ | Lee | mUZpbkdyYXBoLlBlcnNvbgB4kQY= | mUZpbkdyYXBoLlBlcnNvbk93bkFjY291bnQAeJEEkSiZRmluR3JhcGguUGVyc29uAHiRBJlGaW5HcmFwaC5BY2NvdW50AHiRKA== |
+ +--------------------------------------------------------------------------------------------------------------------------------------------*/
```
-Note: You can learn more about when and when not to use
-noise [here][dp-noise].
+Note that the actual identifiers obtained may be different from what's shown above.
-#### Signature 2
-
+### `IS_ACYCLIC`
```sql
-WITH ANONYMIZATION ...
- ANON_COUNT(expression [CLAMPED BETWEEN lower_bound AND upper_bound])
+IS_ACYCLIC(graph_path)
```
**Description**
-Returns the number of non-`NULL` expression values. The final result is an
-aggregation across privacy unit columns.
+Checks if a graph path has a repeating node. Returns `FALSE` if a repetition is
+found, otherwise returns `TRUE`.
-This function must be used with the `ANONYMIZATION` clause and
-can support these arguments:
+**Definitions**
-+ `expression`: The input expression. This can be any numeric input type,
- such as `INT64`.
-+ `CLAMPED BETWEEN` clause:
- Perform [clamping][dp-clamp-between] per privacy unit column.
++ `graph_path`: A `GRAPH_PATH` value that represents a graph path.
-**Return type**
+**Details**
-`INT64`
+Two nodes are considered equal if they compare as equal.
-**Examples**
+Returns `NULL` if `graph_path` is `NULL`.
-The following differentially private query counts the number of requests made
-for each type of item. This query references a view called
-[`view_on_professors`][dp-example-views].
+**Return type**
-```sql
--- With noise
-SELECT
- WITH ANONYMIZATION
- OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
- item,
- ANON_COUNT(item CLAMPED BETWEEN 0 AND 100) times_requested
-FROM {{USERNAME}}.view_on_professors
-GROUP BY item;
+`BOOL`
--- These results will change each time you run the query.
--- Smaller aggregations might be removed.
-/*----------+-----------------*
- | item | times_requested |
- +----------+-----------------+
- | pencil | 5 |
- | pen | 2 |
- *----------+-----------------*/
-```
+**Examples**
```sql
---Without noise (this un-noised version is for demonstration only)
-SELECT
- WITH ANONYMIZATION
- OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1)
- item,
- ANON_COUNT(item CLAMPED BETWEEN 0 AND 100) times_requested
-FROM {{USERNAME}}.view_on_professors
-GROUP BY item;
+GRAPH FinGraph
+MATCH p=(src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+RETURN src.id AS source_account_id, IS_ACYCLIC(p) AS is_acyclic_path
--- These results will not change when you run the query.
-/*----------+-----------------*
- | item | times_requested |
- +----------+-----------------+
- | scissors | 1 |
- | pencil | 4 |
- | pen | 3 |
- *----------+-----------------*/
+/*-------------------------------------*
+ | source_account_id | is_acyclic_path |
+ +-------------------------------------+
+ | 16 | TRUE |
+ | 20 | TRUE |
+ | 20 | TRUE |
+ | 16 | FALSE |
+ | 7 | TRUE |
+ | 7 | TRUE |
+ | 20 | FALSE |
+ *-------------------------------------*/
```
-Note: You can learn more about when and when not to use
-noise [here][dp-noise].
-
-[dp-clamp-implicit]: #dp_implicit_clamping
-
-[dp-from-clause]: https://github.com/google/zetasql/blob/master/docs/differential-privacy.md#dp_from_rules
-
-[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
-
-[dp-noise]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#eliminate_noise
-
-[dp-clamp-between]: #dp_clamp_between
-
-### `ANON_PERCENTILE_CONT` (DEPRECATED)
-
-
-Warning: This function has been deprecated. Use
-`PERCENTILE_CONT` (differential privacy) instead.
+### `IS_TRAIL`
```sql
-WITH ANONYMIZATION ...
- ANON_PERCENTILE_CONT(expression, percentile [CLAMPED BETWEEN lower_bound AND upper_bound])
+IS_TRAIL(graph_path)
```
**Description**
-Takes an expression and computes a percentile for it. The final result is an
-aggregation across privacy unit columns.
+Checks if a graph path has a repeating edge. Returns `FALSE` if a repetition is
+found, otherwise returns `TRUE`.
-This function must be used with the `ANONYMIZATION` clause and
-can support these arguments:
+**Definitions**
-+ `expression`: The input expression. This can be most numeric input types,
- such as `INT64`. `NULL`s are always ignored.
-+ `percentile`: The percentile to compute. The percentile must be a literal in
- the range [0, 1]
-+ `CLAMPED BETWEEN` clause:
- Perform [clamping][dp-clamp-between] per privacy unit column.
++ `graph_path`: A `GRAPH_PATH` value that represents a graph path.
-`NUMERIC` and `BIGNUMERIC` arguments are not allowed.
- If you need them, cast them as the
-`DOUBLE` data type first.
+**Details**
+
+Returns `NULL` if `graph_path` is `NULL`.
**Return type**
-`DOUBLE`
+`BOOL`
**Examples**
-The following differentially private query gets the percentile of items
-requested. Smaller aggregations might not be included. This query references a
-view called [`view_on_professors`][dp-example-views].
-
```sql
--- With noise, using the epsilon parameter.
-SELECT
- WITH ANONYMIZATION
- OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
- item,
- ANON_PERCENTILE_CONT(quantity, 0.5 CLAMPED BETWEEN 0 AND 100) percentile_requested
-FROM {{USERNAME}}.view_on_professors
-GROUP BY item;
+GRAPH FinGraph
+MATCH
+ p=(a1:Account)-[t1:Transfers]->(a2:Account)-[t2:Transfers]->
+ (a3:Account)-[t3:Transfers]->(a4:Account)
+WHERE a1.id < a4.id
+RETURN
+ IS_TRAIL(p) AS is_trail_path, t1.id as t1_id, t2.id as t2_id, t3.id as t3_id
--- These results will change each time you run the query.
--- Smaller aggregations might be removed.
-/*----------+----------------------*
- | item | percentile_requested |
- +----------+----------------------+
- | pencil | 72.00011444091797 |
- | scissors | 8.000175476074219 |
- | pen | 23.001075744628906 |
- *----------+----------------------*/
+/*---------------+-------+-------+-------+
+ | is_trail_path | t1_id | t2_id | t3_id |
+ +---------------+-------+-------+-------+
+ | FALSE | 16 | 20 | 16 |
+ | TRUE | 7 | 16 | 20 |
+ | TRUE | 7 | 16 | 20 |
+ +---------------+-------+-------+-------*/
```
-[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
-
-[dp-clamp-between]: #dp_clamp_between
-
-### `ANON_QUANTILES` (DEPRECATED)
-
-
-Warning: This function has been deprecated. Use
-`QUANTILES` (differential privacy) instead.
+### `LABELS`
```sql
-WITH ANONYMIZATION ...
- ANON_QUANTILES(expression, number CLAMPED BETWEEN lower_bound AND upper_bound)
+LABELS(element)
```
**Description**
-Returns an array of differentially private quantile boundaries for values in
-`expression`. The first element in the return value is the
-minimum quantile boundary and the last element is the maximum quantile boundary.
-The returned results are aggregations across privacy unit columns.
+Gets the labels associated with a graph element and preserves the original case
+of each label.
-This function must be used with the `ANONYMIZATION` clause and
-can support these arguments:
+**Arguments**
-+ `expression`: The input expression. This can be most numeric input types,
- such as `INT64`. `NULL`s are always ignored.
-+ `number`: The number of quantiles to create. This must be an `INT64`.
-+ `CLAMPED BETWEEN` clause:
- Perform [clamping][dp-clamp-between] per privacy unit column.
++ `element`: A `GRAPH_ELEMENT` value that represents the graph element to
+ extract labels from.
-`NUMERIC` and `BIGNUMERIC` arguments are not allowed.
- If you need them, cast them as the
-`DOUBLE` data type first.
+**Details**
+
+Returns `NULL` if `element` is `NULL`.
**Return type**
-`ARRAY`<`DOUBLE`>
+`ARRAY`
**Examples**
-The following differentially private query gets the five quantile boundaries of
-the four quartiles of the number of items requested. Smaller aggregations
-might not be included. This query references a view called
-[`view_on_professors`][dp-example-views].
-
```sql
--- With noise, using the epsilon parameter.
-SELECT
- WITH ANONYMIZATION
- OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
- item,
- ANON_QUANTILES(quantity, 4 CLAMPED BETWEEN 0 AND 100) quantiles_requested
-FROM {{USERNAME}}.view_on_professors
-GROUP BY item;
+GRAPH FinGraph
+MATCH (n:Person|Account)
+RETURN LABELS(n) AS label, n.id
--- These results will change each time you run the query.
--- Smaller aggregations might be removed.
-/*----------+----------------------------------------------------------------------*
- | item | quantiles_requested |
- +----------+----------------------------------------------------------------------+
- | pen | [6.409375,20.647684733072918,41.40625,67.30848524305556,99.80078125] |
- | pencil | [6.849259,44.010416666666664,62.64204,65.83806818181819,98.59375] |
- *----------+----------------------------------------------------------------------*/
+/*----------------+
+ | label | id |
+ +----------------+
+ | [Account] | 7 |
+ | [Account] | 16 |
+ | [Account] | 20 |
+ | [Person] | 1 |
+ | [Person] | 2 |
+ | [Person] | 3 |
+ +----------------*/
```
-[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
-
-[dp-clamp-between]: #dp_clamp_between
-
-### `ANON_STDDEV_POP` (DEPRECATED)
-
-
-Warning: This function has been deprecated. Use
-`STDDEV_POP` (differential privacy) instead.
+### `NODES`
```sql
-WITH ANONYMIZATION ...
- ANON_STDDEV_POP(expression [CLAMPED BETWEEN lower_bound AND upper_bound])
+NODES(graph_path)
```
**Description**
-Takes an expression and computes the population (biased) standard deviation of
-the values in the expression. The final result is an aggregation across
-privacy unit columns between `0` and `+Inf`.
+Gets the nodes in a graph path. The resulting array retains the
+original order in the graph path.
-This function must be used with the `ANONYMIZATION` clause and
-can support these arguments:
+**Definitions**
-+ `expression`: The input expression. This can be most numeric input types,
- such as `INT64`. `NULL`s are always ignored.
-+ `CLAMPED BETWEEN` clause:
- Perform [clamping][dp-clamp-between] per individual entity values.
++ `graph_path`: A `GRAPH_PATH` value that represents a graph path.
-`NUMERIC` and `BIGNUMERIC` arguments are not allowed.
- If you need them, cast them as the
-`DOUBLE` data type first.
+**Details**
+
+Returns `NULL` if `graph_path` is `NULL`.
**Return type**
-`DOUBLE`
+`ARRAY`
**Examples**
-The following differentially private query gets the
-population (biased) standard deviation of items requested. Smaller aggregations
-might not be included. This query references a view called
-[`view_on_professors`][dp-example-views].
-
```sql
--- With noise, using the epsilon parameter.
-SELECT
- WITH ANONYMIZATION
- OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
- item,
- ANON_STDDEV_POP(quantity CLAMPED BETWEEN 0 AND 100) pop_standard_deviation
-FROM {{USERNAME}}.view_on_professors
-GROUP BY item;
+GRAPH FinGraph
+MATCH p=(src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET ns = NODES(p)
+RETURN
+ JSON_QUERY(TO_JSON(ns)[0], '$.labels') AS labels,
+ JSON_QUERY(TO_JSON(ns)[0], '$.properties.nick_name') AS nick_name;
--- These results will change each time you run the query.
--- Smaller aggregations might be removed.
-/*----------+------------------------*
- | item | pop_standard_deviation |
- +----------+------------------------+
- | pencil | 25.350871122442054 |
- | scissors | 50 |
- | pen | 2 |
- *----------+------------------------*/
+/*--------------------------------*
+ | labels | nick_name |
+ +--------------------------------+
+ | ["Account"] | "Vacation Fund" |
+ | ["Account"] | "Rainy Day Fund" |
+ | ["Account"] | "Rainy Day Fund" |
+ | ["Account"] | "Rainy Day Fund" |
+ | ["Account"] | "Vacation Fund" |
+ | ["Account"] | "Vacation Fund" |
+ | ["Account"] | "Vacation Fund" |
+ | ["Account"] | "Rainy Day Fund" |
+ *--------------------------------*/
```
-[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
-
-[dp-clamp-between]: #dp_clamp_between
-
-### `ANON_SUM` (DEPRECATED)
-
-
-Warning: This function has been deprecated. Use
-`SUM` (differential privacy) instead.
-
+### `PATH`
+
```sql
-WITH ANONYMIZATION ...
- ANON_SUM(expression [CLAMPED BETWEEN lower_bound AND upper_bound])
+PATH(graph_element[, ...])
```
**Description**
-Returns the sum of non-`NULL`, non-`NaN` values in the expression. The final
-result is an aggregation across privacy unit columns.
+Creates a graph path from a list of graph elements.
-This function must be used with the `ANONYMIZATION` clause and
-can support these arguments:
+**Definitions**
-+ `expression`: The input expression. This can be any numeric input type,
- such as `INT64`.
-+ `CLAMPED BETWEEN` clause:
- Perform [clamping][dp-clamp-between] per privacy unit column.
++ `graph_element`: A `GRAPH_ELEMENT` value that represents a graph element,
+ such as a node or edge, to add to a graph path.
-**Return type**
+**Details**
-One of the following [supertypes][dp-supertype]:
+This function produces an error if:
-+ `INT64`
-+ `UINT64`
-+ `DOUBLE`
++ A graph element is `NULL`.
++ Nodes aren't interleaved with edges.
++ An edge doesn't connect to neighboring nodes.
-**Examples**
+**Return type**
-The following differentially private query gets the sum of items requested.
-Smaller aggregations might not be included. This query references a view called
-[`view_on_professors`][dp-example-views].
+`GRAPH_PATH`
+
+**Examples**
```sql
--- With noise, using the epsilon parameter.
-SELECT
- WITH ANONYMIZATION
- OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
- item,
- ANON_SUM(quantity CLAMPED BETWEEN 0 AND 100) quantity
-FROM {{USERNAME}}.view_on_professors
-GROUP BY item;
+GRAPH FinGraph
+MATCH (src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET p = PATH(src, t1, mid, t2, dst)
+RETURN
+ JSON_QUERY(TO_JSON(p)[0], '$.labels') AS element_a,
+ JSON_QUERY(TO_JSON(p)[1], '$.labels') AS element_b,
+ JSON_QUERY(TO_JSON(p)[2], '$.labels') AS element_c
--- These results will change each time you run the query.
--- Smaller aggregations might be removed.
-/*----------+-----------*
- | item | quantity |
- +----------+-----------+
- | pencil | 143 |
- | pen | 59 |
- *----------+-----------*/
+/*-------------------------------------------*
+ | element_a | element_b | element_c |
+ +-------------------------------------------+
+ | ["Account"] | ["Transfers"] | ["Account"] |
+ | ... | ... | ... |
+ *-------------------------------------------*/
```
```sql
--- Without noise, using the epsilon parameter.
--- (this un-noised version is for demonstration only)
-SELECT
- WITH ANONYMIZATION
- OPTIONS(epsilon=1e20, delta=.01, max_groups_contributed=1)
- item,
- ANON_SUM(quantity) quantity
-FROM {{USERNAME}}.view_on_professors
-GROUP BY item;
-
--- These results will not change when you run the query.
-/*----------+----------*
- | item | quantity |
- +----------+----------+
- | scissors | 8 |
- | pencil | 144 |
- | pen | 58 |
- *----------+----------*/
+-- Error: in 'p', a graph element is NULL.
+GRAPH FinGraph
+MATCH (src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET p = PATH(src, NULL, mid, t2, dst)
+RETURN TO_JSON(p) AS results
```
-Note: You can learn more about when and when not to use
-noise [here][dp-noise].
-
-[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
-
-[dp-noise]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#eliminate_noise
-
-[dp-supertype]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#supertypes
-
-[dp-clamp-between]: #dp_clamp_between
+```sql
+-- Error: in 'p', 'src' and 'mid' are nodes that should be interleaved with an
+-- edge.
+GRAPH FinGraph
+MATCH (src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET p = PATH(src, mid, t2, dst)
+RETURN TO_JSON(p) AS results
+```
-### `ANON_VAR_POP` (DEPRECATED)
-
+```sql
+-- Error: in 'p', 't2' is an edge that does not connect to a neighboring node on
+-- the right.
+GRAPH FinGraph
+MATCH (src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET p = PATH(src, t2, mid)
+RETURN TO_JSON(p) AS results
+```
-Warning: This function has been deprecated. Use
-`VAR_POP` (differential privacy) instead.
+### `PATH_FIRST`
```sql
-WITH ANONYMIZATION ...
- ANON_VAR_POP(expression [CLAMPED BETWEEN lower_bound AND upper_bound])
+PATH_FIRST(graph_path)
```
**Description**
-Takes an expression and computes the population (biased) variance of the values
-in the expression. The final result is an aggregation across
-privacy unit columns between `0` and `+Inf`. You can
-[clamp the input values][dp-clamp-explicit] explicitly, otherwise input values
-are clamped implicitly. Clamping is performed per individual entity values.
+Gets the first node in a graph path.
-This function must be used with the `ANONYMIZATION` clause and
-can support these arguments:
+**Definitions**
-+ `expression`: The input expression. This can be any numeric input type,
- such as `INT64`. `NULL`s are always ignored.
-+ `CLAMPED BETWEEN` clause:
- Perform [clamping][dp-clamp-between] per individual entity values.
++ `graph_path`: A `GRAPH_PATH` value that represents the graph path to
+ extract the first node from.
-`NUMERIC` and `BIGNUMERIC` arguments are not allowed.
- If you need them, cast them as the
-`DOUBLE` data type first.
+**Details**
+
+Returns `NULL` if `graph_path` is `NULL`.
**Return type**
-`DOUBLE`
+`GRAPH_ELEMENT`
**Examples**
-The following differentially private query gets the
-population (biased) variance of items requested. Smaller aggregations might not
-be included. This query references a view called
-[`view_on_professors`][dp-example-views].
-
```sql
--- With noise, using the epsilon parameter.
-SELECT
- WITH ANONYMIZATION
- OPTIONS(epsilon=10, delta=.01, max_groups_contributed=1)
- item,
- ANON_VAR_POP(quantity CLAMPED BETWEEN 0 AND 100) pop_variance
-FROM {{USERNAME}}.view_on_professors
-GROUP BY item;
+GRAPH FinGraph
+MATCH p=(src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET f = PATH_FIRST(p)
+RETURN
+ LABELS(f) AS labels,
+ f.nick_name AS nick_name;
--- These results will change each time you run the query.
--- Smaller aggregations might be removed.
-/*----------+-----------------*
- | item | pop_variance |
- +----------+-----------------+
- | pencil | 642 |
- | pen | 2.6666666666665 |
- | scissors | 2500 |
- *----------+-----------------*/
+/*--------------------------*
+ | labels | nick_name |
+ +--------------------------+
+ | Account | Vacation Fund |
+ | Account | Rainy Day Fund |
+ | Account | Rainy Day Fund |
+ | Account | Vacation Fund |
+ | Account | Vacation Fund |
+ | Account | Vacation Fund |
+ | Account | Rainy Day Fund |
+ *--------------------------*/
```
-[dp-clamp-explicit]: #dp_explicit_clamping
-
-[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
-
-[dp-clamp-between]: #dp_clamp_between
-
-### Clamp values in a differentially private aggregate function
-
+### `PATH_LAST`
-In [differentially private queries][dp-syntax],
-aggregation clamping is used to limit the contribution of outliers. You can
-clamp explicitly or implicitly as follows:
+```sql
+PATH_LAST(graph_path)
+```
-+ [Clamp explicitly in the `DIFFERENTIAL_PRIVACY` clause][dp-clamped-named].
-+ [Clamp implicitly in the `DIFFERENTIAL_PRIVACY` clause][dp-clamped-named-imp].
+**Description**
-#### Implicitly clamp values
-
+Gets the last node in a graph path.
-If you don't include the contribution bounds named argument with the
-`DIFFERENTIAL_PRIVACY` clause, clamping is implicit, which
-means bounds are derived from the data itself in a differentially private way.
+**Definitions**
-Implicit bounding works best when computed using large datasets. For more
-information, see
-[Implicit bounding limitations for small datasets][implicit-limits].
++ `graph_path`: A `GRAPH_PATH` value that represents the graph path to
+ extract the last node from.
**Details**
-In differentially private aggregate functions, explicit clamping is optional.
-If you don't include this clause, clamping is implicit,
-which means bounds are derived from the data itself in a differentially
-private way. The process is somewhat random, so aggregations with identical
-ranges can have different bounds.
-
-Implicit bounds are determined for each aggregation. So if some
-aggregations have a wide range of values, and others have a narrow range of
-values, implicit bounding can identify different bounds for different
-aggregations as appropriate. Implicit bounds might be an advantage or a
-disadvantage depending on your use case. Different bounds for different
-aggregations can result in lower error. Different bounds also means that
-different aggregations have different levels of uncertainty, which might not be
-directly comparable. [Explicit bounds][dp-clamped-named], on the other hand,
-apply uniformly to all aggregations and should be derived from public
-information.
+Returns `NULL` if `graph_path` is `NULL`.
-When clamping is implicit, part of the total epsilon is spent picking bounds.
-This leaves less epsilon for aggregations, so these aggregations are noisier.
+**Return type**
-**Example**
+`GRAPH_ELEMENT`
-The following anonymized query clamps each aggregate contribution for each
-differential privacy ID and within a derived range from the data itself.
-As long as all or most values fall within this range, your results
-will be accurate. This query references a view called
-[`view_on_professors`][dp-example-views].
+**Examples**
```sql
---Without noise (this un-noised version is for demonstration only)
-SELECT WITH DIFFERENTIAL_PRIVACY
- OPTIONS (
- epsilon = 1e20,
- delta = .01,
- privacy_unit_column=id
- )
- item,
- AVG(quantity) average_quantity
-FROM view_on_professors
-GROUP BY item;
+GRAPH FinGraph
+MATCH p=(src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET f = PATH_LAST(p)
+RETURN
+ LABELS(f) AS labels,
+ f.nick_name AS nick_name;
-/*----------+------------------*
- | item | average_quantity |
- +----------+------------------+
- | scissors | 8 |
- | pencil | 72 |
- | pen | 18.5 |
- *----------+------------------*/
+/*--------------------------*
+ | labels | nick_name |
+ +--------------------------+
+ | Account | Vacation Fund |
+ | Account | Vacation Fund |
+ | Account | Vacation Fund |
+ | Account | Vacation Fund |
+ | Account | Rainy Day Fund |
+ | Account | Rainy Day Fund |
+ | Account | Rainy Day Fund |
+ *--------------------------*/
```
-The following anonymized query clamps each aggregate contribution for each
-differential privacy ID and within a derived range from the data itself.
-As long as all or most values fall within this range, your results
-will be accurate. This query references a view called
-[`view_on_professors`][dp-example-views].
+### `PATH_LENGTH`
```sql
---Without noise (this un-noised version is for demonstration only)
-SELECT WITH DIFFERENTIAL_PRIVACY
- OPTIONS (
- epsilon = 1e20,
- delta = .01,
- max_groups_contributed = 1
- )
- item,
- AVG(quantity) AS average_quantity
-FROM view_on_professors
-GROUP BY item;
-
-/*----------+------------------*
- | item | average_quantity |
- +----------+------------------+
- | scissors | 8 |
- | pencil | 72 |
- | pen | 18.5 |
- *----------+------------------*/
+PATH_LENGTH(graph_path)
```
-#### Explicitly clamp values
-
+**Description**
+
+Gets the number of edges in a graph path.
+
+**Definitions**
+
++ `graph_path`: A `GRAPH_PATH` value that represents the graph path with the
+ edges to count.
+
+**Details**
+
+Returns `NULL` if `graph_path` is `NULL`.
+
+**Return type**
+
+`INT64`
+
+**Examples**
```sql
-contribution_bounds_per_group => (lower_bound,upper_bound)
+GRAPH FinGraph
+MATCH p=(src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+RETURN PATH_LENGTH(p) AS results
+
+/*---------*
+ | results |
+ +---------+
+ | 2 |
+ | 2 |
+ | 2 |
+ | 2 |
+ | 2 |
+ | 2 |
+ | 2 |
+ *---------*/
```
+### `PROPERTY_NAMES`
+
```sql
-contribution_bounds_per_row => (lower_bound,upper_bound)
+PROPERTY_NAMES(element)
```
-Use the contribution bounds named argument to explicitly clamp
-values per group or per row between a lower and upper bound in a
-`DIFFERENTIAL_PRIVACY` clause.
+**Description**
-Input values:
+Gets the name of each property associated with a graph element and preserves
+the original case of each name.
-+ `contribution_bounds_per_row`: Contributions per privacy unit are clamped
- on a per-row (per-record) basis. This means the following:
- + Upper and lower bounds are applied to column values in individual
- rows produced by the input subquery independently.
- + The maximum possible contribution per privacy unit (and per grouping set)
- is the product of the per-row contribution limit and `max_groups_contributed`
- differential privacy parameter.
-+ `contribution_bounds_per_group`: Contributions per privacy unit are clamped
- on a unique set of entity-specified `GROUP BY` keys. The upper and lower
- bounds are applied to values per group after the values are aggregated per
- privacy unit.
-+ `lower_bound`: Numeric literal that represents the smallest value to
- include in an aggregation.
-+ `upper_bound`: Numeric literal that represents the largest value to
- include in an aggregation.
+**Arguments**
-`NUMERIC` and `BIGNUMERIC` arguments are not allowed.
++ `element`: A `GRAPH_ELEMENT` value.
**Details**
-In differentially private aggregate functions, clamping explicitly clamps the
-total contribution from each privacy unit column to within a specified
-range.
-
-Explicit bounds are uniformly applied to all aggregations. So even if some
-aggregations have a wide range of values, and others have a narrow range of
-values, the same bounds are applied to all of them. On the other hand, when
-[implicit bounds][dp-clamped-named-imp] are inferred from the data, the bounds
-applied to each aggregation can be different.
+Returns `NULL` if `element` is `NULL`.
-Explicit bounds should be chosen to reflect public information.
-For example, bounding ages between 0 and 100 reflects public information
-because the age of most people generally falls within this range.
+**Return type**
-Important: The results of the query reveal the explicit bounds. Do not use
-explicit bounds based on the entity data; explicit bounds should be based on
-public information.
+`ARRAY<STRING>`
**Examples**
-The following anonymized query clamps each aggregate contribution for each
-differential privacy ID and within a specified range (`0` and `100`).
-As long as all or most values fall within this range, your results
-will be accurate. This query references a view called
-[`view_on_professors`][dp-example-views].
-
```sql
---Without noise (this un-noised version is for demonstration only)
-SELECT WITH DIFFERENTIAL_PRIVACY
- OPTIONS (
- epsilon = 1e20,
- delta = .01,
- privacy_unit_column=id
- )
- item,
- AVG(quantity, contribution_bounds_per_group=>(0,100)) AS average_quantity
-FROM view_on_professors
-GROUP BY item;
-
-/*----------+------------------*
- | item | average_quantity |
- +----------+------------------+
- | scissors | 8 |
- | pencil | 40 |
- | pen | 18.5 |
- *----------+------------------*/
-```
-
-Notice what happens when most or all values fall outside of the clamped range.
-To get accurate results, ensure that the difference between the upper and lower
-bound is as small as possible, and that most inputs are between the upper and
-lower bound.
+GRAPH FinGraph
+MATCH (n:Person|Account)
+RETURN PROPERTY_NAMES(n) AS property_names, n.id
-```sql {.bad}
---Without noise (this un-noised version is for demonstration only)
-SELECT WITH DIFFERENTIAL_PRIVACY
- OPTIONS (
- epsilon = 1e20,
- delta = .01,
- privacy_unit_column=id
- )
- item,
- AVG(quantity, contribution_bounds_per_group=>(50,100)) AS average_quantity
-FROM view_on_professors
-GROUP BY item;
-
-/*----------+------------------*
- | item | average_quantity |
- +----------+------------------+
- | scissors | 54 |
- | pencil | 58 |
- | pen | 51 |
- *----------+------------------*/
+/*-----------------------------------------------+
+ | property_names                           | id |
+ +-----------------------------------------------+
+ | [create_time, id, is_blocked, nick_name] | 7 |
+ | [create_time, id, is_blocked, nick_name] | 16 |
+ | [create_time, id, is_blocked, nick_name] | 20 |
+ | [birthday, city, country, id, name] | 1 |
+ | [birthday, city, country, id, name] | 2 |
+ | [birthday, city, country, id, name] | 3 |
+ +-----------------------------------------------*/
```
-The following differentially private query clamps each aggregate contribution
-for each privacy unit column and within a specified range (`0` and `100`).
-As long as all or most values fall within this range, your results will be
-accurate. This query references a view called
-[`view_on_professors`][dp-example-views].
+### `SOURCE_NODE_ID`
```sql
---Without noise (this un-noised version is for demonstration only)
-SELECT WITH DIFFERENTIAL_PRIVACY
- OPTIONS (
- epsilon = 1e20,
- delta = .01,
- max_groups_contributed = 1
- )
- item,
- AVG(quantity, contribution_bounds_per_group=>(0,100)) AS average_quantity
-FROM view_on_professors
-GROUP BY item;
-
-/*----------+------------------*
- | item | average_quantity |
- +----------+------------------+
- | scissors | 8 |
- | pencil | 40 |
- | pen | 18.5 |
- *----------+------------------*/
+SOURCE_NODE_ID(edge_element)
```
-Notice what happens when most or all values fall outside of the clamped range.
-To get accurate results, ensure that the difference between the upper and lower
-bound is as small as possible, and that most inputs are between the upper and
-lower bound.
+**Description**
-```sql {.bad}
---Without noise (this un-noised version is for demonstration only)
-SELECT WITH DIFFERENTIAL_PRIVACY
- OPTIONS (
- epsilon = 1e20,
- delta = .01,
- max_groups_contributed = 1
- )
- item,
- AVG(quantity, contribution_bounds_per_group=>(50,100)) AS average_quantity
-FROM view_on_professors
-GROUP BY item;
+Gets a unique identifier of a graph edge's source node. The unique identifier is only valid for the scope of the query where it is obtained.
-/*----------+------------------*
- | item | average_quantity |
- +----------+------------------+
- | scissors | 54 |
- | pencil | 58 |
- | pen | 51 |
- *----------+------------------*/
-```
+**Arguments**
-Note: For more information about when and when not to use
-noise, see [Remove noise][dp-noise].
++ `edge_element`: A `GRAPH_ELEMENT` value that represents an edge.
-[dp-guide]: https://github.com/google/zetasql/blob/master/docs/differential-privacy.md
+**Details**
-[dp-syntax]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_clause
+Returns `NULL` if `edge_element` is `NULL`.
-[agg-function-calls]: https://github.com/google/zetasql/blob/master/docs/aggregate-function-calls.md
+**Return type**
-[dp-example-views]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#dp_example_views
+`STRING`
-[dp-noise]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#eliminate_noise
+**Examples**
-[implicit-limits]: https://github.com/google/zetasql/blob/master/docs/differential-privacy.md#implicit_limits
+```sql
+GRAPH FinGraph
+MATCH (p:Person)-[o:Owns]->(:Account)
+RETURN p.name AS name, SOURCE_NODE_ID(o) AS source_node_id
-[dp-clamped-named]: #dp_clamped_named
+/*-------------------------------------+
+ | name | source_node_id |
+ +------+------------------------------+
+ | Alex | mUZpbkdyYXBoLlBlcnNvbgB4kQI= |
+ | Dana | mUZpbkdyYXBoLlBlcnNvbgB4kQQ= |
+ | Lee | mUZpbkdyYXBoLlBlcnNvbgB4kQY= |
+ +-------------------------------------*/
+```
-[dp-clamped-named-imp]: #dp_clamped_named_implicit
+Note that the actual identifiers obtained may be different from what's shown above.
+
+[functions-and-operators]: https://github.com/google/zetasql/blob/master/docs/functions-and-operators.md
## Geography functions
@@ -15518,7 +17678,6 @@ behavior:
S2_CELLIDFROMPOINT
-
|
Gets the S2 cell ID covering a point GEOGRAPHY value.
@@ -15527,7 +17686,6 @@ behavior:
|
S2_COVERINGCELLIDS
-
|
Gets an array of S2 cell IDs that cover a GEOGRAPHY value.
@@ -15536,7 +17694,6 @@ behavior:
|
ST_ACCUM
-
|
Aggregates GEOGRAPHY values into an array of
@@ -15546,7 +17703,6 @@ behavior:
|
ST_ANGLE
-
|
Takes three point GEOGRAPHY values, which represent two
@@ -15556,7 +17712,6 @@ behavior:
|
ST_AREA
-
|
Gets the area covered by the polygons in a GEOGRAPHY value.
@@ -15565,7 +17720,6 @@ behavior:
|
ST_ASBINARY
-
|
Converts a GEOGRAPHY value to a
@@ -15575,7 +17729,6 @@ behavior:
|
ST_ASGEOJSON
-
|
Converts a GEOGRAPHY value to a STRING
@@ -15585,7 +17738,6 @@ behavior:
|
ST_ASKML
-
|
Converts a GEOGRAPHY value to a STRING
@@ -15595,7 +17747,6 @@ behavior:
|
ST_ASTEXT
-
|
Converts a GEOGRAPHY value to a
@@ -15605,7 +17756,6 @@ behavior:
|
ST_AZIMUTH
-
|
Gets the azimuth of a line segment formed by two
@@ -15615,7 +17765,6 @@ behavior:
|
ST_BOUNDARY
-
|
Gets the union of component boundaries in a
@@ -15625,7 +17774,6 @@ behavior:
|
ST_BOUNDINGBOX
-
|
Gets the bounding box for a GEOGRAPHY value.
@@ -15634,7 +17782,6 @@ behavior:
|
ST_BUFFER
-
|
Gets the buffer around a GEOGRAPHY value, using a specific
@@ -15644,7 +17791,6 @@ behavior:
|
ST_BUFFERWITHTOLERANCE
-
|
Gets the buffer around a GEOGRAPHY value, using tolerance.
@@ -15653,7 +17799,6 @@ behavior:
|
ST_CENTROID
-
|
Gets the centroid of a GEOGRAPHY value.
@@ -15662,7 +17807,6 @@ behavior:
|
ST_CLOSESTPOINT
-
|
Gets the point on a GEOGRAPHY value which is closest to any
@@ -15672,17 +17816,16 @@ behavior:
|
ST_CLUSTERDBSCAN
-
|
Performs DBSCAN clustering on a group of GEOGRAPHY values and
produces a 0-based cluster number for this row.
+
|
ST_CONTAINS
-
|
Checks if one GEOGRAPHY value contains another
@@ -15692,7 +17835,6 @@ behavior:
|
ST_CONVEXHULL
-
|
Returns the convex hull for a GEOGRAPHY value.
@@ -15701,7 +17843,6 @@ behavior:
|
ST_COVEREDBY
-
|
Checks if all points of a GEOGRAPHY value are on the boundary
@@ -15711,7 +17852,6 @@ behavior:
|
ST_COVERS
-
|
Checks if all points of a GEOGRAPHY value are on the boundary
@@ -15721,7 +17861,6 @@ behavior:
|
ST_DIFFERENCE
-
|
Gets the point set difference between two GEOGRAPHY values.
@@ -15730,7 +17869,6 @@ behavior:
|
ST_DIMENSION
-
|
Gets the dimension of the highest-dimensional element in a
@@ -15740,7 +17878,6 @@ behavior:
|
ST_DISJOINT
-
|
Checks if two GEOGRAPHY values are disjoint (do not intersect).
@@ -15749,7 +17886,6 @@ behavior:
|
ST_DISTANCE
-
|
Gets the shortest distance in meters between two GEOGRAPHY
@@ -15759,7 +17895,6 @@ behavior:
|
ST_DUMP
-
|
Returns an array of simple GEOGRAPHY components in a
@@ -15769,7 +17904,6 @@ behavior:
|
ST_DUMPPOINTS
-
|
Produces an array of GEOGRAPHY points with all points, line
@@ -15779,7 +17913,6 @@ behavior:
|
ST_DWITHIN
-
|
Checks if any points in two GEOGRAPHY values are within a given
@@ -15789,7 +17922,6 @@ behavior:
|
ST_ENDPOINT
-
|
Gets the last point of a linestring GEOGRAPHY value.
@@ -15798,7 +17930,6 @@ behavior:
|
ST_EQUALS
-
|
Checks if two GEOGRAPHY values represent the same
@@ -15808,16 +17939,15 @@ behavior:
|
ST_EXTENT
-
|
Gets the bounding box for a group of GEOGRAPHY values.
+
|
ST_EXTERIORRING
-
|
Returns a linestring GEOGRAPHY value that corresponds to the
@@ -15827,7 +17957,6 @@ behavior:
|
ST_GEOGFROM
-
|
Converts a STRING or BYTES value
@@ -15837,7 +17966,6 @@ behavior:
|
ST_GEOGFROMGEOJSON
-
|
Converts a STRING GeoJSON geometry value into a
@@ -15847,7 +17975,6 @@ behavior:
|
ST_GEOGFROMKML
-
|
Converts a STRING KML geometry value into a
@@ -15857,7 +17984,6 @@ behavior:
|
ST_GEOGFROMTEXT
-
|
Converts a STRING WKT geometry value into a
@@ -15867,7 +17993,6 @@ behavior:
|
ST_GEOGFROMWKB
-
|
Converts a BYTES or hexadecimal-text STRING WKT
@@ -15877,7 +18002,6 @@ behavior:
|
ST_GEOGPOINT
-
|
Creates a point GEOGRAPHY value for a given longitude and
@@ -15887,7 +18011,6 @@ behavior:
|
ST_GEOGPOINTFROMGEOHASH
-
|
Gets a point GEOGRAPHY value that is in the middle of a
@@ -15897,7 +18020,6 @@ behavior:
|
ST_GEOHASH
-
|
Converts a point GEOGRAPHY value to a STRING
@@ -15907,7 +18029,6 @@ behavior:
|
ST_GEOMETRYTYPE
-
|
Gets the Open Geospatial Consortium (OGC) geometry type for a
@@ -15917,14 +18038,12 @@ behavior:
|
ST_HAUSDORFFDISTANCE
-
|
Gets the discrete Hausdorff distance between two geometries. |
ST_INTERIORRINGS
-
|
Gets the interior rings of a polygon GEOGRAPHY value.
@@ -15933,7 +18052,6 @@ behavior:
|
ST_INTERSECTION
-
|
Gets the point set intersection of two GEOGRAPHY values.
@@ -15942,7 +18060,6 @@ behavior:
|
ST_INTERSECTS
-
|
Checks if at least one point appears in two GEOGRAPHY
@@ -15952,7 +18069,6 @@ behavior:
|
ST_INTERSECTSBOX
-
|
Checks if a GEOGRAPHY value intersects a rectangle.
@@ -15961,7 +18077,6 @@ behavior:
|
ST_ISCLOSED
-
|
Checks if all components in a GEOGRAPHY value are closed.
@@ -15970,7 +18085,6 @@ behavior:
|
ST_ISCOLLECTION
-
|
Checks if the total number of points, linestrings, and polygons is
@@ -15980,7 +18094,6 @@ behavior:
|
ST_ISEMPTY
-
|
Checks if a GEOGRAPHY value is empty.
@@ -15989,7 +18102,6 @@ behavior:
|
ST_ISRING
-
|
Checks if a GEOGRAPHY value is a closed, simple
@@ -15999,7 +18111,6 @@ behavior:
|
ST_LENGTH
-
|
Gets the total length of lines in a GEOGRAPHY value.
@@ -16008,7 +18119,6 @@ behavior:
|
ST_LINEINTERPOLATEPOINT
-
|
Gets a point at a specific fraction in a linestring GEOGRAPHY
@@ -16018,7 +18128,6 @@ behavior:
|
ST_LINELOCATEPOINT
-
|
Gets a section of a linestring GEOGRAPHY value between the
@@ -16028,7 +18137,6 @@ behavior:
|
ST_LINESUBSTRING
-
|
Gets a segment of a single linestring at a specific starting and
@@ -16038,7 +18146,6 @@ behavior:
|
ST_MAKELINE
-
|
Creates a linestring GEOGRAPHY value by concatenating the point
@@ -16048,7 +18155,6 @@ behavior:
|
ST_MAKEPOLYGON
-
|
Constructs a polygon GEOGRAPHY value by combining
@@ -16058,7 +18164,6 @@ behavior:
|
ST_MAKEPOLYGONORIENTED
-
|
Constructs a polygon GEOGRAPHY value, using an array of
@@ -16069,7 +18174,6 @@ behavior:
|
ST_MAXDISTANCE
-
|
Gets the longest distance between two non-empty
@@ -16079,7 +18183,6 @@ behavior:
|
ST_NPOINTS
-
|
An alias of ST_NUMPOINTS .
@@ -16088,7 +18191,6 @@ behavior:
|
ST_NUMGEOMETRIES
-
|
Gets the number of geometries in a GEOGRAPHY value.
@@ -16097,7 +18199,6 @@ behavior:
|
ST_NUMPOINTS
-
|
Gets the number of vertices in the a GEOGRAPHY value.
@@ -16106,7 +18207,6 @@ behavior:
|
ST_PERIMETER
-
|
Gets the length of the boundary of the polygons in a
@@ -16116,7 +18216,6 @@ behavior:
|
ST_POINTN
-
|
Gets the point at a specific index of a linestring GEOGRAPHY
@@ -16126,7 +18225,6 @@ behavior:
|
ST_SIMPLIFY
-
|
Converts a GEOGRAPHY value into a simplified
@@ -16136,7 +18234,6 @@ behavior:
|
ST_SNAPTOGRID
-
|
Produces a GEOGRAPHY value, where each vertex has
@@ -16146,7 +18243,6 @@ behavior:
|
ST_STARTPOINT
-
|
Gets the first point of a linestring GEOGRAPHY value.
@@ -16155,7 +18251,6 @@ behavior:
|
ST_TOUCHES
-
|
Checks if two GEOGRAPHY values intersect and their interiors
@@ -16165,7 +18260,6 @@ behavior:
|
ST_UNION
-
|
Gets the point set union of multiple GEOGRAPHY values.
@@ -16174,17 +18268,16 @@ behavior:
|
ST_UNION_AGG
-
|
Aggregates over GEOGRAPHY values and gets their
point set union.
+
|
ST_WITHIN
-
|
Checks if one GEOGRAPHY value contains another
@@ -16194,7 +18287,6 @@ behavior:
|
ST_X
-
|
Gets the longitude from a point GEOGRAPHY value.
@@ -16203,7 +18295,6 @@ behavior:
|
ST_Y
-
|
Gets the latitude from a point GEOGRAPHY value.
@@ -17173,7 +19264,7 @@ be in the difference.
**Example**
The following query illustrates the difference between `geog1`, a larger polygon
-`POLYGON((0 0, 10 0, 10 10, 0 0))` and `geog1`, a smaller polygon
+`POLYGON((0 0, 10 0, 10 10, 0 0))` and `geog2`, a smaller polygon
`POLYGON((4 2, 6 2, 8 6, 4 2))` that intersects with `geog1`. The result is
`geog1` with a hole where `geog2` intersects with it.
@@ -17434,19 +19525,24 @@ ST_EQUALS(geography_1, geography_2)
**Description**
-Returns `TRUE` if `geography_1` and `geography_2` represent the same
+Checks if two `GEOGRAPHY` values represent the same `GEOGRAPHY` value. Returns
+`TRUE` if the values are the same, otherwise returns `FALSE`.
-`GEOGRAPHY` value. More precisely, this means that
-one of the following conditions holds:
-+ `ST_COVERS(geography_1, geography_2) = TRUE` and `ST_COVERS(geography_2,
- geography_1) = TRUE`
-+ Both `geography_1` and `geography_2` are empty.
+**Definitions**
-Therefore, two `GEOGRAPHY`s may be equal even if the
-ordering of points or vertices differ, as long as they still represent the same
-geometric structure.
++ `geography_1`: The first `GEOGRAPHY` value to compare.
++ `geography_2`: The second `GEOGRAPHY` value to compare.
-**Constraints**
+**Details**
+
+As long as they still represent the same geometric structure, two
+`GEOGRAPHY` values can be equal even if the ordering of points or vertices
+differ. This means that one of the following conditions must be true for this
+function to return `TRUE`:
+
++ Both `ST_COVERS(geography_1, geography_2)` and
+ `ST_COVERS(geography_2, geography_1)` are `TRUE`.
++ Both `geography_1` and `geography_2` are empty.
`ST_EQUALS` is not guaranteed to be a transitive function.
@@ -19342,7 +21438,6 @@ ZetaSQL supports the following hash functions.
|
FARM_FINGERPRINT
-
|
Computes the fingerprint of a STRING or
@@ -19352,7 +21447,6 @@ ZetaSQL supports the following hash functions.
|
MD5
-
|
Computes the hash of a STRING or
@@ -19362,7 +21456,6 @@ ZetaSQL supports the following hash functions.
|
SHA1
-
|
Computes the hash of a STRING or
@@ -19372,7 +21465,6 @@ ZetaSQL supports the following hash functions.
|
SHA256
-
|
Computes the hash of a STRING or
@@ -19382,7 +21474,6 @@ ZetaSQL supports the following hash functions.
|
SHA512
-
|
Computes the hash of a STRING or
@@ -19591,7 +21682,6 @@ ZetaSQL supports the following HLL++ functions:
|
HLL_COUNT.EXTRACT
-
|
Extracts a cardinality estimate of an HLL++ sketch.
@@ -19600,7 +21690,6 @@ ZetaSQL supports the following HLL++ functions:
|
HLL_COUNT.INIT
-
|
Aggregates values of the same underlying type into a new HLL++ sketch.
@@ -19609,7 +21698,6 @@ ZetaSQL supports the following HLL++ functions:
|
HLL_COUNT.MERGE
-
|
Merges HLL++ sketches of the same underlying type into a new sketch, and
@@ -19619,7 +21707,6 @@ ZetaSQL supports the following HLL++ functions:
|
HLL_COUNT.MERGE_PARTIAL
-
|
Merges HLL++ sketches of the same underlying type into a new sketch.
@@ -19912,7 +21999,6 @@ ZetaSQL supports the following interval functions.
|
EXTRACT
-
|
Extracts part of an INTERVAL value.
@@ -19921,7 +22007,6 @@ ZetaSQL supports the following interval functions.
|
JUSTIFY_DAYS
-
|
Normalizes the day part of an INTERVAL value.
@@ -19930,7 +22015,6 @@ ZetaSQL supports the following interval functions.
|
JUSTIFY_HOURS
-
|
Normalizes the time part of an INTERVAL value.
@@ -19939,7 +22023,6 @@ ZetaSQL supports the following interval functions.
|
JUSTIFY_INTERVAL
-
|
Normalizes the day and time parts of an INTERVAL value.
@@ -19948,7 +22031,6 @@ ZetaSQL supports the following interval functions.
|
MAKE_INTERVAL
-
|
Constructs an INTERVAL value.
@@ -20364,6 +22446,9 @@ behavior:
TO_JSON
+ SAFE_TO_JSON
+
+
TO_JSON_STRING
|
@@ -20439,18 +22524,21 @@ behavior:
BOOL
-
|
Converts a JSON boolean to a SQL BOOL value.
+
|
- BOOL
-
+ | BOOL_ARRAY
|
- Converts a JSON array of booleans to a SQL ARRAY<BOOL> value. |
+
+ Converts a JSON array of booleans to a
+ SQL ARRAY<BOOL> value.
+
+ |
@@ -20498,60 +22586,70 @@ behavior:
INT32
-
|
Converts a JSON number to a SQL INT32 value.
+
|
INT32_ARRAY
-
|
- Converts a JSON number to a SQL ARRAY<INT32> value. |
+
+ Converts a JSON number to a SQL ARRAY<INT32> value.
+
+ |
INT64
-
|
Converts a JSON number to a SQL INT64 value.
+
|
INT64_ARRAY
-
|
- Converts a JSON array of numbers to a SQL ARRAY<INT64> value. |
+
+ Converts a JSON array of numbers to a
+ SQL ARRAY<INT64> value.
+
+ |
JSON_ARRAY
-
|
- Creates a JSON array. |
+
+ Creates a JSON array.
+
+ |
JSON_ARRAY_APPEND
-
|
- Appends JSON data to the end of a JSON array. |
+
+ Appends JSON data to the end of a JSON array.
+
+ |
JSON_ARRAY_INSERT
-
|
- Inserts JSON data into a JSON array. |
+
+ Inserts JSON data into a JSON array.
+
+ |
JSON_EXTRACT
-
|
(Deprecated)
@@ -20566,7 +22664,6 @@ behavior:
|
JSON_EXTRACT_ARRAY
-
|
(Deprecated)
@@ -20576,12 +22673,12 @@ behavior:
ARRAY<JSON>
value.
+
|
JSON_EXTRACT_SCALAR
-
|
(Deprecated)
@@ -20592,25 +22689,23 @@ behavior:
|
JSON_EXTRACT_STRING_ARRAY
-
|
(Deprecated)
Extracts a JSON array of scalar values and converts it to a SQL
ARRAY<STRING> value.
+
|
JSON_OBJECT
-
|
Creates a JSON object. |
JSON_QUERY
-
|
Extracts a JSON value and converts it to a SQL
@@ -20624,7 +22719,6 @@ behavior:
|
JSON_QUERY_ARRAY
-
|
Extracts a JSON array and converts it to
@@ -20633,33 +22727,30 @@ behavior:
ARRAY<JSON>
value.
+
|
JSON_REMOVE
-
|
Produces JSON with the specified JSON data removed. |
JSON_SET
-
|
Inserts or replaces JSON data. |
JSON_STRIP_NULLS
-
|
Removes JSON nulls from JSON objects and JSON arrays. |
JSON_TYPE
-
|
Gets the JSON type of the outermost JSON value and converts the name of
@@ -20669,7 +22760,6 @@ behavior:
|
JSON_VALUE
-
|
Extracts a JSON scalar value and converts it to a SQL
@@ -20679,28 +22769,31 @@ behavior:
|
JSON_VALUE_ARRAY
-
|
Extracts a JSON array of scalar values and converts it to a SQL
ARRAY<STRING> value.
+
|
LAX_BOOL
-
|
Attempts to convert a JSON value to a SQL BOOL value.
+
|
LAX_BOOL_ARRAY
-
|
- Attempts to convert a JSON value to a SQL ARRAY<BOOL> value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<BOOL> value.
+
+ |
@@ -20713,6 +22806,7 @@ behavior:
Attempts to convert a JSON value to a
SQL DOUBLE value.
+
|
@@ -20723,7 +22817,11 @@ behavior:
|
- Attempts to convert a JSON value to a SQL ARRAY<DOUBLE> value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<DOUBLE> value.
+
+ |
@@ -20733,7 +22831,11 @@ behavior:
- Attempts to convert a JSON value to a SQL FLOAT value. |
+
+ Attempts to convert a JSON value to a
+ SQL FLOAT value.
+
+ |
@@ -20743,167 +22845,199 @@ behavior:
- Attempts to convert a JSON value to a SQL ARRAY>FLOAT< value. |
+
+ Attempts to convert a JSON value to a
+      SQL ARRAY<FLOAT> value.
+
+ |
LAX_INT32
-
|
Attempts to convert a JSON value to a SQL INT32 value.
+
|
LAX_INT32_ARRAY
-
|
- Attempts to convert a JSON value to a SQL ARRAY<INT32> value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<INT32> value.
+
+ |
LAX_INT64
-
|
Attempts to convert a JSON value to a SQL INT64 value.
+
|
LAX_INT64_ARRAY
-
|
- Attempts to convert a JSON value to a SQL ARRAY<INT64> value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<INT64> value.
+
+ |
LAX_STRING
-
|
Attempts to convert a JSON value to a SQL STRING value.
+
|
LAX_STRING_ARRAY
-
|
- Attempts to convert a JSON value to a SQL ARRAY<STRING> value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<STRING> value.
+
+ |
LAX_UINT32
-
|
Attempts to convert a JSON value to a SQL UINT32 value.
+
|
LAX_UINT32_ARRAY
-
|
- Attempts to convert a JSON value to a SQL ARRAY<UINT32> value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<UINT32> value.
+
+ |
LAX_UINT64
-
|
Attempts to convert a JSON value to a SQL UINT64 value.
+
|
LAX_UINT64_ARRAY
-
|
- Attempts to convert a JSON value to a SQL ARRAY<UINT64> value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<UINT64> value.
+
+ |
PARSE_JSON
-
|
Converts a JSON-formatted STRING value to a
JSON value.
+
|
- STRING
-
+ | SAFE_TO_JSON
|
- Converts a JSON string to a SQL STRING value.
+ Similar to the `TO_JSON` function, but for each unsupported field in the
+ input argument, produces a JSON null instead of an error.
|
+ STRING (JSON)
+ |
- STRING_ARRAY
-
+ Converts a JSON string to a SQL STRING value.
+
|
+
+
+
+ STRING_ARRAY
+ |
Converts a JSON array of strings to a SQL ARRAY<STRING>
value.
+
|
TO_JSON
-
|
Converts a SQL value to a JSON value.
+
|
TO_JSON_STRING
-
|
Converts a SQL value to a JSON-formatted STRING value.
+
|
UINT32
-
|
Converts a JSON number to a SQL UINT32 value.
+
|
UINT32_ARRAY
-
|
- Converts a JSON number to a SQL ARRAY<UINT32> value. |
+
+ Converts a JSON number to a
+ SQL ARRAY<UINT32> value.
+
+ |
UINT64
-
|
Converts a JSON number to a SQL UINT64 value.
+
|
UINT64_ARRAY
-
|
- Converts a JSON number to a SQL ARRAY<UINT64> value. |
+
+ Converts a JSON number to a SQL ARRAY<UINT64> value.
+
+ |
@@ -27630,6 +29764,75 @@ SELECT PARSE_JSON('"red"') AS json_data;
*------------------------------*/
```
+### `SAFE_TO_JSON`
+
+```sql
+SAFE_TO_JSON(sql_value)
+```
+
+**Description**
+
+Similar to the `TO_JSON` function, but for each unsupported field in the
+input argument, produces a JSON null instead of an error.
+
+Arguments:
+
++ `sql_value`: The SQL value to convert to a JSON value. You can review the
+ ZetaSQL data types that this function supports and their
+ [JSON encodings][json-encodings].
+
+**Return type**
+
+`JSON`
+
+**Example**
+
+The following queries are functionally the same, except that `SAFE_TO_JSON`
+produces a JSON null instead of an error when a hypothetical unsupported
+data type is encountered:
+
+```sql
+-- Produces a JSON null.
+SELECT SAFE_TO_JSON(CAST(b'' AS UNSUPPORTED_TYPE)) as result;
+```
+
+```sql
+-- Produces an error.
+SELECT TO_JSON(CAST(b'' AS UNSUPPORTED_TYPE), stringify_wide_numbers=>TRUE) as result;
+```
+
+In the following query, the value for `ut` is ignored because the value is an
+unsupported type:
+
+```sql
+SELECT SAFE_TO_JSON(STRUCT(CAST(b'' AS UNSUPPORTED_TYPE) AS ut)) AS result;
+
+/*--------------*
+ | result |
+ +--------------+
+ | {"ut": null} |
+ *--------------*/
+```
+
+The following array produces a JSON null instead of an error because the data
+type for the array is not supported.
+
+```sql
+SELECT SAFE_TO_JSON([
+ CAST(b'' AS UNSUPPORTED_TYPE),
+ CAST(b'' AS UNSUPPORTED_TYPE),
+ CAST(b'' AS UNSUPPORTED_TYPE),
+ ]) AS result;
+
+/*------------*
+ | result |
+ +------------+
+ | null |
+ *------------*/
+```
+
+[json-encodings]: #json_encodings
+
### `STRING`
@@ -28186,6 +30389,106 @@ SELECT UINT64_ARRAY(JSON '[null]') AS result; -- Throws an error
SELECT UINT64_ARRAY(JSON 'null') AS result; -- Throws an error
```
+### Supplemental materials
+
+### Differences between the JSON and JSON-formatted STRING types
+
+
+Many JSON functions accept two input types:
+
++ [`JSON`][JSON-type] type
++ `STRING` type
+
+The `STRING` version of the extraction functions behaves differently than the
+`JSON` version, mainly because `JSON` type values are always validated whereas
+JSON-formatted `STRING` type values are not.
+
+#### Non-validation of `STRING` inputs
+
+The following `STRING` is invalid JSON because it is missing a trailing `}`:
+
+```
+{"hello": "world"
+```
+
+The JSON function reads the input from the beginning and stops as soon as the
+field to extract is found, without reading the remainder of the input. A parsing
+error is not produced.
+
+With the `JSON` type, however, `JSON '{"hello": "world"'` returns a parsing
+error.
+
+For example:
+
+```sql
+SELECT JSON_VALUE('{"hello": "world"', "$.hello") AS hello;
+
+/*-------*
+ | hello |
+ +-------+
+ | world |
+ *-------*/
+```
+
+```sql
+SELECT JSON_VALUE(JSON '{"hello": "world"', "$.hello") AS hello;
+-- An error is returned: Invalid JSON literal: syntax error while parsing
+-- object - unexpected end of input; expected '}'
+```
+
+#### No strict validation of extracted values
+
+In the following examples, duplicated keys are not removed when using a
+JSON-formatted string. Similarly, keys order is preserved. For the `JSON`
+type, `JSON '{"key": 1, "key": 2}'` will result in `JSON '{"key":1}'` during
+parsing.
+
+```sql
+SELECT JSON_QUERY('{"key": 1, "key": 2}', "$") AS string;
+
+/*-------------------*
+ | string |
+ +-------------------+
+ | {"key":1,"key":2} |
+ *-------------------*/
+```
+
+```sql
+SELECT JSON_QUERY(JSON '{"key": 1, "key": 2}', "$") AS json;
+
+/*-----------*
+ | json |
+ +-----------+
+ | {"key":1} |
+ *-----------*/
+```
+
+#### JSON `null`
+
+When using a JSON-formatted `STRING` type in a JSON function, a JSON `null`
+value is extracted as a SQL `NULL` value.
+
+When using a JSON type in a JSON function, a JSON `null` value returns a JSON
+`null` value.
+
+```sql
+WITH t AS (
+ SELECT '{"name": null}' AS json_string, JSON '{"name": null}' AS json)
+SELECT JSON_QUERY(json_string, "$.name") AS name_string,
+ JSON_QUERY(json_string, "$.name") IS NULL AS name_string_is_null,
+ JSON_QUERY(json, "$.name") AS name_json,
+ JSON_QUERY(json, "$.name") IS NULL AS name_json_is_null
+FROM t;
+
+/*-------------+---------------------+-----------+-------------------*
+ | name_string | name_string_is_null | name_json | name_json_is_null |
+ +-------------+---------------------+-----------+-------------------+
+ | NULL | true | null | false |
+ *-------------+---------------------+-----------+-------------------*/
+```
+
+[JSON-type]: https://github.com/google/zetasql/blob/master/docs/data-types.md#json_type
+
### JSON encodings
@@ -28325,6 +30628,26 @@ The following SQL to JSON encodings are supported:
+
+ INTERVAL |
+ string |
+ SQL input: INTERVAL '10:20:30.52' HOUR TO SECOND
+ JSON output: "PT10H20M30.52S"
+
+ SQL input: INTERVAL 1 SECOND
+ JSON output: "PT1S"
+
+ INTERVAL -25 MONTH
+ JSON output: "P-2Y-1M"
+
+ INTERVAL '1 5:30' DAY TO MINUTE
+ JSON output: "P1DT5H30M"
+ |
+
+
+
+
+
NUMERIC
@@ -28639,6 +30962,54 @@ The following SQL to JSON encodings are supported:
+ |
+ GRAPH_ELEMENT |
+
+ object
+
+ The object can contain zero or more key-value pairs.
+ Each value is formatted according to its type.
+
+
+ For TO_JSON , graph
+ element (node or edge) objects are supported.
+
+
+ -
+ The graph element identifier is only valid within the scope of the
+ same query response and cannot be used to correlate entities across
+ different queries.
+
+ -
+ Field names that aren't valid UTF-8 might result in unparseable
+ JSON.
+
+ -
+ The result may include internal key-value pairs that are not defined
+ by the users.
+
+ -
+ The conversion can fail if the object contains values of unsupported
+ types.
+
+
+ |
+
+ SQL:
+
+GRAPH FinGraph
+MATCH (p:Person WHERE p.name = 'Dana')
+RETURN TO_JSON(p) AS dana_json;
+
+ JSON output (truncated):
+
+{"identifier":"ZGFuYQ==","kind":"node","labels":["Person"],"properties":{"id":2,"name":"Dana"}}
+ |
+
+
+
+
+
RANGE |
@@ -28781,108 +31152,6 @@ The JSONPath format supports these operators:
-### Differences between the JSON and JSON-formatted STRING types
-
-
-Many JSON functions accept two input types:
-
-+ [`JSON`][JSON-type] type
-+ `STRING` type
-
-The `STRING` version of the extraction functions behaves differently than the
-`JSON` version, mainly because `JSON` type values are always validated whereas
-JSON-formatted `STRING` type values are not.
-
-#### Non-validation of `STRING` inputs
-
-The following `STRING` is invalid JSON because it is missing a trailing `}`:
-
-```
-{"hello": "world"
-```
-
-The JSON function reads the input from the beginning and stops as soon as the
-field to extract is found, without reading the remainder of the input. A parsing
-error is not produced.
-
-With the `JSON` type, however, `JSON '{"hello": "world"'` returns a parsing
-error.
-
-For example:
-
-```sql
-SELECT JSON_VALUE('{"hello": "world"', "$.hello") AS hello;
-
-/*-------*
- | hello |
- +-------+
- | world |
- *-------*/
-```
-
-```sql
-SELECT JSON_VALUE(JSON '{"hello": "world"', "$.hello") AS hello;
--- An error is returned: Invalid JSON literal: syntax error while parsing
--- object - unexpected end of input; expected '}'
-```
-
-#### No strict validation of extracted values
-
-In the following examples, duplicated keys are not removed when using a
-JSON-formatted string. Similarly, keys order is preserved. For the `JSON`
-type, `JSON '{"key": 1, "key": 2}'` will result in `JSON '{"key":1}'` during
-parsing.
-
-```sql
-SELECT JSON_QUERY('{"key": 1, "key": 2}', "$") AS string;
-
-/*-------------------*
- | string |
- +-------------------+
- | {"key":1,"key":2} |
- *-------------------*/
-```
-
-```sql
-SELECT JSON_QUERY(JSON '{"key": 1, "key": 2}', "$") AS json;
-
-/*-----------*
- | json |
- +-----------+
- | {"key":1} |
- *-----------*/
-```
-
-#### JSON `null`
-
-When using a JSON-formatted `STRING` type in a JSON function, a JSON `null`
-value is extracted as a SQL `NULL` value.
-
-When using a JSON type in a JSON function, a JSON `null` value returns a JSON
-`null` value.
-
-```sql
-WITH t AS (
- SELECT '{"name": null}' AS json_string, JSON '{"name": null}' AS json)
-SELECT JSON_QUERY(json_string, "$.name") AS name_string,
- JSON_QUERY(json_string, "$.name") IS NULL AS name_string_is_null,
- JSON_QUERY(json, "$.name") AS name_json,
- JSON_QUERY(json, "$.name") IS NULL AS name_json_is_null
-FROM t;
-
-/*-------------+---------------------+-----------+-------------------*
- | name_string | name_string_is_null | name_json | name_json_is_null |
- +-------------+---------------------+-----------+-------------------+
- | NULL | true | null | false |
- *-------------+---------------------+-----------+-------------------*/
-```
-
-[JSONPath-format]: #JSONPath_format
-
-[JSON-type]: https://github.com/google/zetasql/blob/master/docs/data-types.md#json_type
-
-[JSONPath-mode]: #JSONPath_mode
-
## Mathematical functions
ZetaSQL supports mathematical functions.
@@ -29049,7 +31318,6 @@ All mathematical functions have the following behaviors:
|
ABS
-
|
Computes the absolute value of X .
@@ -29058,7 +31326,6 @@ All mathematical functions have the following behaviors:
|
ACOS
-
|
Computes the inverse cosine of X .
@@ -29067,7 +31334,6 @@ All mathematical functions have the following behaviors:
|
ACOSH
-
|
Computes the inverse hyperbolic cosine of X .
@@ -29076,7 +31342,6 @@ All mathematical functions have the following behaviors:
|
ASIN
-
|
Computes the inverse sine of X .
@@ -29085,7 +31350,6 @@ All mathematical functions have the following behaviors:
|
ASINH
-
|
Computes the inverse hyperbolic sine of X .
@@ -29094,7 +31358,6 @@ All mathematical functions have the following behaviors:
|
ATAN
-
|
Computes the inverse tangent of X .
@@ -29103,7 +31366,6 @@ All mathematical functions have the following behaviors:
|
ATAN2
-
|
Computes the inverse tangent of X/Y , using the signs of
@@ -29113,7 +31375,6 @@ All mathematical functions have the following behaviors:
|
ATANH
-
|
Computes the inverse hyperbolic tangent of X .
@@ -29121,8 +31382,30 @@ All mathematical functions have the following behaviors:
|
- CBRT
+ | AVG
+ |
+
+ Gets the average of non-NULL values.
+ For more information, see Aggregate functions.
+ |
+
+
+
+ AVG (Differential Privacy)
+ |
+
+ DIFFERENTIAL_PRIVACY -supported AVG .
+ Gets the differentially-private average of non-NULL ,
+ non-NaN values in a query with a
+ DIFFERENTIAL_PRIVACY clause.
+
For more information, see Differential privacy functions.
+
+ |
+
+
+
+ CBRT
|
Computes the cube root of X .
@@ -29131,7 +31414,6 @@ All mathematical functions have the following behaviors:
|
CEIL
-
|
Gets the smallest integral value that is not less than X .
@@ -29140,7 +31422,6 @@ All mathematical functions have the following behaviors:
|
CEILING
-
|
Synonym of CEIL .
@@ -29149,7 +31430,6 @@ All mathematical functions have the following behaviors:
|
COS
-
|
Computes the cosine of X .
@@ -29158,7 +31438,6 @@ All mathematical functions have the following behaviors:
|
COSH
-
|
Computes the hyperbolic cosine of X .
@@ -29167,14 +31446,12 @@ All mathematical functions have the following behaviors:
|
COSINE_DISTANCE
-
|
Computes the cosine distance between two vectors. |
COT
-
|
Computes the cotangent of X .
@@ -29183,7 +31460,6 @@ All mathematical functions have the following behaviors:
|
COTH
-
|
Computes the hyperbolic cotangent of X .
@@ -29192,7 +31468,6 @@ All mathematical functions have the following behaviors:
|
CSC
-
|
Computes the cosecant of X .
@@ -29201,7 +31476,6 @@ All mathematical functions have the following behaviors:
|
CSCH
-
|
Computes the hyperbolic cosecant of X .
@@ -29210,7 +31484,6 @@ All mathematical functions have the following behaviors:
|
DIV
-
|
Divides integer X by integer Y .
@@ -29219,7 +31492,6 @@ All mathematical functions have the following behaviors:
|
EXP
-
|
Computes e to the power of X .
@@ -29228,14 +31500,12 @@ All mathematical functions have the following behaviors:
|
EUCLIDEAN_DISTANCE
-
|
Computes the Euclidean distance between two vectors. |
FLOOR
-
|
Gets the largest integral value that is not greater than X .
@@ -29244,7 +31514,6 @@ All mathematical functions have the following behaviors:
|
GREATEST
-
|
Gets the greatest value among X1,...,XN .
@@ -29253,7 +31522,6 @@ All mathematical functions have the following behaviors:
|
IEEE_DIVIDE
-
|
Divides X by Y , but does not generate errors for
@@ -29263,7 +31531,6 @@ All mathematical functions have the following behaviors:
|
IS_INF
-
|
Checks if X is positive or negative infinity.
@@ -29272,7 +31539,6 @@ All mathematical functions have the following behaviors:
|
IS_NAN
-
|
Checks if X is a NaN value.
@@ -29281,7 +31547,6 @@ All mathematical functions have the following behaviors:
|
LEAST
-
|
Gets the least value among X1,...,XN .
@@ -29290,7 +31555,6 @@ All mathematical functions have the following behaviors:
|
LN
-
|
Computes the natural logarithm of X .
@@ -29299,7 +31563,6 @@ All mathematical functions have the following behaviors:
|
LOG
-
|
Computes the natural logarithm of X or the logarithm of
@@ -29309,7 +31572,6 @@ All mathematical functions have the following behaviors:
|
LOG10
-
|
Computes the natural logarithm of X to base 10.
@@ -29317,8 +31579,17 @@ All mathematical functions have the following behaviors:
|
- MOD
+ | MAX
+ |
+
+ Gets the maximum non-NULL value.
+ For more information, see Aggregate functions.
+ |
+
+
+
+ MOD
|
Gets the remainder of the division of X by Y .
@@ -29327,7 +31598,6 @@ All mathematical functions have the following behaviors:
|
PI
-
|
Produces the mathematical constant π as a
@@ -29337,7 +31607,6 @@ All mathematical functions have the following behaviors:
|
PI_BIGNUMERIC
-
|
Produces the mathematical constant π as a BIGNUMERIC value.
@@ -29346,7 +31615,6 @@ All mathematical functions have the following behaviors:
|
PI_NUMERIC
-
|
Produces the mathematical constant π as a NUMERIC value.
@@ -29355,7 +31623,6 @@ All mathematical functions have the following behaviors:
|
POW
-
|
Produces the value of X raised to the power of Y .
@@ -29364,7 +31631,6 @@ All mathematical functions have the following behaviors:
|
POWER
-
|
Synonym of POW .
@@ -29373,7 +31639,6 @@ All mathematical functions have the following behaviors:
|
RAND
-
|
Generates a pseudo-random value of type
@@ -29384,17 +31649,16 @@ All mathematical functions have the following behaviors:
|
RANGE_BUCKET
-
|
Scans through a sorted array and returns the 0-based position
of a point's upper bound.
+
|
ROUND
-
|
Rounds X to the nearest integer or rounds X
@@ -29404,7 +31668,6 @@ All mathematical functions have the following behaviors:
|
SAFE_ADD
-
|
Equivalent to the addition operator (X + Y ), but returns
@@ -29414,7 +31677,6 @@ All mathematical functions have the following behaviors:
|
SAFE_DIVIDE
-
|
Equivalent to the division operator (X / Y ), but returns
@@ -29424,7 +31686,6 @@ All mathematical functions have the following behaviors:
|
SAFE_MULTIPLY
-
|
Equivalent to the multiplication operator (X * Y ),
@@ -29434,7 +31695,6 @@ All mathematical functions have the following behaviors:
|
SAFE_NEGATE
-
|
Equivalent to the unary minus operator (-X ), but returns
@@ -29444,7 +31704,6 @@ All mathematical functions have the following behaviors:
|
SAFE_SUBTRACT
-
|
Equivalent to the subtraction operator (X - Y ), but
@@ -29454,7 +31713,6 @@ All mathematical functions have the following behaviors:
|
SEC
-
|
Computes the secant of X .
@@ -29463,7 +31721,6 @@ All mathematical functions have the following behaviors:
|
SECH
-
|
Computes the hyperbolic secant of X .
@@ -29472,7 +31729,6 @@ All mathematical functions have the following behaviors:
|
SIGN
-
|
Produces -1 , 0, or +1 for negative, zero, and positive arguments
@@ -29482,7 +31738,6 @@ All mathematical functions have the following behaviors:
|
SIN
-
|
Computes the sine of X .
@@ -29491,7 +31746,6 @@ All mathematical functions have the following behaviors:
|
SINH
-
|
Computes the hyperbolic sine of X .
@@ -29500,7 +31754,6 @@ All mathematical functions have the following behaviors:
|
SQRT
-
|
Computes the square root of X .
@@ -29508,8 +31761,30 @@ All mathematical functions have the following behaviors:
|
- TAN
+ | SUM
+ |
+
+ Gets the sum of non-NULL values.
+ For more information, see Aggregate functions.
+
+ |
+
+
+ SUM (Differential Privacy)
+ |
+
+ DIFFERENTIAL_PRIVACY -supported SUM .
+ Gets the differentially-private sum of non-NULL ,
+ non-NaN values in a query with a
+ DIFFERENTIAL_PRIVACY clause.
+
For more information, see Differential privacy functions.
+
+ |
+
+
+
+ TAN
|
Computes the tangent of X .
@@ -29518,7 +31793,6 @@ All mathematical functions have the following behaviors:
|
TANH
-
|
Computes the hyperbolic tangent of X .
@@ -29527,7 +31801,6 @@ All mathematical functions have the following behaviors:
|
TRUNC
-
|
Rounds a number like ROUND(X) or ROUND(X, N) ,
@@ -30200,7 +32473,7 @@ Computes the [cosine distance][wiki-cosine-distance] between two vectors.
```sql
-- sparse vector ARRAY>
- [(1, 10.0), (2: 30.0), (5, 40.0)]
+ [(1, 10.0), (2, 30.0), (5, 40.0)]
```
```sql
@@ -30212,11 +32485,11 @@ Computes the [cosine distance][wiki-cosine-distance] between two vectors.
particular order. The following sparse vectors are equivalent:
```sql
- [('a', 10.0), ('b': 30.0), ('d': 40.0)]
+ [('a', 10.0), ('b', 30.0), ('d', 40.0)]
```
```sql
- [('d': 40.0), ('a', 10.0), ('b': 30.0)]
+ [('d', 40.0), ('a', 10.0), ('b', 30.0)]
```
+ Both non-sparse vectors
in this function must share the same dimensions, and if they don't, an error
@@ -30759,7 +33032,7 @@ Computes the [Euclidean distance][wiki-euclidean-distance] between two vectors.
```sql
-- sparse vector ARRAY>
- [(1, 10.0), (2: 30.0), (5, 40.0)]
+ [(1, 10.0), (2, 30.0), (5, 40.0)]
```
```sql
@@ -30771,11 +33044,11 @@ Computes the [Euclidean distance][wiki-euclidean-distance] between two vectors.
particular order. The following sparse vectors are equivalent:
```sql
- [('a', 10.0), ('b': 30.0), ('d': 40.0)]
+ [('a', 10.0), ('b', 30.0), ('d', 40.0)]
```
```sql
- [('d': 40.0), ('a', 10.0), ('b': 30.0)]
+ [('d', 40.0), ('a', 10.0), ('b', 30.0)]
```
+ Both non-sparse vectors
in this function must share the same dimensions, and if they don't, an error
@@ -31181,7 +33454,7 @@ equal to zero.
+inf |
|
- X < 0 |
+ X <= 0 |
Error |
@@ -32585,65 +34858,65 @@ For all navigation functions, the result data type is the same type as
FIRST_VALUE
-
|
Gets a value for the first row in the current window frame.
+
|
LAG
-
|
Gets a value for a preceding row.
+
|
LAST_VALUE
-
|
Gets a value for the last row in the current window frame.
+
|
LEAD
-
|
Gets a value for a subsequent row.
+
|
NTH_VALUE
-
|
Gets a value for the Nth row of the current window frame.
+
|
PERCENTILE_CONT
-
|
Computes the specified percentile for a value, using
linear interpolation.
+
|
PERCENTILE_DISC
-
|
Computes the specified percentile for a discrete value.
+
|
@@ -33483,7 +35756,6 @@ ZetaSQL supports the following Net functions.
NET.FORMAT_IP
-
|
(Deprecated) Converts an
@@ -33494,7 +35766,6 @@ ZetaSQL supports the following Net functions.
|
NET.FORMAT_PACKED_IP
-
|
(Deprecated) Converts an
@@ -33505,7 +35776,6 @@ ZetaSQL supports the following Net functions.
|
NET.HOST
-
|
Gets the hostname from a URL.
@@ -33514,7 +35784,6 @@ ZetaSQL supports the following Net functions.
|
NET.IP_FROM_STRING
-
|
Converts an IPv4 or IPv6 address from a STRING value to
@@ -33524,7 +35793,6 @@ ZetaSQL supports the following Net functions.
|
NET.IP_IN_NET
-
|
Checks if an IP address is in a subnet.
@@ -33533,7 +35801,6 @@ ZetaSQL supports the following Net functions.
|
NET.IP_NET_MASK
-
|
Gets a network mask.
@@ -33542,7 +35809,6 @@ ZetaSQL supports the following Net functions.
|
NET.IP_TO_STRING
-
|
Converts an IPv4 or IPv6 address from a BYTES value in
@@ -33552,7 +35818,6 @@ ZetaSQL supports the following Net functions.
|
NET.IP_TRUNC
-
|
Converts a BYTES IPv4 or IPv6 address in
@@ -33562,7 +35827,6 @@ ZetaSQL supports the following Net functions.
|
NET.IPV4_FROM_INT64
-
|
Converts an IPv4 address from an INT64 value to a
@@ -33572,7 +35836,6 @@ ZetaSQL supports the following Net functions.
|
NET.IPV4_TO_INT64
-
|
Converts an IPv4 address from a BYTES value in network
@@ -33582,7 +35845,6 @@ ZetaSQL supports the following Net functions.
|
NET.MAKE_NET
-
|
Takes a IPv4 or IPv6 address and the prefix length, and produces a
@@ -33592,7 +35854,6 @@ ZetaSQL supports the following Net functions.
|
NET.PARSE_IP
-
|
(Deprecated) Converts an
@@ -33603,7 +35864,6 @@ ZetaSQL supports the following Net functions.
|
NET.PARSE_PACKED_IP
-
|
(Deprecated) Converts an
@@ -33614,7 +35874,6 @@ ZetaSQL supports the following Net functions.
|
NET.PUBLIC_SUFFIX
-
|
Gets the public suffix from a URL.
@@ -33623,7 +35882,6 @@ ZetaSQL supports the following Net functions.
|
NET.REG_DOMAIN
-
|
Gets the registered or registrable domain from a URL.
@@ -33632,7 +35890,6 @@ ZetaSQL supports the following Net functions.
|
NET.SAFE_IP_FROM_STRING
-
|
Similar to the NET.IP_FROM_STRING , but returns
@@ -34357,56 +36614,56 @@ numbering functions.
|
CUME_DIST
-
|
Gets the cumulative distribution (relative position (0,1]) of each row
within a window.
+
|
DENSE_RANK
-
|
Gets the dense rank (1-based, no gaps) of each row within a window.
+
|
NTILE
-
|
Gets the quantile bucket number (1-based) of each row within a window.
+
|
PERCENT_RANK
-
|
Gets the percentile rank (from 0 to 1) of each row within a window.
+
|
RANK
-
|
Gets the rank (1-based) of each row within a window.
+
|
ROW_NUMBER
-
|
Gets the sequential row number (1-based) of each row within a window.
+
|
@@ -34937,7 +37194,6 @@ ZetaSQL supports the following protocol buffer functions.
ENUM_VALUE_DESCRIPTOR_PROTO
-
|
Gets the enum value descriptor proto
@@ -34947,7 +37203,6 @@ ZetaSQL supports the following protocol buffer functions.
|
EXTRACT
-
|
Extracts a value or metadata from a protocol buffer.
@@ -34956,7 +37211,6 @@ ZetaSQL supports the following protocol buffer functions.
|
FILTER_FIELDS
-
|
Removed unwanted fields from a protocol buffer.
@@ -34965,16 +37219,15 @@ ZetaSQL supports the following protocol buffer functions.
|
FROM_PROTO
-
|
Converts a protocol buffer value into ZetaSQL value.
+
|
PROTO_DEFAULT_IF_NULL
-
|
Produces the default protocol buffer field value if the
@@ -34985,7 +37238,6 @@ ZetaSQL supports the following protocol buffer functions.
|
PROTO_MAP_CONTAINS_KEY
-
|
Checks if a protocol buffer map field contains a given key.
@@ -34994,7 +37246,6 @@ ZetaSQL supports the following protocol buffer functions.
|
PROTO_MODIFY_MAP
-
|
Modifies a protocol buffer map field.
@@ -35003,7 +37254,6 @@ ZetaSQL supports the following protocol buffer functions.
|
REPLACE_FIELDS
-
|
Replaces the values in one or more protocol buffer fields.
@@ -35012,10 +37262,10 @@ ZetaSQL supports the following protocol buffer functions.
|
TO_PROTO
-
|
Converts a ZetaSQL value into a protocol buffer value.
+
|
@@ -35882,12 +38132,12 @@ Returns a copy of a protocol buffer, replacing the values in one or more fields.
`field_path` is a delimited path to the protocol buffer field that is replaced.
When using `replace_fields`, the following limitations apply:
-+ If `value` is `NULL`, it un-sets `field_path` or returns an error if the last
- component of `field_path` is a required field.
-+ Replacing subfields will succeed only if the message containing the field is
- set.
-+ Replacing subfields of repeated field is not allowed.
-+ A repeated field can be replaced with an `ARRAY` value.
++ If `value` is `NULL`, it un-sets `field_path` or returns an error if the
+ last component of `field_path` is a required field.
++ Replacing subfields will succeed only if the message containing the field is
+ set.
++ Replacing subfields of repeated field isn't allowed.
++ A repeated field can be replaced with an `ARRAY` value.
**Return type**
@@ -36148,14 +38398,16 @@ ZetaSQL supports the following range functions.
GENERATE_RANGE_ARRAY
-
|
- Splits a range into an array of subranges. |
+
+ Splits a range into an array of subranges.
+ For more information, see Range functions.
+
+ |
RANGE
-
|
Constructs a range of DATE , DATETIME ,
@@ -36164,8 +38416,18 @@ ZetaSQL supports the following range functions.
|
- RANGE_CONTAINS
+ | RANGE_BUCKET
+ |
+
+ Scans through a sorted array and returns the 0-based position
+ of a point's upper bound.
+ For more information, see Mathematical functions.
+ |
+
+
+
+ RANGE_CONTAINS
|
Signature 1: Checks if one range is in another range.
@@ -36176,35 +38438,33 @@ ZetaSQL supports the following range functions.
|
RANGE_END
-
|
Gets the upper bound of a range. |
RANGE_INTERSECT
-
|
Gets a segment of two ranges that intersect. |
RANGE_OVERLAPS
-
|
Checks if two ranges overlap. |
RANGE_SESSIONIZE
-
|
- Produces a table of sessionized ranges. |
+
+ Produces a table of sessionized ranges.
+
+ |
RANGE_START
-
|
Gets the lower bound of a range. |
@@ -36985,7 +39245,6 @@ ZetaSQL supports the following security functions.
SESSION_USER
-
|
Get the email address or principal identifier of the user that is running
@@ -37047,82 +39306,82 @@ To learn about the syntax for aggregate function calls, see
|
CORR
-
|
Computes the Pearson coefficient of correlation of a set of number pairs.
+
|
COVAR_POP
-
|
Computes the population covariance of a set of number pairs.
+
|
COVAR_SAMP
-
|
Computes the sample covariance of a set of number pairs.
+
|
STDDEV
-
|
An alias of the STDDEV_SAMP function.
+
|
STDDEV_POP
-
|
Computes the population (biased) standard deviation of the values.
+
|
STDDEV_SAMP
-
|
Computes the sample (unbiased) standard deviation of the values.
+
|
VAR_POP
-
|
Computes the population (biased) variance of the values.
+
|
VAR_SAMP
-
|
Computes the sample (unbiased) variance of the values.
+
|
VARIANCE
-
|
An alias of VAR_SAMP .
+
|
@@ -38145,8 +40404,18 @@ canonical equivalence.
- ASCII
+ | ARRAY_TO_STRING
+ |
+
+ Produces a concatenation of the elements in an array as a
+ STRING value.
+ For more information, see Array functions.
+
+ |
+
+
+ ASCII
|
Gets the ASCII code for the first character or byte in a STRING
@@ -38156,7 +40425,6 @@ canonical equivalence.
|
BYTE_LENGTH
-
|
Gets the number of BYTES in a STRING or
@@ -38166,7 +40434,6 @@ canonical equivalence.
|
CHAR_LENGTH
-
|
Gets the number of characters in a STRING value.
@@ -38175,7 +40442,6 @@ canonical equivalence.
|
CHARACTER_LENGTH
-
|
Synonym for CHAR_LENGTH .
@@ -38184,36 +40450,35 @@ canonical equivalence.
|
CHR
-
|
Converts a Unicode code point to a character.
+
|
CODE_POINTS_TO_BYTES
-
|
Converts an array of extended ASCII code points to a
BYTES value.
+
|
CODE_POINTS_TO_STRING
-
|
Converts an array of extended ASCII code points to a
STRING value.
+
|
COLLATE
-
|
Combines a STRING value and a collation specification into a
@@ -38223,7 +40488,6 @@ canonical equivalence.
|
CONCAT
-
|
Concatenates one or more STRING or BYTES
@@ -38233,7 +40497,6 @@ canonical equivalence.
|
EDIT_DISTANCE
-
|
Computes the Levenshtein distance between two STRING
@@ -38243,7 +40506,6 @@ canonical equivalence.
|
ENDS_WITH
-
|
Checks if a STRING or BYTES value is the suffix
@@ -38253,7 +40515,6 @@ canonical equivalence.
|
FORMAT
-
|
Formats data and produces the results as a STRING value.
@@ -38262,37 +40523,37 @@ canonical equivalence.
|
FROM_BASE32
-
|
Converts a base32-encoded STRING value into a
BYTES value.
+
|
FROM_BASE64
-
|
Converts a base64-encoded STRING value into a
BYTES value.
+
+
|
FROM_HEX
-
|
Converts a hexadecimal-encoded STRING value into a
BYTES value.
+
|
INITCAP
-
|
Formats a STRING as proper case, which means that the first
@@ -38302,7 +40563,6 @@ canonical equivalence.
|
INSTR
-
|
Finds the position of a subvalue inside another value, optionally starting
@@ -38311,8 +40571,28 @@ canonical equivalence.
|
- LEFT
+ | LAX_STRING
+ |
+
+ Attempts to convert a JSON value to a SQL STRING value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ LAX_STRING_ARRAY
+ |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<STRING> value.
+ For more information, see JSON functions.
+
+ |
+
+
+ LEFT
|
Gets the specified leftmost portion from a STRING or
@@ -38322,7 +40602,6 @@ canonical equivalence.
|
LENGTH
-
|
Gets the length of a STRING or BYTES value.
@@ -38331,7 +40610,6 @@ canonical equivalence.
|
LOWER
-
|
Formats alphabetic characters in a STRING value as
@@ -38344,7 +40622,6 @@ canonical equivalence.
|
LPAD
-
|
Prepends a STRING or BYTES value with a pattern.
@@ -38353,7 +40630,6 @@ canonical equivalence.
|
LTRIM
-
|
Identical to the TRIM function, but only removes leading
@@ -38363,7 +40639,6 @@ canonical equivalence.
|
NORMALIZE
-
|
Case-sensitively normalizes the characters in a STRING value.
@@ -38372,7 +40647,6 @@ canonical equivalence.
|
NORMALIZE_AND_CASEFOLD
-
|
Case-insensitively normalizes the characters in a STRING value.
@@ -38381,7 +40655,6 @@ canonical equivalence.
|
OCTET_LENGTH
-
|
Alias for BYTE_LENGTH .
@@ -38390,7 +40663,6 @@ canonical equivalence.
|
REGEXP_CONTAINS
-
|
Checks if a value is a partial match for a regular expression.
@@ -38399,7 +40671,6 @@ canonical equivalence.
|
REGEXP_EXTRACT
-
|
Produces a substring that matches a regular expression.
@@ -38408,7 +40679,6 @@ canonical equivalence.
|
REGEXP_EXTRACT_ALL
-
|
Produces an array of all substrings that match a
@@ -38418,7 +40688,6 @@ canonical equivalence.
|
REGEXP_INSTR
-
|
Finds the position of a regular expression match in a value, optionally
@@ -38428,7 +40697,6 @@ canonical equivalence.
|
REGEXP_MATCH
-
|
(Deprecated) Checks if a value is a full match for a regular expression.
@@ -38437,7 +40705,6 @@ canonical equivalence.
|
REGEXP_REPLACE
-
|
Produces a STRING value where all substrings that match a
@@ -38447,7 +40714,6 @@ canonical equivalence.
|
REGEXP_SUBSTR
-
|
Synonym for REGEXP_EXTRACT .
@@ -38456,7 +40722,6 @@ canonical equivalence.
|
REPEAT
-
|
Produces a STRING or BYTES value that consists of
@@ -38466,7 +40731,6 @@ canonical equivalence.
|
REPLACE
-
|
Replaces all occurrences of a pattern with another pattern in a
@@ -38476,7 +40740,6 @@ canonical equivalence.
|
REVERSE
-
|
Reverses a STRING or BYTES value.
@@ -38485,7 +40748,6 @@ canonical equivalence.
|
RIGHT
-
|
Gets the specified rightmost portion from a STRING or
@@ -38495,7 +40757,6 @@ canonical equivalence.
|
RPAD
-
|
Appends a STRING or BYTES value with a pattern.
@@ -38504,7 +40765,6 @@ canonical equivalence.
|
RTRIM
-
|
Identical to the TRIM function, but only removes trailing
@@ -38514,18 +40774,17 @@ canonical equivalence.
|
SAFE_CONVERT_BYTES_TO_STRING
-
|
Converts a BYTES value to a STRING value and
replace any invalid UTF-8 characters with the Unicode replacement character,
U+FFFD .
+
|
SOUNDEX
-
|
Gets the Soundex codes for words in a STRING value.
@@ -38534,7 +40793,6 @@ canonical equivalence.
|
SPLIT
-
|
Splits a STRING or BYTES value, using a delimiter.
@@ -38542,8 +40800,17 @@ canonical equivalence.
|
- STARTS_WITH
+ | SPLIT_SUBSTR
+ |
+
+ Returns the substring from an input string that's determined by a delimiter, a
+ location that indicates the first split of the substring to return, and the
+ number of splits to include.
+ |
+
+
+ STARTS_WITH
|
Checks if a STRING or BYTES value is a
@@ -38552,8 +40819,49 @@ canonical equivalence.
|
- STRPOS
+ | STRING (JSON)
+ |
+
+ Converts a JSON string to a SQL STRING value.
+ For more information, see JSON functions.
+ |
+
+
+
+ STRING_ARRAY
+ |
+
+ Converts a JSON array of strings to a SQL ARRAY<STRING>
+ value.
+ For more information, see JSON functions.
+
+ |
+
+
+
+ STRING (Timestamp)
+ |
+
+ Converts a TIMESTAMP value to a STRING value.
+ For more information, see Timestamp functions.
+
+ |
+
+
+
+ STRING_AGG
+ |
+
+ Concatenates non-NULL STRING or
+ BYTES values.
+ For more information, see Aggregate functions.
+
+ |
+
+
+
+ STRPOS
|
Finds the position of the first occurrence of a subvalue inside another
@@ -38563,7 +40871,6 @@ canonical equivalence.
|
SUBSTR
-
|
Gets a portion of a STRING or BYTES value.
@@ -38572,54 +40879,52 @@ canonical equivalence.
|
SUBSTRING
-
|
Alias for SUBSTR |
TO_BASE32
-
|
Converts a BYTES value to a
base32-encoded STRING value.
+
|
TO_BASE64
-
|
Converts a BYTES value to a
base64-encoded STRING value.
+
|
TO_CODE_POINTS
-
|
Converts a STRING or BYTES value into an array of
extended ASCII code points.
+
|
TO_HEX
-
|
Converts a BYTES value to a
hexadecimal STRING value.
+
|
TRANSLATE
-
|
Within a value, replaces each source character with the corresponding
@@ -38629,7 +40934,6 @@ canonical equivalence.
|
TRIM
-
|
Removes the specified leading and trailing Unicode code points or bytes
@@ -38639,7 +40943,6 @@ canonical equivalence.
|
UNICODE
-
|
Gets the Unicode code point for the first character in a value.
@@ -38648,7 +40951,6 @@ canonical equivalence.
|
UPPER
-
|
Formats alphabetic characters in a STRING value as
@@ -41732,7 +44034,9 @@ SPLIT(value[, delimiter])
**Description**
-Splits `value` using the `delimiter` argument.
+Splits a `STRING` or `BYTES` value, using a delimiter. The `delimiter` argument
+must be a literal character or sequence of characters. You can't split with a
+regular expression.
For `STRING`, the default delimiter is the comma `,`.
@@ -41775,6 +44079,206 @@ FROM letters;
*----------------------*/
```
+### `SPLIT_SUBSTR`
+
+```sql
+SPLIT_SUBSTR(value, delimiter, start_split[, count])
+```
+
+**Description**
+
+Returns a substring from an input `STRING` that's determined by a delimiter, a
+location that indicates the first split of the substring to return, and the
+number of splits to include in the returned substring.
+
+The `value` argument is the supplied `STRING` value from which a substring is
+returned.
+
+The `delimiter` argument is the delimiter used to split the input `STRING`. It
+must be a literal character or sequence of characters.
+
++ The `delimiter` argument can't be a regular expression.
++ Delimiter matching is from left to right.
++ If the delimiter is a sequence of characters, then two instances of the
+ delimiter in the input string can't overlap. For example, if the delimiter is
+ `**`, then the delimiters in the string `aa***bb***cc` are:
+ + The first two asterisks after `aa`.
+ + The first two asterisks after `bb`.
+
+The `start_split` argument is an integer that specifies the first split of the
+substring to return.
+
++ If `start_split` is `1`, then the returned substring starts from the first
+ split.
++ If `start_split` is `0` or less than the negative of the number of splits,
+ then `start_split` is treated as if it's `1` and returns a substring that
+ starts with the first split.
++ If `start_split` is greater than the number of splits, then an empty string is
+ returned.
++ If `start_split` is negative, then the splits are counted from the end of the
+ input string. If `start_split` is `-1`, then the last split in the input
+ string is returned.
+
+The optional `count` argument is an integer that specifies the maximum number
+of splits to include in the returned substring.
+
++ If `count` isn't specified, then the substring from the `start_split`
+ position to the end of the input string is returned.
++ If `count` is `0`, an empty string is returned.
++ If `count` is negative, an error is returned.
++ If the sum of `count` plus `start_split` is greater than the number of splits,
+ then a substring from `start_split` to the end of the input string is
+ returned.
+
+This function supports specifying [collation][collation].
+
+[collation]: https://github.com/google/zetasql/blob/master/docs/collation-concepts.md#collate_about
+
+**Return type**
+
+`STRING`
+
+**Examples**
+
+The following example returns an empty string because `count` is `0`:
+
+```sql
+SELECT SPLIT_SUBSTR("www.abc.xyz.com", ".", 1, 0) AS example
+
+/*---------*
+ | example |
+ +---------+
+ | |
+ *---------*/
+```
+
+The following example returns two splits starting with the first split:
+
+```sql
+SELECT SPLIT_SUBSTR("www.abc.xyz.com", ".", 1, 2) AS example
+
+/*---------*
+ | example |
+ +---------+
+ | www.abc |
+ *---------*/
+```
+
+The following example returns one split starting with the first split:
+
+```sql
+SELECT SPLIT_SUBSTR("www.abc.xyz.com", ".", 1, 1) AS example
+
+/*---------*
+ | example |
+ +---------+
+ | www |
+ *---------*/
+```
+
+The following example returns splits from the right because `start_split` is a
+negative value:
+
+```sql
+SELECT SPLIT_SUBSTR("www.abc.xyz.com", ".", -1, 1) AS example
+
+/*---------*
+ | example |
+ +---------+
+ | com |
+ *---------*/
+```
+
+The following example returns a substring with three splits, starting with the
+first split:
+
+```sql
+SELECT SPLIT_SUBSTR("www.abc.xyz.com", ".", 1, 3) AS example
+
+/*-------------*
+ | example |
+ +-------------+
+ | www.abc.xyz |
+ *-------------*/
+```
+
+If `start_split` is zero, then it's treated as if it's `1`. The following
+example returns a substring with three splits, starting with the first split:
+
+```sql
+SELECT SPLIT_SUBSTR("www.abc.xyz.com", ".", 0, 3) AS example
+
+/*-------------*
+ | example |
+ +-------------+
+ | www.abc.xyz |
+ *-------------*/
+```
+
+If `start_split` is greater than the number of splits, then an empty string is
+returned:
+
+```sql
+SELECT SPLIT_SUBSTR("www.abc.xyz.com", ".", 5, 3) AS example
+
+/*---------*
+ | example |
+ +---------+
+ | |
+ *---------*/
+```
+
+In the following example, the `start_split` value (`-5`) is less than the
+negative of the number of splits (`-4`), so `start_split` is treated as `1`:
+
+```sql
+SELECT SPLIT_SUBSTR("www.abc.xyz.com", ".", -5, 3) AS example
+
+/*-------------*
+ | example |
+ +-------------+
+ | www.abc.xyz |
+ *-------------*/
+```
+
+In the following example, the substring from `start_split` to the end of the
+string is returned because `count` isn't specified:
+
+```sql
+SELECT SPLIT_SUBSTR("www.abc.xyz.com", ".", 3) AS example
+
+/*---------*
+ | example |
+ +---------+
+ | xyz.com |
+ *---------*/
+```
+
+The following two examples demonstrate how `SPLIT_SUBSTR` works with a
+multi-character delimiter that has overlapping matches in the input string. In
+each example, the input string contains instances of three asterisks in a row
+(`***`) and the delimiter is two asterisks (`**`).
+
+```sql
+SELECT SPLIT_SUBSTR('aaa***bbb***ccc', '**', 1, 2) AS example
+
+/*-----------*
+ | example |
+ +-----------+
+ | aaa***bbb |
+ *-----------*/
+```
+
+```sql
+SELECT SPLIT_SUBSTR('aaa***bbb***ccc', '**', 2, 2) AS example
+
+/*------------*
+ | example |
+ +------------+
+ | *bbb***ccc |
+ *------------*/
+```
+
### `STARTS_WITH`
```sql
@@ -42414,7 +44918,6 @@ ZetaSQL supports the following time functions.
|
CURRENT_TIME
-
|
Returns the current time as a TIME value.
@@ -42423,7 +44926,6 @@ ZetaSQL supports the following time functions.
|
EXTRACT
-
|
Extracts part of a TIME value.
@@ -42432,7 +44934,6 @@ ZetaSQL supports the following time functions.
|
FORMAT_TIME
-
|
Formats a TIME value according to the specified format string.
@@ -42441,16 +44942,15 @@ ZetaSQL supports the following time functions.
|
PARSE_TIME
-
|
Converts a STRING value to a TIME value.
+
|
TIME
-
|
Constructs a TIME value.
@@ -42459,7 +44959,6 @@ ZetaSQL supports the following time functions.
|
TIME_ADD
-
|
Adds a specified time interval to a TIME value.
@@ -42468,7 +44967,6 @@ ZetaSQL supports the following time functions.
|
TIME_DIFF
-
|
Gets the number of unit boundaries between two TIME values at
@@ -42478,7 +44976,6 @@ ZetaSQL supports the following time functions.
|
TIME_SUB
-
|
Subtracts a specified time interval from a TIME value.
@@ -42487,10 +44984,9 @@ ZetaSQL supports the following time functions.
|
TIME_TRUNC
-
|
- Truncates a TIME value.
+ Truncates a TIME value at a particular granularity.
|
@@ -42586,13 +45082,18 @@ SELECT EXTRACT(HOUR FROM TIME "15:30:00") as hour;
### `FORMAT_TIME`
```sql
-FORMAT_TIME(format_string, time_object)
+FORMAT_TIME(format_string, time_expr)
```
**Description**
-Formats a `TIME` object according to the specified `format_string`. See
-[Supported Format Elements For TIME][time-format-elements]
-for a list of format elements that this function supports.
+
+Formats a `TIME` value according to the specified format string.
+
+**Definitions**
+
++ `format_string`: A `STRING` value that contains the
+ [format elements][time-format-elements] to use with `time_expr`.
++ `time_expr`: A `TIME` value that represents the time to format.
**Return Data Type**
@@ -42620,14 +45121,19 @@ PARSE_TIME(format_string, time_string)
**Description**
-Converts a [string representation of time][time-format] to a
-`TIME` object.
+Converts a `STRING` value to a `TIME` value.
-`format_string` contains the [format elements][time-format-elements]
-that define how `time_string` is formatted. Each element in
-`time_string` must have a corresponding element in `format_string`. The
-location of each element in `format_string` must match the location of
-each element in `time_string`.
+**Definitions**
+
++ `format_string`: A `STRING` value that contains the
+ [format elements][time-format-elements] to use with `time_string`.
++ `time_string`: A `STRING` value that represents the time to parse.
+
+**Details**
+
+Each element in `time_string` must have a corresponding element in
+`format_string`. The location of each element in `format_string` must match the
+location of each element in `time_string`.
```sql
-- This works because elements on both sides match.
@@ -42645,16 +45151,16 @@ SELECT PARSE_TIME("%T", "07:30:00");
When using `PARSE_TIME`, keep the following in mind:
-+ **Unspecified fields.** Any unspecified field is initialized from
++ Unspecified fields. Any unspecified field is initialized from
`00:00:00.0`. For instance, if `seconds` is unspecified then it
defaults to `00`, and so on.
-+ **Whitespace.** One or more consecutive white spaces in the format string
++ Whitespace. One or more consecutive white spaces in the format string
matches zero or more consecutive white spaces in the `TIME` string. In
addition, leading and trailing white spaces in the `TIME` string are always
allowed, even if they are not in the format string.
-+ **Format precedence.** When two (or more) format elements have overlapping
++ Format precedence. When two (or more) format elements have overlapping
information, the last one generally overrides any earlier ones.
-+ **Format divergence.** `%p` can be used with `am`, `AM`, `pm`, and `PM`.
++ Format divergence. `%p` can be used with `am`, `AM`, `pm`, and `PM`.
**Return Data Type**
@@ -42881,20 +45387,22 @@ SELECT
### `TIME_TRUNC`
```sql
-TIME_TRUNC(time_expression, granularity)
+TIME_TRUNC(time_value, time_granularity)
```
**Description**
-Truncates a `TIME` value at a particular time granularity. The `TIME` value
-is always rounded to the beginning of `granularity`.
+Truncates a `TIME` value at a particular granularity.
**Definitions**
-+ `time_expression`: The `TIME` value to truncate.
-+ `granularity`: The time part that represents the granularity. If
- you passed in a `TIME` value for the first argument, `granularity` can
- be:
++ `time_value`: The `TIME` value to truncate.
++ `time_granularity`: The truncation granularity for a `TIME` value.
+ [Time granularities][time-trunc-granularity-time] can be used.
+
+
+
+**Time granularity definitions**
+ `NANOSECOND`: If used, nothing is truncated from the value.
@@ -42908,6 +45416,10 @@ is always rounded to the beginning of `granularity`.
+ `HOUR`: The nearest lesser than or equal hour.
+**Details**
+
+The resulting value is always rounded to the beginning of `granularity`.
+
**Return Data Type**
`TIME`
@@ -42926,6 +45438,8 @@ SELECT
*----------------------------+------------------------*/
```
+[time-trunc-granularity-time]: #time_trunc_granularity_time
+
[time-to-string]: #cast
## Time series functions
@@ -42945,7 +45459,6 @@ ZetaSQL supports the following time series functions.
DATE_BUCKET
-
|
Gets the lower bound of the date bucket that contains a date.
@@ -42954,7 +45467,6 @@ ZetaSQL supports the following time series functions.
|
DATETIME_BUCKET
-
|
Gets the lower bound of the datetime bucket that contains a datetime.
@@ -42963,7 +45475,6 @@ ZetaSQL supports the following time series functions.
|
TIMESTAMP_BUCKET
-
|
Gets the lower bound of the timestamp bucket that contains a timestamp.
@@ -43026,9 +45537,9 @@ FROM some_dates;
| 1949-12-28 |
| 1949-12-30 |
| 1949-12-30 |
- | 1950-12-01 |
- | 1950-12-01 |
- | 1950-12-03 |
+ | 1950-01-01 |
+ | 1950-01-01 |
+ | 1950-01-03 |
+--------------------*/
-- Some date buckets that originate from 1950-01-01:
@@ -43235,8 +45746,9 @@ Gets the lower bound of the timestamp bucket that contains a timestamp.
In the following example, the origin is omitted and the default origin,
`1950-01-01 00:00:00` is used. All buckets expand in both directions from the
-origin, and the size of each bucket is 12 hours. The lower bound of the bucket
-in which `my_timestamp` belongs is returned:
+origin, and the size of each bucket is 12 hours. The default time zone,
+which is implementation defined, is included in the results. The lower bound of the
+bucket in which `my_timestamp` belongs is returned:
```sql
WITH some_timestamps AS (
@@ -43255,12 +45767,12 @@ FROM some_timestamps;
/*---------------------------------------------+
| bucket_lower_bound |
+---------------------------------------------+
- | 2000-12-30 12:00:00.000 America/Los_Angeles |
- | 2000-12-31 00:00:00.000 America/Los_Angeles |
- | 2000-12-31 12:00:00.000 America/Los_Angeles |
- | 2000-01-01 00:00:00.000 America/Los_Angeles |
- | 2000-01-01 12:00:00.000 America/Los_Angeles |
- | 2000-01-01 00:00:00.000 America/Los_Angeles |
+ | 1949-12-30 12:00:00.000 America/Los_Angeles |
+ | 1949-12-31 00:00:00.000 America/Los_Angeles |
+ | 1949-12-31 12:00:00.000 America/Los_Angeles |
+ | 1950-01-01 00:00:00.000 America/Los_Angeles |
+ | 1950-01-01 12:00:00.000 America/Los_Angeles |
+ | 1950-01-02 00:00:00.000 America/Los_Angeles |
+---------------------------------------------*/
-- Some timestamp buckets that originate from 1950-01-01 00:00:00:
@@ -43275,7 +45787,8 @@ FROM some_timestamps;
In the following example, the origin has been changed to `2000-12-24 12:00:00`,
and all buckets expand in both directions from this point. The size of each
-bucket is seven days. The lower bound of the bucket in which `my_timestamp`
+bucket is seven days. The default time zone, which is implementation defined, is included
+in the results. The lower bound of the bucket in which `my_timestamp`
belongs is returned:
```sql
@@ -43347,7 +45860,6 @@ and [`TIMESTAMP` range][data-types-link-to-timestamp_type].
|
CURRENT_TIMESTAMP
-
|
Returns the current date and time as a TIMESTAMP object.
@@ -43356,7 +45868,6 @@ and [`TIMESTAMP` range][data-types-link-to-timestamp_type].
|
EXTRACT
-
|
Extracts part of a TIMESTAMP value.
@@ -43365,7 +45876,6 @@ and [`TIMESTAMP` range][data-types-link-to-timestamp_type].
|
FORMAT_TIMESTAMP
-
|
Formats a TIMESTAMP value according to the specified
@@ -43374,26 +45884,35 @@ and [`TIMESTAMP` range][data-types-link-to-timestamp_type].
|
- PARSE_TIMESTAMP
+ | GENERATE_TIMESTAMP_ARRAY
+ |
+
+ Generates an array of timestamps in a range.
+ For more information, see Array functions.
+
+ |
+
+
+ PARSE_TIMESTAMP
|
Converts a STRING value to a TIMESTAMP value.
+
|
- STRING
-
+ | STRING (Timestamp)
|
Converts a TIMESTAMP value to a STRING value.
+
|
TIMESTAMP
-
|
Constructs a TIMESTAMP value.
@@ -43402,7 +45921,6 @@ and [`TIMESTAMP` range][data-types-link-to-timestamp_type].
|
TIMESTAMP_ADD
-
|
Adds a specified time interval to a TIMESTAMP value.
@@ -43411,7 +45929,6 @@ and [`TIMESTAMP` range][data-types-link-to-timestamp_type].
|
TIMESTAMP_DIFF
-
|
Gets the number of unit boundaries between two TIMESTAMP values
@@ -43421,7 +45938,6 @@ and [`TIMESTAMP` range][data-types-link-to-timestamp_type].
|
TIMESTAMP_FROM_UNIX_MICROS
-
|
Similar to TIMESTAMP_MICROS , except that additionally, a
@@ -43431,7 +45947,6 @@ and [`TIMESTAMP` range][data-types-link-to-timestamp_type].
|
TIMESTAMP_FROM_UNIX_MILLIS
-
|
Similar to TIMESTAMP_MILLIS , except that additionally, a
@@ -43441,7 +45956,6 @@ and [`TIMESTAMP` range][data-types-link-to-timestamp_type].
|
TIMESTAMP_FROM_UNIX_SECONDS
-
|
Similar to TIMESTAMP_SECONDS , except that additionally, a
@@ -43451,37 +45965,36 @@ and [`TIMESTAMP` range][data-types-link-to-timestamp_type].
|
TIMESTAMP_MICROS
-
|
Converts the number of microseconds since
- 1970-01-01 00:00:00 UTC to a TIMESTAMP.
+ 1970-01-01 00:00:00 UTC to a TIMESTAMP .
+
|
TIMESTAMP_MILLIS
-
|
Converts the number of milliseconds since
- 1970-01-01 00:00:00 UTC to a TIMESTAMP.
+ 1970-01-01 00:00:00 UTC to a TIMESTAMP .
+
|
TIMESTAMP_SECONDS
-
|
Converts the number of seconds since
- 1970-01-01 00:00:00 UTC to a TIMESTAMP.
+ 1970-01-01 00:00:00 UTC to a TIMESTAMP .
+
|
TIMESTAMP_SUB
-
|
Subtracts a specified time interval from a TIMESTAMP value.
@@ -43490,40 +46003,43 @@ and [`TIMESTAMP` range][data-types-link-to-timestamp_type].
|
TIMESTAMP_TRUNC
-
|
- Truncates a TIMESTAMP value.
+
+ Truncates a TIMESTAMP or
+ DATETIME value at a particular
+ granularity.
+
|
UNIX_MICROS
-
|
Converts a TIMESTAMP value to the number of microseconds since
1970-01-01 00:00:00 UTC.
+
|
UNIX_MILLIS
-
|
Converts a TIMESTAMP value to the number of milliseconds
since 1970-01-01 00:00:00 UTC.
+
|
UNIX_SECONDS
-
|
Converts a TIMESTAMP value to the number of seconds since
1970-01-01 00:00:00 UTC.
+
|
@@ -43785,21 +46301,28 @@ SELECT
### `FORMAT_TIMESTAMP`
```sql
-FORMAT_TIMESTAMP(format_string, timestamp[, time_zone])
+FORMAT_TIMESTAMP(format_string, timestamp_expr[, time_zone])
```
**Description**
-Formats a timestamp according to the specified `format_string`.
+Formats a `TIMESTAMP` value according to the specified format string.
-See [Format elements for date and time parts][timestamp-format-elements]
-for a list of format elements that this function supports.
+**Definitions**
+
++ `format_string`: A `STRING` value that contains the
+ [format elements][timestamp-format-elements] to use with
+ `timestamp_expr`.
++ `timestamp_expr`: A `TIMESTAMP` value that represents the timestamp to format.
++ `time_zone`: A `STRING` value that represents a time zone. For more
+ information about how to use a time zone with a timestamp, see
+ [Time zone definitions][timestamp-link-to-timezone-definitions].
**Return Data Type**
`STRING`
-**Example**
+**Examples**
```sql
SELECT FORMAT_TIMESTAMP("%c", TIMESTAMP "2050-12-25 15:30:55+00", "UTC")
@@ -43847,6 +46370,8 @@ SELECT FORMAT_TIMESTAMP("%Y-%m-%dT%H:%M:%SZ", TIMESTAMP "2050-12-25 15:30:55", "
[timestamp-format-elements]: https://github.com/google/zetasql/blob/master/docs/format-elements.md#format_elements_date_time
+[timestamp-link-to-timezone-definitions]: #timezone_definitions
+
### `PARSE_TIMESTAMP`
```sql
@@ -43855,14 +46380,22 @@ PARSE_TIMESTAMP(format_string, timestamp_string[, time_zone])
**Description**
-Converts a [string representation of a timestamp][timestamp-format] to a
-`TIMESTAMP` object.
+Converts a `STRING` value to a `TIMESTAMP` value.
+
+**Definitions**
+
++ `format_string`: A `STRING` value that contains the
+ [format elements][timestamp-format-elements] to use with `timestamp_string`.
++ `timestamp_string`: A `STRING` value that represents the timestamp to parse.
++ `time_zone`: A `STRING` value that represents a time zone. For more
+ information about how to use a time zone with a timestamp, see
+ [Time zone definitions][timestamp-link-to-timezone-definitions].
+
+**Details**
-`format_string` contains the [format elements][timestamp-format-elements]
-that define how `timestamp_string` is formatted. Each element in
-`timestamp_string` must have a corresponding element in `format_string`. The
-location of each element in `format_string` must match the location of
-each element in `timestamp_string`.
+Each element in `timestamp_string` must have a corresponding element in
+`format_string`. The location of each element in `format_string` must match the
+location of each element in `timestamp_string`.
```sql
-- This works because elements on both sides match.
@@ -43880,22 +46413,22 @@ SELECT PARSE_TIMESTAMP("%c", "Thu Dec 25 07:30:00 2008");
When using `PARSE_TIMESTAMP`, keep the following in mind:
-+ **Unspecified fields.** Any unspecified field is initialized from `1970-01-01
++ Unspecified fields. Any unspecified field is initialized from `1970-01-01
00:00:00.0`. This initialization value uses the time zone specified by the
function's time zone argument, if present. If not, the initialization value
uses the default time zone, which is implementation defined. For instance, if the year
is unspecified then it defaults to `1970`, and so on.
-+ **Case insensitivity.** Names, such as `Monday`, `February`, and so on, are
++ Case insensitivity. Names, such as `Monday`, `February`, and so on, are
case insensitive.
-+ **Whitespace.** One or more consecutive white spaces in the format string
++ Whitespace. One or more consecutive white spaces in the format string
matches zero or more consecutive white spaces in the timestamp string. In
addition, leading and trailing white spaces in the timestamp string are always
allowed, even if they are not in the format string.
-+ **Format precedence.** When two (or more) format elements have overlapping
++ Format precedence. When two (or more) format elements have overlapping
information (for example both `%F` and `%Y` affect the year), the last one
generally overrides any earlier ones, with some exceptions (see the
descriptions of `%s`, `%C`, and `%y`).
-+ **Format divergence.** `%p` can be used with `am`, `AM`, `pm`, and `PM`.
++ Format divergence. `%p` can be used with `am`, `AM`, `pm`, and `PM`.
**Return Data Type**
@@ -43918,6 +46451,8 @@ SELECT PARSE_TIMESTAMP("%c", "Thu Dec 25 07:30:00 2008") AS parsed;
[timestamp-format-elements]: https://github.com/google/zetasql/blob/master/docs/format-elements.md#format_elements_date_time
+[timestamp-link-to-timezone-definitions]: #timezone_definitions
+
### `STRING`
```sql
@@ -44395,61 +46930,73 @@ SELECT
### `TIMESTAMP_TRUNC`
```sql
-TIMESTAMP_TRUNC(timestamp_expression, granularity[, time_zone])
+TIMESTAMP_TRUNC(timestamp_value, timestamp_granularity[, time_zone])
+```
+
+```sql
+TIMESTAMP_TRUNC(datetime_value, datetime_granularity)
```
**Description**
-Truncates a `TIMESTAMP` value at a particular time granularity. The `TIMESTAMP`
-value is always rounded to the beginning of `granularity`.
+Truncates a `TIMESTAMP` or `DATETIME` value at a particular granularity.
**Definitions**
-+ `timestamp_expression`: The `TIMESTAMP` value to truncate.
-+ `granularity`: The datetime part that represents the granularity. If
- you passed in a `TIMESTAMP` value for the first argument, `granularity` can
- be:
-
- + `NANOSECOND`: If used, nothing is truncated from the value.
-
- + `MICROSECOND`: The nearest lesser than or equal microsecond.
-
- + `MILLISECOND`: The nearest lesser than or equal millisecond.
++ `timestamp_value`: A `TIMESTAMP` value to truncate.
++ `timestamp_granularity`: The truncation granularity for a `TIMESTAMP` value.
+ [Date granularities][timestamp-trunc-granularity-date] and
+ [time granularities][timestamp-trunc-granularity-time] can be used.
++ `time_zone`: A time zone to use with the `TIMESTAMP` value.
+ [Time zone parts][timestamp-time-zone-parts] can be used.
+ Use this argument if you want to use a time zone other than
+ the default time zone, which is implementation defined, as part of the
+ truncate operation.
- + `SECOND`: The nearest lesser than or equal second.
+ Note: When truncating a timestamp to `MINUTE`
+ or `HOUR` parts, this function determines the civil time of the
+ timestamp in the specified (or default) time zone
+ and subtracts the minutes and seconds (when truncating to `HOUR`) or the
+ seconds (when truncating to `MINUTE`) from that timestamp.
+ While this provides intuitive results in most cases, the result is
+ non-intuitive near daylight savings transitions that are not hour-aligned.
++ `datetime_value`: A `DATETIME` value to truncate.
++ `datetime_granularity`: The truncation granularity for a `DATETIME` value.
+ [Date granularities][timestamp-trunc-granularity-date] and
+ [time granularities][timestamp-trunc-granularity-time] can be used.
- + `MINUTE`: The nearest lesser than or equal minute.
+
- + `HOUR`: The nearest lesser than or equal hour.
+**Date granularity definitions**
+ `DAY`: The day in the Gregorian calendar year that contains the
- `TIMESTAMP` value.
+ value to truncate.
+ `WEEK`: The first day in the week that contains the
- `TIMESTAMP` value. Weeks begin on Sundays. `WEEK` is equivalent to
+ value to truncate. Weeks begin on Sundays. `WEEK` is equivalent to
`WEEK(SUNDAY)`.
+ `WEEK(WEEKDAY)`: The first day in the week that contains the
- `TIMESTAMP` value. Weeks begin on `WEEKDAY`. `WEEKDAY` must be one of the
+ value to truncate. Weeks begin on `WEEKDAY`. `WEEKDAY` must be one of the
following: `SUNDAY`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`,
or `SATURDAY`.
+ `ISOWEEK`: The first day in the [ISO 8601 week][ISO-8601-week] that contains
- the `TIMESTAMP` value. The ISO week begins on
+ the value to truncate. The ISO week begins on
Monday. The first ISO week of each ISO year contains the first Thursday of the
corresponding Gregorian calendar year.
+ `MONTH`: The first day in the month that contains the
- `TIMESTAMP` value.
+ value to truncate.
+ `QUARTER`: The first day in the quarter that contains the
- `TIMESTAMP` value.
+ value to truncate.
+ `YEAR`: The first day in the year that contains the
- `TIMESTAMP` value.
+ value to truncate.
+ `ISOYEAR`: The first day in the [ISO 8601][ISO-8601] week-numbering year
- that contains the `TIMESTAMP` value. The ISO year is the
+ that contains the value to truncate. The ISO year is the
Monday of the first week where Thursday belongs to the corresponding
Gregorian calendar year.
@@ -44461,32 +47008,44 @@ value is always rounded to the beginning of `granularity`.
-+ `time_zone`: Use this parameter if you want to use a time zone other than
- the default time zone, which is implementation defined, as part of the
- truncate operation. This can be:
+
- + `MINUTE`
- + `HOUR`
- + `DAY`
- + `WEEK`
- + `WEEK()`
- + `ISOWEEK`
- + `MONTH`
- + `QUARTER`
- + `YEAR`
- + `ISOYEAR`
-
-When truncating a timestamp to `MINUTE`
-or`HOUR` parts, `TIMESTAMP_TRUNC` determines the civil time of the
-timestamp in the specified (or default) time zone
-and subtracts the minutes and seconds (when truncating to `HOUR`) or the seconds
-(when truncating to `MINUTE`) from that timestamp.
-While this provides intuitive results in most cases, the result is
-non-intuitive near daylight savings transitions that are not hour-aligned.
+**Time granularity definitions**
+
+ + `NANOSECOND`: If used, nothing is truncated from the value.
+
+ + `MICROSECOND`: The nearest lesser than or equal microsecond.
+
+ + `MILLISECOND`: The nearest lesser than or equal millisecond.
+
+ + `SECOND`: The nearest lesser than or equal second.
+
+ + `MINUTE`: The nearest lesser than or equal minute.
+
+ + `HOUR`: The nearest lesser than or equal hour.
+
+
+
+**Time zone part definitions**
+
++ `MINUTE`
++ `HOUR`
++ `DAY`
++ `WEEK`
++ `WEEK()`
++ `ISOWEEK`
++ `MONTH`
++ `QUARTER`
++ `YEAR`
++ `ISOYEAR`
+
+**Details**
+
+The resulting value is always rounded to the beginning of `granularity`.
**Return Data Type**
-`TIMESTAMP`
+The same data type as the first argument passed into this function.
**Examples**
@@ -44551,6 +47110,12 @@ SELECT
[timestamp-link-to-timezone-definitions]: #timezone_definitions
+[timestamp-trunc-granularity-date]: #timestamp_trunc_granularity_date
+
+[timestamp-trunc-granularity-time]: #timestamp_trunc_granularity_time
+
+[timestamp-time-zone-parts]: #timestamp_time_zone_parts
+
### `UNIX_MICROS`
```sql
@@ -44663,6 +47228,8 @@ SELECT UNIX_SECONDS(TIMESTAMP "1970-01-01 00:00:01.8+00") AS seconds;
*------------*/
```
+### Supplemental materials
+
### How time zones work with timestamp functions
@@ -44693,7 +47260,7 @@ or time zone offset from UTC (for example, -08).
To learn more about how time zones work with the `TIMESTAMP` type, see
[Time zones][data-types-timezones].
-[timezone-by-name]: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+[data-types-timezones]: https://github.com/google/zetasql/blob/master/docs/data-types.md#time_zones
[data-types-timezones]: https://github.com/google/zetasql/blob/master/docs/data-types.md#time_zones
@@ -44703,6 +47270,170 @@ To learn more about how time zones work with the `TIMESTAMP` type, see
[data-types-link-to-timestamp_type]: https://github.com/google/zetasql/blob/master/docs/data-types.md#timestamp_type
+## Window functions
+
+ZetaSQL supports the following
+[window functions][window-function-calls].
+
+### Function list
+
+
+
+
+ Name |
+ Summary |
+
+
+
+
+
+ CUME_DIST
+ |
+
+ Gets the cumulative distribution (relative position (0,1]) of each row
+ within a window.
+ For more information, see Numbering functions.
+
+ |
+
+
+
+ DENSE_RANK
+ |
+
+ Gets the dense rank (1-based, no gaps) of each row within a window.
+ For more information, see Numbering functions.
+
+ |
+
+
+
+ FIRST_VALUE
+ |
+
+ Gets a value for the first row in the current window frame.
+ For more information, see Navigation functions.
+
+ |
+
+
+
+ LAG
+ |
+
+ Gets a value for a preceding row.
+ For more information, see Navigation functions.
+
+ |
+
+
+
+ LAST_VALUE
+ |
+
+ Gets a value for the last row in the current window frame.
+ For more information, see Navigation functions.
+
+ |
+
+
+
+ LEAD
+ |
+
+ Gets a value for a subsequent row.
+ For more information, see Navigation functions.
+
+ |
+
+
+
+ NTH_VALUE
+ |
+
+ Gets a value for the Nth row of the current window frame.
+ For more information, see Navigation functions.
+
+ |
+
+
+
+ NTILE
+ |
+
+ Gets the quantile bucket number (1-based) of each row within a window.
+ For more information, see Numbering functions.
+
+ |
+
+
+
+ PERCENT_RANK
+ |
+
+ Gets the percentile rank (from 0 to 1) of each row within a window.
+ For more information, see Numbering functions.
+
+ |
+
+
+
+ PERCENTILE_CONT
+ |
+
+ Computes the specified percentile for a value, using
+ linear interpolation.
+ For more information, see Navigation functions.
+
+ |
+
+
+
+ PERCENTILE_DISC
+ |
+
+ Computes the specified percentile for a discrete value.
+ For more information, see Navigation functions.
+
+ |
+
+
+
+ RANK
+ |
+
+ Gets the rank (1-based) of each row within a window.
+ For more information, see Numbering functions.
+
+ |
+
+
+
+ ROW_NUMBER
+ |
+
+ Gets the sequential row number (1-based) of each row within a window.
+ For more information, see Numbering functions.
+
+ |
+
+
+
+ ST_CLUSTERDBSCAN
+ |
+
+ Performs DBSCAN clustering on a group of GEOGRAPHY values and
+ produces a 0-based cluster number for this row.
+ For more information, see Geography functions.
+
+ |
+
+
+
+
+
+[window-function-calls]: https://github.com/google/zetasql/blob/master/docs/window-function-calls.md
+
[subqueries]: https://github.com/google/zetasql/blob/master/docs/subqueries.md
diff --git a/docs/geography_functions.md b/docs/geography_functions.md
index e8bf315c7..0b9651640 100644
--- a/docs/geography_functions.md
+++ b/docs/geography_functions.md
@@ -15,7 +15,7 @@ between geographical features, and construct or manipulate
All ZetaSQL geography functions return `NULL` if any input argument
is `NULL`.
-### Categories
+## Categories
The geography functions are grouped into the following categories based on their
behavior:
@@ -197,7 +197,7 @@ behavior:
-### Function list
+## Function list
@@ -209,8 +209,7 @@ behavior:
- S2_CELLIDFROMPOINT
-
+ | S2_CELLIDFROMPOINT
|
Gets the S2 cell ID covering a point GEOGRAPHY value.
@@ -218,8 +217,7 @@ behavior:
|
- S2_COVERINGCELLIDS
-
+ | S2_COVERINGCELLIDS
|
Gets an array of S2 cell IDs that cover a GEOGRAPHY value.
@@ -227,8 +225,7 @@ behavior:
|
- ST_ACCUM
-
+ | ST_ACCUM
|
Aggregates GEOGRAPHY values into an array of
@@ -237,8 +234,7 @@ behavior:
|
- ST_ANGLE
-
+ | ST_ANGLE
|
Takes three point GEOGRAPHY values, which represent two
@@ -247,8 +243,7 @@ behavior:
|
- ST_AREA
-
+ | ST_AREA
|
Gets the area covered by the polygons in a GEOGRAPHY value.
@@ -256,8 +251,7 @@ behavior:
|
- ST_ASBINARY
-
+ | ST_ASBINARY
|
Converts a GEOGRAPHY value to a
@@ -266,8 +260,7 @@ behavior:
|
- ST_ASGEOJSON
-
+ | ST_ASGEOJSON
|
Converts a GEOGRAPHY value to a STRING
@@ -276,8 +269,7 @@ behavior:
|
- ST_ASKML
-
+ | ST_ASKML
|
Converts a GEOGRAPHY value to a STRING
@@ -286,8 +278,7 @@ behavior:
|
- ST_ASTEXT
-
+ | ST_ASTEXT
|
Converts a GEOGRAPHY value to a
@@ -296,8 +287,7 @@ behavior:
|
- ST_AZIMUTH
-
+ | ST_AZIMUTH
|
Gets the azimuth of a line segment formed by two
@@ -306,8 +296,7 @@ behavior:
|
- ST_BOUNDARY
-
+ | ST_BOUNDARY
|
Gets the union of component boundaries in a
@@ -316,8 +305,7 @@ behavior:
|
- ST_BOUNDINGBOX
-
+ | ST_BOUNDINGBOX
|
Gets the bounding box for a GEOGRAPHY value.
@@ -325,8 +313,7 @@ behavior:
|
- ST_BUFFER
-
+ | ST_BUFFER
|
Gets the buffer around a GEOGRAPHY value, using a specific
@@ -335,8 +322,7 @@ behavior:
|
- ST_BUFFERWITHTOLERANCE
-
+ | ST_BUFFERWITHTOLERANCE
|
Gets the buffer around a GEOGRAPHY value, using tolerance.
@@ -344,8 +330,7 @@ behavior:
|
- ST_CENTROID
-
+ | ST_CENTROID
|
Gets the centroid of a GEOGRAPHY value.
@@ -353,8 +338,7 @@ behavior:
|
- ST_CLOSESTPOINT
-
+ | ST_CLOSESTPOINT
|
Gets the point on a GEOGRAPHY value which is closest to any
@@ -363,18 +347,17 @@ behavior:
|
- ST_CLUSTERDBSCAN
-
+ | ST_CLUSTERDBSCAN
|
Performs DBSCAN clustering on a group of GEOGRAPHY values and
produces a 0-based cluster number for this row.
+
|
- ST_CONTAINS
-
+ | ST_CONTAINS
|
Checks if one GEOGRAPHY value contains another
@@ -383,8 +366,7 @@ behavior:
|
- ST_CONVEXHULL
-
+ | ST_CONVEXHULL
|
Returns the convex hull for a GEOGRAPHY value.
@@ -392,8 +374,7 @@ behavior:
|
- ST_COVEREDBY
-
+ | ST_COVEREDBY
|
Checks if all points of a GEOGRAPHY value are on the boundary
@@ -402,8 +383,7 @@ behavior:
|
- ST_COVERS
-
+ | ST_COVERS
|
Checks if all points of a GEOGRAPHY value are on the boundary
@@ -412,8 +392,7 @@ behavior:
|
- ST_DIFFERENCE
-
+ | ST_DIFFERENCE
|
Gets the point set difference between two GEOGRAPHY values.
@@ -421,8 +400,7 @@ behavior:
|
- ST_DIMENSION
-
+ | ST_DIMENSION
|
Gets the dimension of the highest-dimensional element in a
@@ -431,8 +409,7 @@ behavior:
|
- ST_DISJOINT
-
+ | ST_DISJOINT
|
Checks if two GEOGRAPHY values are disjoint (do not intersect).
@@ -440,8 +417,7 @@ behavior:
|
- ST_DISTANCE
-
+ | ST_DISTANCE
|
Gets the shortest distance in meters between two GEOGRAPHY
@@ -450,8 +426,7 @@ behavior:
|
- ST_DUMP
-
+ | ST_DUMP
|
Returns an array of simple GEOGRAPHY components in a
@@ -460,8 +435,7 @@ behavior:
|
- ST_DUMPPOINTS
-
+ | ST_DUMPPOINTS
|
Produces an array of GEOGRAPHY points with all points, line
@@ -470,8 +444,7 @@ behavior:
|
- ST_DWITHIN
-
+ | ST_DWITHIN
|
Checks if any points in two GEOGRAPHY values are within a given
@@ -480,8 +453,7 @@ behavior:
|
- ST_ENDPOINT
-
+ | ST_ENDPOINT
|
Gets the last point of a linestring GEOGRAPHY value.
@@ -489,8 +461,7 @@ behavior:
|
- ST_EQUALS
-
+ | ST_EQUALS
|
Checks if two GEOGRAPHY values represent the same
@@ -499,17 +470,16 @@ behavior:
|
- ST_EXTENT
-
+ | ST_EXTENT
|
Gets the bounding box for a group of GEOGRAPHY values.
+
|
- ST_EXTERIORRING
-
+ | ST_EXTERIORRING
|
Returns a linestring GEOGRAPHY value that corresponds to the
@@ -518,8 +488,7 @@ behavior:
|
- ST_GEOGFROM
-
+ | ST_GEOGFROM
|
Converts a STRING or BYTES value
@@ -528,8 +497,7 @@ behavior:
|
- ST_GEOGFROMGEOJSON
-
+ | ST_GEOGFROMGEOJSON
|
Converts a STRING GeoJSON geometry value into a
@@ -538,8 +506,7 @@ behavior:
|
- ST_GEOGFROMKML
-
+ | ST_GEOGFROMKML
|
Converts a STRING KML geometry value into a
@@ -548,8 +515,7 @@ behavior:
|
- ST_GEOGFROMTEXT
-
+ | ST_GEOGFROMTEXT
|
Converts a STRING WKT geometry value into a
@@ -558,8 +524,7 @@ behavior:
|
- ST_GEOGFROMWKB
-
+ | ST_GEOGFROMWKB
|
Converts a BYTES or hexadecimal-text STRING WKT
@@ -568,8 +533,7 @@ behavior:
|
- ST_GEOGPOINT
-
+ | ST_GEOGPOINT
|
Creates a point GEOGRAPHY value for a given longitude and
@@ -578,8 +542,7 @@ behavior:
|
- ST_GEOGPOINTFROMGEOHASH
-
+ | ST_GEOGPOINTFROMGEOHASH
|
Gets a point GEOGRAPHY value that is in the middle of a
@@ -588,8 +551,7 @@ behavior:
|
- ST_GEOHASH
-
+ | ST_GEOHASH
|
Converts a point GEOGRAPHY value to a STRING
@@ -598,8 +560,7 @@ behavior:
|
- ST_GEOMETRYTYPE
-
+ | ST_GEOMETRYTYPE
|
Gets the Open Geospatial Consortium (OGC) geometry type for a
@@ -608,15 +569,13 @@ behavior:
|
- ST_HAUSDORFFDISTANCE
-
+ | ST_HAUSDORFFDISTANCE
|
Gets the discrete Hausdorff distance between two geometries. |
- ST_INTERIORRINGS
-
+ | ST_INTERIORRINGS
|
Gets the interior rings of a polygon GEOGRAPHY value.
@@ -624,8 +583,7 @@ behavior:
|
- ST_INTERSECTION
-
+ | ST_INTERSECTION
|
Gets the point set intersection of two GEOGRAPHY values.
@@ -633,8 +591,7 @@ behavior:
|
- ST_INTERSECTS
-
+ | ST_INTERSECTS
|
Checks if at least one point appears in two GEOGRAPHY
@@ -643,8 +600,7 @@ behavior:
|
- ST_INTERSECTSBOX
-
+ | ST_INTERSECTSBOX
|
Checks if a GEOGRAPHY value intersects a rectangle.
@@ -652,8 +608,7 @@ behavior:
|
- ST_ISCLOSED
-
+ | ST_ISCLOSED
|
Checks if all components in a GEOGRAPHY value are closed.
@@ -661,8 +616,7 @@ behavior:
|
- ST_ISCOLLECTION
-
+ | ST_ISCOLLECTION
|
Checks if the total number of points, linestrings, and polygons is
@@ -671,8 +625,7 @@ behavior:
|
- ST_ISEMPTY
-
+ | ST_ISEMPTY
|
Checks if a GEOGRAPHY value is empty.
@@ -680,8 +633,7 @@ behavior:
|
- ST_ISRING
-
+ | ST_ISRING
|
Checks if a GEOGRAPHY value is a closed, simple
@@ -690,8 +642,7 @@ behavior:
|
- ST_LENGTH
-
+ | ST_LENGTH
|
Gets the total length of lines in a GEOGRAPHY value.
@@ -699,8 +650,7 @@ behavior:
|
- ST_LINEINTERPOLATEPOINT
-
+ | ST_LINEINTERPOLATEPOINT
|
Gets a point at a specific fraction in a linestring GEOGRAPHY
@@ -709,8 +659,7 @@ behavior:
|
- ST_LINELOCATEPOINT
-
+ | ST_LINELOCATEPOINT
|
Gets a section of a linestring GEOGRAPHY value between the
@@ -719,8 +668,7 @@ behavior:
|
- ST_LINESUBSTRING
-
+ | ST_LINESUBSTRING
|
Gets a segment of a single linestring at a specific starting and
@@ -729,8 +677,7 @@ behavior:
|
- ST_MAKELINE
-
+ | ST_MAKELINE
|
Creates a linestring GEOGRAPHY value by concatenating the point
@@ -739,8 +686,7 @@ behavior:
|
- ST_MAKEPOLYGON
-
+ | ST_MAKEPOLYGON
|
Constructs a polygon GEOGRAPHY value by combining
@@ -749,8 +695,7 @@ behavior:
|
- ST_MAKEPOLYGONORIENTED
-
+ | ST_MAKEPOLYGONORIENTED
|
Constructs a polygon GEOGRAPHY value, using an array of
@@ -760,8 +705,7 @@ behavior:
|
- ST_MAXDISTANCE
-
+ | ST_MAXDISTANCE
|
Gets the longest distance between two non-empty
@@ -770,8 +714,7 @@ behavior:
|
- ST_NPOINTS
-
+ | ST_NPOINTS
|
An alias of ST_NUMPOINTS .
@@ -779,8 +722,7 @@ behavior:
|
- ST_NUMGEOMETRIES
-
+ | ST_NUMGEOMETRIES
|
Gets the number of geometries in a GEOGRAPHY value.
@@ -788,8 +730,7 @@ behavior:
|
- ST_NUMPOINTS
-
+ | ST_NUMPOINTS
|
Gets the number of vertices in the a GEOGRAPHY value.
@@ -797,8 +738,7 @@ behavior:
|
- ST_PERIMETER
-
+ | ST_PERIMETER
|
Gets the length of the boundary of the polygons in a
@@ -807,8 +747,7 @@ behavior:
|
- ST_POINTN
-
+ | ST_POINTN
|
Gets the point at a specific index of a linestring GEOGRAPHY
@@ -817,8 +756,7 @@ behavior:
|
- ST_SIMPLIFY
-
+ | ST_SIMPLIFY
|
Converts a GEOGRAPHY value into a simplified
@@ -827,8 +765,7 @@ behavior:
|
- ST_SNAPTOGRID
-
+ | ST_SNAPTOGRID
|
Produces a GEOGRAPHY value, where each vertex has
@@ -837,8 +774,7 @@ behavior:
|
- ST_STARTPOINT
-
+ | ST_STARTPOINT
|
Gets the first point of a linestring GEOGRAPHY value.
@@ -846,8 +782,7 @@ behavior:
|
- ST_TOUCHES
-
+ | ST_TOUCHES
|
Checks if two GEOGRAPHY values intersect and their interiors
@@ -856,8 +791,7 @@ behavior:
|
- ST_UNION
-
+ | ST_UNION
|
Gets the point set union of multiple GEOGRAPHY values.
@@ -865,18 +799,17 @@ behavior:
|
- ST_UNION_AGG
-
+ | ST_UNION_AGG
|
Aggregates over GEOGRAPHY values and gets their
point set union.
+
|
- ST_WITHIN
-
+ | ST_WITHIN
|
Checks if one GEOGRAPHY value contains another
@@ -885,8 +818,7 @@ behavior:
|
- ST_X
-
+ | ST_X
|
Gets the longitude from a point GEOGRAPHY value.
@@ -894,8 +826,7 @@ behavior:
|
- ST_Y
-
+ | ST_Y
|
Gets the latitude from a point GEOGRAPHY value.
@@ -905,7 +836,7 @@ behavior:
|
-### `S2_CELLIDFROMPOINT`
+## `S2_CELLIDFROMPOINT`
```sql
S2_CELLIDFROMPOINT(point_geography[, level => cell_level])
@@ -969,7 +900,7 @@ FROM data;
[s2-coveringcellids]: #s2_coveringcellids
-### `S2_COVERINGCELLIDS`
+## `S2_COVERINGCELLIDS`
```sql
S2_COVERINGCELLIDS(
@@ -1034,7 +965,7 @@ FROM data;
[s2-root-link]: https://s2geometry.io/
-### `ST_ACCUM`
+## `ST_ACCUM`
```sql
ST_ACCUM(geography)
@@ -1053,7 +984,7 @@ but only applies to `GEOGRAPHY` objects.
[geography-link-array-agg]: https://github.com/google/zetasql/blob/master/docs/aggregate_functions.md#array_agg
-### `ST_ANGLE`
+## `ST_ANGLE`
```sql
ST_ANGLE(point_geography_1, point_geography_2, point_geography_3)
@@ -1108,7 +1039,7 @@ SELECT ST_ANGLE(geo1,geo2,geo3) AS angle FROM geos ORDER BY id;
*---------------------*/
```
-### `ST_AREA`
+## `ST_AREA`
```sql
ST_AREA(geography_expression[, use_spheroid])
@@ -1136,7 +1067,7 @@ the value `FALSE`. The default value of `use_spheroid` is `FALSE`.
[wgs84-link]: https://en.wikipedia.org/wiki/World_Geodetic_System
-### `ST_ASBINARY`
+## `ST_ASBINARY`
```sql
ST_ASBINARY(geography_expression)
@@ -1158,7 +1089,7 @@ See [`ST_GEOGFROMWKB`][st-geogfromwkb] to construct a
[st-geogfromwkb]: #st_geogfromwkb
-### `ST_ASGEOJSON`
+## `ST_ASGEOJSON`
```sql
ST_ASGEOJSON(geography_expression)
@@ -1188,7 +1119,7 @@ See [`ST_GEOGFROMGEOJSON`][st-geogfromgeojson] to construct a
[st-geogfromgeojson]: #st_geogfromgeojson
-### `ST_ASKML`
+## `ST_ASKML`
```sql
ST_ASKML(geography)
@@ -1206,7 +1137,7 @@ of precision.
[kml-geometry-link]: https://developers.google.com/kml/documentation/kmlreference#geometry
-### `ST_ASTEXT`
+## `ST_ASTEXT`
```sql
ST_ASTEXT(geography_expression)
@@ -1228,7 +1159,7 @@ See [`ST_GEOGFROMTEXT`][st-geogfromtext] to construct a
[st-geogfromtext]: #st_geogfromtext
-### `ST_AZIMUTH`
+## `ST_AZIMUTH`
```sql
ST_AZIMUTH(point_geography_1, point_geography_2)
@@ -1289,7 +1220,7 @@ SELECT ST_AZIMUTH(geo1, geo2) AS azimuth FROM geos ORDER BY id;
*--------------------*/
```
-### `ST_BOUNDARY`
+## `ST_BOUNDARY`
```sql
ST_BOUNDARY(geography_expression)
@@ -1313,7 +1244,7 @@ defined as follows:
`GEOGRAPHY`
-### `ST_BOUNDINGBOX`
+## `ST_BOUNDINGBOX`
```sql
ST_BOUNDINGBOX(geography_expression)
@@ -1373,7 +1304,7 @@ See [`ST_EXTENT`][st-extent] for the aggregate version of `ST_BOUNDINGBOX`.
[st-extent]: #st_extent
-### `ST_BUFFER`
+## `ST_BUFFER`
```sql
ST_BUFFER(
@@ -1447,7 +1378,7 @@ SELECT
[st-numpoints]: #st_numpoints
-### `ST_BUFFERWITHTOLERANCE`
+## `ST_BUFFERWITHTOLERANCE`
```sql
ST_BUFFERWITHTOLERANCE(
@@ -1517,7 +1448,7 @@ SELECT
[st-buffer]: #st_buffer
-### `ST_CENTROID`
+## `ST_CENTROID`
```sql
ST_CENTROID(geography_expression)
@@ -1550,7 +1481,7 @@ and the likelihood of this happening is vanishingly small.
Point `GEOGRAPHY`
-### `ST_CLOSESTPOINT`
+## `ST_CLOSESTPOINT`
```sql
ST_CLOSESTPOINT(geography_1, geography_2[, use_spheroid])
@@ -1579,7 +1510,7 @@ Point `GEOGRAPHY`
[wgs84-link]: https://en.wikipedia.org/wiki/World_Geodetic_System
-### `ST_CLUSTERDBSCAN`
+## `ST_CLUSTERDBSCAN`
```sql
ST_CLUSTERDBSCAN(geography_column, epsilon, minimum_geographies)
@@ -1680,7 +1611,7 @@ Geos ORDER BY row_id
[dbscan-link]: https://en.wikipedia.org/wiki/DBSCAN
-### `ST_CONTAINS`
+## `ST_CONTAINS`
```sql
ST_CONTAINS(geography_1, geography_2)
@@ -1722,7 +1653,7 @@ FROM UNNEST([0, 1, 10]) AS i;
[st_covers]: #st_covers
-### `ST_CONVEXHULL`
+## `ST_CONVEXHULL`
```sql
ST_CONVEXHULL(geography_expression)
@@ -1775,7 +1706,7 @@ FROM Geographies;
*-----------------------------------------+--------------------------------------------------------*/
```
-### `ST_COVEREDBY`
+## `ST_COVEREDBY`
```sql
ST_COVEREDBY(geography_1, geography_2)
@@ -1796,7 +1727,7 @@ Given two `GEOGRAPHY`s `a` and `b`,
[st-covers]: #st_covers
-### `ST_COVERS`
+## `ST_COVERS`
```sql
ST_COVERS(geography_1, geography_2)
@@ -1834,7 +1765,7 @@ FROM UNNEST([0, 1, 10]) AS i;
*--------------+--------*/
```
-### `ST_DIFFERENCE`
+## `ST_DIFFERENCE`
```sql
ST_DIFFERENCE(geography_1, geography_2)
@@ -1865,7 +1796,7 @@ be in the difference.
**Example**
The following query illustrates the difference between `geog1`, a larger polygon
-`POLYGON((0 0, 10 0, 10 10, 0 0))` and `geog1`, a smaller polygon
+`POLYGON((0 0, 10 0, 10 10, 0 0))` and `geog2`, a smaller polygon
`POLYGON((4 2, 6 2, 8 6, 4 2))` that intersects with `geog1`. The result is
`geog1` with a hole where `geog2` intersects with it.
@@ -1883,7 +1814,7 @@ SELECT
*--------------------------------------------------------*/
```
-### `ST_DIMENSION`
+## `ST_DIMENSION`
```sql
ST_DIMENSION(geography_expression)
@@ -1907,7 +1838,7 @@ returns `-1`.
`INT64`
-### `ST_DISJOINT`
+## `ST_DISJOINT`
```sql
ST_DISJOINT(geography_1, geography_2)
@@ -1926,7 +1857,7 @@ that is, no point in `geography_1` also appears in `geography_2`.
[st-intersects]: #st_intersects
-### `ST_DISTANCE`
+## `ST_DISTANCE`
```
ST_DISTANCE(geography_1, geography_2[, use_spheroid])
@@ -1952,7 +1883,7 @@ of `use_spheroid` is `FALSE`.
[wgs84-link]: https://en.wikipedia.org/wiki/World_Geodetic_System
-### `ST_DUMP`
+## `ST_DUMP`
```sql
ST_DUMP(geography[, dimension])
@@ -2024,7 +1955,7 @@ FROM example
*-------------------------------------+------------------------------*/
```
-### `ST_DUMPPOINTS`
+## `ST_DUMPPOINTS`
```sql
ST_DUMPPOINTS(geography)
@@ -2063,7 +1994,7 @@ FROM example
*-------------------------------------+------------------------------------*/
```
-### `ST_DWITHIN`
+## `ST_DWITHIN`
```sql
ST_DWITHIN(geography_1, geography_2, distance[, use_spheroid])
@@ -2090,7 +2021,7 @@ the value `FALSE`. The default value of `use_spheroid` is `FALSE`.
[wgs84-link]: https://en.wikipedia.org/wiki/World_Geodetic_System
-### `ST_ENDPOINT`
+## `ST_ENDPOINT`
```sql
ST_ENDPOINT(linestring_geography)
@@ -2118,7 +2049,7 @@ SELECT ST_ENDPOINT(ST_GEOGFROMTEXT('LINESTRING(1 1, 2 1, 3 2, 3 3)')) last
*--------------*/
```
-### `ST_EQUALS`
+## `ST_EQUALS`
```sql
ST_EQUALS(geography_1, geography_2)
@@ -2126,19 +2057,24 @@ ST_EQUALS(geography_1, geography_2)
**Description**
-Returns `TRUE` if `geography_1` and `geography_2` represent the same
+Checks if two `GEOGRAPHY` values represent the same `GEOGRAPHY` value. Returns
+`TRUE` if the values are the same, otherwise returns `FALSE`.
-`GEOGRAPHY` value. More precisely, this means that
-one of the following conditions holds:
-+ `ST_COVERS(geography_1, geography_2) = TRUE` and `ST_COVERS(geography_2,
- geography_1) = TRUE`
-+ Both `geography_1` and `geography_2` are empty.
+**Definitions**
-Therefore, two `GEOGRAPHY`s may be equal even if the
-ordering of points or vertices differ, as long as they still represent the same
-geometric structure.
++ `geography_1`: The first `GEOGRAPHY` value to compare.
++ `geography_2`: The second `GEOGRAPHY` value to compare.
-**Constraints**
+**Details**
+
+As long as they still represent the same geometric structure, two
+`GEOGRAPHY` values can be equal even if the ordering of points or vertices
+differ. This means that one of the following conditions must be true for this
+function to return `TRUE`:
+
++ Both `ST_COVERS(geography_1, geography_2)` and
+ `ST_COVERS(geography_2, geography_1)` are `TRUE`.
++ Both `geography_1` and `geography_2` are empty.
`ST_EQUALS` is not guaranteed to be a transitive function.
@@ -2146,7 +2082,7 @@ geometric structure.
`BOOL`
-### `ST_EXTENT`
+## `ST_EXTENT`
```sql
ST_EXTENT(geography_expression)
@@ -2205,7 +2141,7 @@ FROM data
[st-boundingbox]: #st_boundingbox
-### `ST_EXTERIORRING`
+## `ST_EXTERIORRING`
```sql
ST_EXTERIORRING(polygon_geography)
@@ -2245,7 +2181,7 @@ SELECT ST_EXTERIORRING(g) AS ring FROM geo;
*---------------------------------------*/
```
-### `ST_GEOGFROM`
+## `ST_GEOGFROM`
```sql
ST_GEOGFROM(expression)
@@ -2335,7 +2271,7 @@ SELECT ST_GEOGFROM(
[st-geogfromgeojson]: #st_geogfromgeojson
-### `ST_GEOGFROMGEOJSON`
+## `ST_GEOGFROMGEOJSON`
```sql
ST_GEOGFROMGEOJSON(
@@ -2392,7 +2328,7 @@ The JSON input is subject to the following constraints:
[st-asgeojson]: #st_asgeojson
-### `ST_GEOGFROMKML`
+## `ST_GEOGFROMKML`
```sql
ST_GEOGFROMKML(kml_geometry)
@@ -2408,7 +2344,7 @@ Takes a `STRING` [KML geometry][kml-geometry-link] and returns a
[kml-geometry-link]: https://developers.google.com/kml/documentation/kmlreference#geometry
-### `ST_GEOGFROMTEXT`
+## `ST_GEOGFROMTEXT`
@@ -2510,7 +2446,7 @@ FROM data
[st-geogfromgeojson]: #st_geogfromgeojson
-### `ST_GEOGFROMWKB`
+## `ST_GEOGFROMWKB`
```sql
ST_GEOGFROMWKB(
@@ -2604,7 +2540,7 @@ FROM wkb_data
[st-geogfromgeojson]: #st_geogfromgeojson
-### `ST_GEOGPOINT`
+## `ST_GEOGPOINT`
```sql
ST_GEOGPOINT(longitude, latitude)
@@ -2631,7 +2567,7 @@ NOTE: Some systems present latitude first; take care with argument order.
Point `GEOGRAPHY`
-### `ST_GEOGPOINTFROMGEOHASH`
+## `ST_GEOGPOINTFROMGEOHASH`
```sql
ST_GEOGPOINTFROMGEOHASH(geohash)
@@ -2648,7 +2584,7 @@ Point `GEOGRAPHY`
[geohash-link]: https://en.wikipedia.org/wiki/Geohash
-### `ST_GEOHASH`
+## `ST_GEOHASH`
```sql
ST_GEOHASH(geography_expression[, maxchars])
@@ -2689,7 +2625,7 @@ SELECT ST_GEOHASH(ST_GEOGPOINT(-122.35, 47.62), 10) geohash
[geohash-link]: https://en.wikipedia.org/wiki/Geohash
-### `ST_GEOMETRYTYPE`
+## `ST_GEOMETRYTYPE`
```sql
ST_GEOMETRYTYPE(geography_expression)
@@ -2757,7 +2693,7 @@ FROM example;
[st-asgeojson]: #st_asgeojson
-### `ST_HAUSDORFFDISTANCE`
+## `ST_HAUSDORFFDISTANCE`
```sql
ST_HAUSDORFFDISTANCE(
@@ -2868,7 +2804,7 @@ FROM data;
[h-distance]: http://en.wikipedia.org/wiki/Hausdorff_distance
-### `ST_INTERIORRINGS`
+## `ST_INTERIORRINGS`
```sql
ST_INTERIORRINGS(polygon_geography)
@@ -2915,7 +2851,7 @@ SELECT ST_INTERIORRINGS(g) AS rings FROM geo;
*----------------------------------------------------------------------------*/
```
-### `ST_INTERSECTION`
+## `ST_INTERSECTION`
```sql
ST_INTERSECTION(geography_1, geography_2)
@@ -2942,7 +2878,7 @@ predicate functions.
[st-disjoint]: #st_disjoint
-### `ST_INTERSECTS`
+## `ST_INTERSECTS`
```sql
ST_INTERSECTS(geography_1, geography_2)
@@ -2963,7 +2899,7 @@ returns `FALSE`.
[st-disjoint]: #st_disjoint
-### `ST_INTERSECTSBOX`
+## `ST_INTERSECTSBOX`
```sql
ST_INTERSECTSBOX(geography, lng1, lat1, lng2, lat2)
@@ -3010,7 +2946,7 @@ FROM UNNEST([ST_GEOGPOINT(10, 10), ST_GEOGPOINT(170, 10),
*----------------+--------------+--------------*/
```
-### `ST_ISCLOSED`
+## `ST_ISCLOSED`
```sql
ST_ISCLOSED(geography_expression)
@@ -3066,7 +3002,7 @@ FROM example;
[st-boundary]: #st_boundary
-### `ST_ISCOLLECTION`
+## `ST_ISCOLLECTION`
```sql
ST_ISCOLLECTION(geography_expression)
@@ -3083,7 +3019,7 @@ An empty `GEOGRAPHY` is not a collection.
`BOOL`
-### `ST_ISEMPTY`
+## `ST_ISEMPTY`
```sql
ST_ISEMPTY(geography_expression)
@@ -3102,7 +3038,7 @@ For example, the results of expressions `ST_GEOGFROMTEXT('POINT EMPTY')` and
`BOOL`
-### `ST_ISRING`
+## `ST_ISRING`
```sql
ST_ISRING(geography_expression)
@@ -3124,7 +3060,7 @@ An empty `GEOGRAPHY` is not a ring.
[st-isclosed]: #st_isclosed
-### `ST_LENGTH`
+## `ST_LENGTH`
```sql
ST_LENGTH(geography_expression[, use_spheroid])
@@ -3152,7 +3088,7 @@ the value `FALSE`. The default value of `use_spheroid` is `FALSE`.
[wgs84-link]: https://en.wikipedia.org/wiki/World_Geodetic_System
-### `ST_LINEINTERPOLATEPOINT`
+## `ST_LINEINTERPOLATEPOINT`
```sql
ST_LINEINTERPOLATEPOINT(linestring_geography, fraction)
@@ -3212,7 +3148,7 @@ FROM fractions
*-------------+-------------------------------------------*/
```
-### `ST_LINELOCATEPOINT`
+## `ST_LINELOCATEPOINT`
```sql
ST_LINELOCATEPOINT(linestring_geography, point_geography)
@@ -3285,7 +3221,7 @@ FROM geos
[st-closestpoint]: #st_closestpoint
-### `ST_LINESUBSTRING`
+## `ST_LINESUBSTRING`
```sql
ST_LINESUBSTRING(linestring_geography, start_fraction, end_fraction);
@@ -3352,7 +3288,7 @@ FROM data;
+------------------------------------------*/
```
-### `ST_MAKELINE`
+## `ST_MAKELINE`
```sql
ST_MAKELINE(geography_1, geography_2)
@@ -3394,7 +3330,7 @@ the result will be a `GEOGRAPHY` with exactly one point.
LineString `GEOGRAPHY`
-### `ST_MAKEPOLYGON`
+## `ST_MAKEPOLYGON`
```sql
ST_MAKEPOLYGON(polygon_shell[, array_of_polygon_holes])
@@ -3460,7 +3396,7 @@ point.
[st-makepolygonoriented]: #st_makepolygonoriented
-### `ST_MAKEPOLYGONORIENTED`
+## `ST_MAKEPOLYGONORIENTED`
```sql
ST_MAKEPOLYGONORIENTED(array_of_geography)
@@ -3518,7 +3454,7 @@ polygon hole that is sufficiently small may disappear, or the resulting
[st-makepolygon]: #st_makepolygon
-### `ST_MAXDISTANCE`
+## `ST_MAXDISTANCE`
```sql
ST_MAXDISTANCE(geography_1, geography_2[, use_spheroid])
@@ -3549,7 +3485,7 @@ the value `FALSE`. The default value of `use_spheroid` is `FALSE`.
[wgs84-link]: https://en.wikipedia.org/wiki/World_Geodetic_System
-### `ST_NPOINTS`
+## `ST_NPOINTS`
```sql
ST_NPOINTS(geography_expression)
@@ -3561,7 +3497,7 @@ An alias of [ST_NUMPOINTS][st-numpoints].
[st-numpoints]: #st_numpoints
-### `ST_NUMGEOMETRIES`
+## `ST_NUMGEOMETRIES`
```
ST_NUMGEOMETRIES(geography_expression)
@@ -3608,7 +3544,7 @@ FROM example;
*------------------------------------------------------+----------------*/
```
-### `ST_NUMPOINTS`
+## `ST_NUMPOINTS`
```sql
ST_NUMPOINTS(geography_expression)
@@ -3627,7 +3563,7 @@ vertices.
`INT64`
-### `ST_PERIMETER`
+## `ST_PERIMETER`
```sql
ST_PERIMETER(geography_expression[, use_spheroid])
@@ -3655,7 +3591,7 @@ the value `FALSE`. The default value of `use_spheroid` is `FALSE`.
[wgs84-link]: https://en.wikipedia.org/wiki/World_Geodetic_System
-### `ST_POINTN`
+## `ST_POINTN`
```sql
ST_POINTN(linestring_geography, index)
@@ -3698,7 +3634,7 @@ FROM linestring;
[st-endpoint]: #st_endpoint
-### `ST_SIMPLIFY`
+## `ST_SIMPLIFY`
```sql
ST_SIMPLIFY(geography, tolerance_meters)
@@ -3777,7 +3713,7 @@ FROM example
*-------------------------------------+------------------+-------------------------------------*/
```
-### `ST_SNAPTOGRID`
+## `ST_SNAPTOGRID`
```sql
ST_SNAPTOGRID(geography_expression, grid_size)
@@ -3798,7 +3734,7 @@ that it is of the form `10^n`, where `-10 < n < 0`.
`GEOGRAPHY`
-### `ST_STARTPOINT`
+## `ST_STARTPOINT`
```sql
ST_STARTPOINT(linestring_geography)
@@ -3826,7 +3762,7 @@ SELECT ST_STARTPOINT(ST_GEOGFROMTEXT('LINESTRING(1 1, 2 1, 3 2, 3 3)')) first
*--------------*/
```
-### `ST_TOUCHES`
+## `ST_TOUCHES`
```sql
ST_TOUCHES(geography_1, geography_2)
@@ -3844,7 +3780,7 @@ Returns `TRUE` provided the following two conditions are satisfied:
`BOOL`
-### `ST_UNION`
+## `ST_UNION`
```sql
ST_UNION(geography_1, geography_2)
@@ -3893,7 +3829,7 @@ SELECT ST_UNION(
[st-union-agg]: #st_union_agg
-### `ST_UNION_AGG`
+## `ST_UNION_AGG`
```sql
ST_UNION_AGG(geography)
@@ -3930,7 +3866,7 @@ FROM UNNEST([
[st-union]: #st_union
-### `ST_WITHIN`
+## `ST_WITHIN`
```sql
ST_WITHIN(geography_1, geography_2)
@@ -3950,7 +3886,7 @@ as [`ST_CONTAINS`][st-contains]`(b, a)`. Note the opposite order of arguments.
[st-contains]: #st_contains
-### `ST_X`
+## `ST_X`
```sql
ST_X(point_geography_expression)
@@ -3992,7 +3928,7 @@ FROM points;
*--------------+-----------+----------*/
```
-### `ST_Y`
+## `ST_Y`
```sql
ST_Y(point_geography_expression)
diff --git a/docs/graph-conditional-expressions.md b/docs/graph-conditional-expressions.md
new file mode 100644
index 000000000..70123b708
--- /dev/null
+++ b/docs/graph-conditional-expressions.md
@@ -0,0 +1,11 @@
+
+
+
+
+# GQL conditional expressions
+
+Graph Query Language (GQL) supports all ZetaSQL conditional expressions. To
+learn more, see [Conditional expressions][conditional-expressions].
+
+[conditional-expressions]: https://github.com/google/zetasql/blob/master/docs/conditional_expressions.md
+
diff --git a/docs/graph-data-types.md b/docs/graph-data-types.md
new file mode 100644
index 000000000..316b06dc7
--- /dev/null
+++ b/docs/graph-data-types.md
@@ -0,0 +1,82 @@
+
+
+
+
+# GQL data types
+
+Graph Query Language (GQL) supports all ZetaSQL [data types][data-types],
+including the following GQL-specific data type:
+
+## Graph data types list
+
+
+
+
+ Name |
+ Summary |
+
+
+
+
+
+ Graph element type
+ |
+
+ An element in a property graph.
+ SQL type name: GRAPH_ELEMENT
+ |
+
+
+
+
+
+## Graph element type
+
+
+
+
+
+Name |
+Description |
+
+
+
+
+GRAPH_ELEMENT |
+
+ An element in a property graph.
+ |
+
+
+
+
+A variable with a `GRAPH_ELEMENT` type is produced by a graph query.
+The generated type has this format:
+
+```
+GRAPH_ELEMENT
+```
+
+A graph element can be one of two kinds: a node or edge.
+A graph element is similar to the struct type, except that fields are
+graph properties, and you can only access graph properties by name.
+A graph element can represent nodes or edges from multiple node or edge tables
+if multiple such tables match the given label expression.
+
+**Example**
+
+In the following example, `n` represents a graph element in the
+[`FinGraph`][fin-graph] property graph:
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person)
+RETURN n.name
+```
+
+[graph-query]: https://github.com/google/zetasql/blob/master/docs/graph-intro.md
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+[data-types]: https://github.com/google/zetasql/blob/master/docs/data-types.md
+
diff --git a/docs/graph-gql-functions.md b/docs/graph-gql-functions.md
new file mode 100644
index 000000000..0f1ad9c12
--- /dev/null
+++ b/docs/graph-gql-functions.md
@@ -0,0 +1,916 @@
+
+
+
+
+# GQL functions
+
+All ZetaSQL [functions][functions-all] are supported,
+including the following GQL-specific functions:
+
+## Function list
+
+
+
+
+ Name |
+ Summary |
+
+
+
+
+
+
+
+ DESTINATION_NODE_ID
+
+
+ |
+ Gets a unique identifier of a graph edge's destination node. |
+
+
+
+
+
+ EDGES
+
+
+ |
+
+ Gets the edges in a graph path. The resulting array retains the
+ original order in the graph path.
+ |
+
+
+
+
+
+ ELEMENT_ID
+
+
+ |
+ Gets a graph element's unique identifier. |
+
+
+
+
+
+ IS_ACYCLIC
+
+
+ |
+ Checks if a graph path has a repeating node. |
+
+
+
+
+
+ IS_TRAIL
+
+
+ |
+ Checks if a graph path has a repeating edge. |
+
+
+
+
+
+ LABELS
+
+
+ |
+ Gets the labels associated with a graph element. |
+
+
+
+
+
+ NODES
+
+
+ |
+
+ Gets the nodes in a graph path. The resulting array retains the
+ original order in the graph path.
+ |
+
+
+
+
+
+ PATH
+
+
+ |
+ Creates a graph path from a list of graph elements. |
+
+
+
+
+
+ PATH_FIRST
+
+
+ |
+ Gets the first node in a graph path. |
+
+
+
+
+
+ PATH_LAST
+
+
+ |
+ Gets the last node in a graph path. |
+
+
+
+
+
+ PATH_LENGTH
+
+
+ |
+ Gets the number of edges in a graph path. |
+
+
+
+
+
+ PROPERTY_NAMES
+
+
+ |
+ Gets the property names associated with a graph element. |
+
+
+
+
+
+ SOURCE_NODE_ID
+
+
+ |
+ Gets a unique identifier of a graph edge's source node. |
+
+
+
+
+
+## `DESTINATION_NODE_ID`
+
+```sql
+DESTINATION_NODE_ID(edge_element)
+```
+
+**Description**
+
+Gets a unique identifier of a graph edge's destination node. The unique identifier is only valid for the scope of the query where it is obtained.
+
+**Arguments**
+
++ `edge_element`: A `GRAPH_ELEMENT` value that represents an edge.
+
+**Details**
+
+Returns `NULL` if `edge_element` is `NULL`.
+
+**Return type**
+
+`STRING`
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH (:Person)-[o:Owns]->(a:Account)
+RETURN a.id AS account_id, DESTINATION_NODE_ID(o) AS destination_node_id
+
+/*------------------------------------------+
+ |account_id | destination_node_id |
+ +-----------|------------------------------+
+ | 7 | mUZpbkdyYXBoLkFjY291bnQAeJEO |
+ | 16 | mUZpbkdyYXBoLkFjY291bnQAeJEg |
+ | 20 | mUZpbkdyYXBoLkFjY291bnQAeJEo |
+ +------------------------------------------*/
+```
+
+Note that the actual identifiers obtained may be different from what's shown above.
+
+## `EDGES`
+
+```sql
+EDGES(graph_path)
+```
+
+**Description**
+
+Gets the edges in a graph path. The resulting array retains the
+original order in the graph path.
+
+**Definitions**
+
++ `graph_path`: A `GRAPH_PATH` value that represents a graph path.
+
+**Details**
+
+If `graph_path` is `NULL`, returns `NULL`.
+
+**Return type**
+
+`ARRAY`
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH p=(src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET es = EDGES(p)
+RETURN
+ LABELS(es[0]) AS labels,
+ es[0].to_id AS to_account;
+
+/*----------------------------*
+ | labels | to_account |
+ +----------------------------+
+ | ["Transfers"] | 7 |
+ | ["Transfers"] | 7 |
+ | ["Transfers"] | 16 |
+ | ["Transfers"] | 16 |
+ | ["Transfers"] | 16 |
+ | ["Transfers"] | 20 |
+ | ["Transfers"] | 20 |
+ *----------------------------*/
+```
+
+## `ELEMENT_ID`
+
+```sql
+ELEMENT_ID(element)
+```
+
+**Description**
+
+Gets a graph element's unique identifier. The unique identifier is only valid for the scope of the query where it is obtained.
+
+**Arguments**
+
++ `element`: A `GRAPH_ELEMENT` value.
+
+**Details**
+
+Returns `NULL` if `element` is `NULL`.
+
+**Return type**
+
+`STRING`
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)-[o:Owns]->(:Account)
+RETURN p.name AS name, ELEMENT_ID(p) AS node_element_id, ELEMENT_ID(o) AS edge_element_id
+
+/*--------------------------------------------------------------------------------------------------------------------------------------------+
+ | name | node_element_id | edge_element_id . |
+ +------|------------------------------|------------------------------------------------------------------------------------------------------+
+ | Alex | mUZpbkdyYXBoLlBlcnNvbgB4kQI= | mUZpbkdyYXBoLlBlcnNvbk93bkFjY291bnQAeJECkQ6ZRmluR3JhcGguUGVyc29uAHiRAplGaW5HcmFwaC5BY2NvdW50AHiRDg== |
+ | Dana | mUZpbkdyYXBoLlBlcnNvbgB4kQQ= | mUZpbkdyYXBoLlBlcnNvbk93bkFjY291bnQAeJEGkSCZRmluR3JhcGguUGVyc29uAHiRBplGaW5HcmFwaC5BY2NvdW50AHiRIA== |
+ | Lee | mUZpbkdyYXBoLlBlcnNvbgB4kQY= | mUZpbkdyYXBoLlBlcnNvbk93bkFjY291bnQAeJEEkSiZRmluR3JhcGguUGVyc29uAHiRBJlGaW5HcmFwaC5BY2NvdW50AHiRKA== |
+ +--------------------------------------------------------------------------------------------------------------------------------------------*/
+```
+
+Note that the actual identifiers obtained may be different from what's shown above.
+
+## `IS_ACYCLIC`
+
+```sql
+IS_ACYCLIC(graph_path)
+```
+
+**Description**
+
+Checks if a graph path has a repeating node. Returns `TRUE` if a repetition
+isn't found, otherwise returns `FALSE`.
+
+**Definitions**
+
++ `graph_path`: A `GRAPH_PATH` value that represents a graph path.
+
+**Details**
+
+Two nodes are considered equal if they compare as equal.
+
+Returns `NULL` if `graph_path` is `NULL`.
+
+**Return type**
+
+`BOOL`
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH p=(src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+RETURN src.id AS source_account_id, IS_ACYCLIC(p) AS is_acyclic_path
+
+/*-------------------------------------*
+ | source_account_id | is_acyclic_path |
+ +-------------------------------------+
+ | 16 | TRUE |
+ | 20 | TRUE |
+ | 20 | TRUE |
+ | 16 | FALSE |
+ | 7 | TRUE |
+ | 7 | TRUE |
+ | 20 | FALSE |
+ *-------------------------------------*/
+```
+
+## `IS_TRAIL`
+
+```sql
+IS_TRAIL(graph_path)
+```
+
+**Description**
+
+Checks if a graph path has a repeating edge. Returns `TRUE` if a repetition
+isn't found, otherwise returns `FALSE`.
+
+**Definitions**
+
++ `graph_path`: A `GRAPH_PATH` value that represents a graph path.
+
+**Details**
+
+Returns `NULL` if `graph_path` is `NULL`.
+
+**Return type**
+
+`BOOL`
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH
+ p=(a1:Account)-[t1:Transfers]->(a2:Account)-[t2:Transfers]->
+ (a3:Account)-[t3:Transfers]->(a4:Account)
+WHERE a1.id < a4.id
+RETURN
+ IS_TRAIL(p) AS is_trail_path, t1.id as t1_id, t2.id as t2_id, t3.id as t3_id
+
+/*---------------+-------+-------+-------+
+ | is_trail_path | t1_id | t2_id | t3_id |
+ +---------------+-------+-------+-------+
+ | FALSE | 16 | 20 | 16 |
+ | TRUE | 7 | 16 | 20 |
+ | TRUE | 7 | 16 | 20 |
+ +---------------+-------+-------+-------*/
+```
+
+## `LABELS`
+
+```sql
+LABELS(element)
+```
+
+**Description**
+
+Gets the labels associated with a graph element and preserves the original case
+of each label.
+
+**Arguments**
+
++ `element`: A `GRAPH_ELEMENT` value that represents the graph element to
+ extract labels from.
+
+**Details**
+
+Returns `NULL` if `element` is `NULL`.
+
+**Return type**
+
+`ARRAY`
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person|Account)
+RETURN LABELS(n) AS label, n.id
+
+/*----------------+
+ | label | id |
+ +----------------+
+ | [Account] | 7 |
+ | [Account] | 16 |
+ | [Account] | 20 |
+ | [Person] | 1 |
+ | [Person] | 2 |
+ | [Person] | 3 |
+ +----------------*/
+```
+
+## `NODES`
+
+```sql
+NODES(graph_path)
+```
+
+**Description**
+
+Gets the nodes in a graph path. The resulting array retains the
+original order in the graph path.
+
+**Definitions**
+
++ `graph_path`: A `GRAPH_PATH` value that represents a graph path.
+
+**Details**
+
+Returns `NULL` if `graph_path` is `NULL`.
+
+**Return type**
+
+`ARRAY`
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH p=(src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET ns = NODES(p)
+RETURN
+ JSON_QUERY(TO_JSON(ns)[0], '$.labels') AS labels,
+ JSON_QUERY(TO_JSON(ns)[0], '$.properties.nick_name') AS nick_name;
+
+/*--------------------------------*
+ | labels | nick_name |
+ +--------------------------------+
+ | ["Account"] | "Vacation Fund" |
+ | ["Account"] | "Rainy Day Fund" |
+ | ["Account"] | "Rainy Day Fund" |
+ | ["Account"] | "Rainy Day Fund" |
+ | ["Account"] | "Vacation Fund" |
+ | ["Account"] | "Vacation Fund" |
+ | ["Account"] | "Vacation Fund" |
+ | ["Account"] | "Rainy Day Fund" |
+ *--------------------------------*/
+```
+
+## `PATH`
+
+```sql
+PATH(graph_element[, ...])
+```
+
+**Description**
+
+Creates a graph path from a list of graph elements.
+
+**Definitions**
+
++ `graph_element`: A `GRAPH_ELEMENT` value that represents a graph element,
+ such as a node or edge, to add to a graph path.
+
+**Details**
+
+This function produces an error if:
+
++ A graph element is `NULL`.
++ Nodes aren't interleaved with edges.
++ An edge doesn't connect to neighboring nodes.
+
+**Return type**
+
+`GRAPH_PATH`
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH (src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET p = PATH(src, t1, mid, t2, dst)
+RETURN
+ JSON_QUERY(TO_JSON(p)[0], '$.labels') AS element_a,
+ JSON_QUERY(TO_JSON(p)[1], '$.labels') AS element_b,
+ JSON_QUERY(TO_JSON(p)[2], '$.labels') AS element_c
+
+/*-------------------------------------------*
+ | element_a | element_b | element_c |
+ +-------------------------------------------+
+ | ["Account"] | ["Transfers"] | ["Account"] |
+ | ... | ... | ... |
+ *-------------------------------------------*/
+```
+
+```sql
+-- Error: in 'p', a graph element is NULL.
+GRAPH FinGraph
+MATCH (src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET p = PATH(src, NULL, mid, t2, dst)
+RETURN TO_JSON(p) AS results
+```
+
+```sql
+-- Error: in 'p', 'src' and 'mid' are nodes that should be interleaved with an
+-- edge.
+GRAPH FinGraph
+MATCH (src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET p = PATH(src, mid, t2, dst)
+RETURN TO_JSON(p) AS results
+```
+
+```sql
+-- Error: in 'p', 't2' is an edge that does not connect to a neighboring node on
+-- the right.
+GRAPH FinGraph
+MATCH (src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET p = PATH(src, t2, mid)
+RETURN TO_JSON(p) AS results
+```
+
+## `PATH_FIRST`
+
+```sql
+PATH_FIRST(graph_path)
+```
+
+**Description**
+
+Gets the first node in a graph path.
+
+**Definitions**
+
++ `graph_path`: A `GRAPH_PATH` value that represents the graph path to
+ extract the first node from.
+
+**Details**
+
+Returns `NULL` if `graph_path` is `NULL`.
+
+**Return type**
+
+`GRAPH_ELEMENT`
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH p=(src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET f = PATH_FIRST(p)
+RETURN
+ LABELS(f) AS labels,
+ f.nick_name AS nick_name;
+
+/*--------------------------*
+ | labels | nick_name |
+ +--------------------------+
+ | Account | Vacation Fund |
+ | Account | Rainy Day Fund |
+ | Account | Rainy Day Fund |
+ | Account | Vacation Fund |
+ | Account | Vacation Fund |
+ | Account | Vacation Fund |
+ | Account | Rainy Day Fund |
+ *--------------------------*/
+```
+
+## `PATH_LAST`
+
+```sql
+PATH_LAST(graph_path)
+```
+
+**Description**
+
+Gets the last node in a graph path.
+
+**Definitions**
+
++ `graph_path`: A `GRAPH_PATH` value that represents the graph path to
+ extract the last node from.
+
+**Details**
+
+Returns `NULL` if `graph_path` is `NULL`.
+
+**Return type**
+
+`GRAPH_ELEMENT`
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH p=(src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+LET f = PATH_LAST(p)
+RETURN
+ LABELS(f) AS labels,
+ f.nick_name AS nick_name;
+
+/*--------------------------*
+ | labels | nick_name |
+ +--------------------------+
+ | Account | Vacation Fund |
+ | Account | Vacation Fund |
+ | Account | Vacation Fund |
+ | Account | Vacation Fund |
+ | Account | Rainy Day Fund |
+ | Account | Rainy Day Fund |
+ | Account | Rainy Day Fund |
+ *--------------------------*/
+```
+
+## `PATH_LENGTH`
+
+```sql
+PATH_LENGTH(graph_path)
+```
+
+**Description**
+
+Gets the number of edges in a graph path.
+
+**Definitions**
+
++ `graph_path`: A `GRAPH_PATH` value that represents the graph path with the
+ edges to count.
+
+**Details**
+
+Returns `NULL` if `graph_path` is `NULL`.
+
+**Return type**
+
+`INT64`
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH p=(src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account)
+RETURN PATH_LENGTH(p) AS results
+
+/*---------*
+ | results |
+ +---------+
+ | 2 |
+ | 2 |
+ | 2 |
+ | 2 |
+ | 2 |
+ | 2 |
+ | 2 |
+ *---------*/
+```
+
+## `PROPERTY_NAMES`
+
+```sql
+PROPERTY_NAMES(element)
+```
+
+**Description**
+
+Gets the name of each property associated with a graph element and preserves
+the original case of each name.
+
+**Arguments**
+
++ `element`: A `GRAPH_ELEMENT` value.
+
+**Details**
+
+Returns `NULL` if `element` is `NULL`.
+
+**Return type**
+
+`ARRAY`
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person|Account)
+RETURN PROPERTY_NAMES(n) AS property_names, n.id
+
+/*-----------------------------------------------+
+ | property_names                            | id |
+ +-----------------------------------------------+
+ | [create_time, id, is_blocked, nick_name] | 7 |
+ | [create_time, id, is_blocked, nick_name] | 16 |
+ | [create_time, id, is_blocked, nick_name] | 20 |
+ | [birthday, city, country, id, name] | 1 |
+ | [birthday, city, country, id, name] | 2 |
+ | [birthday, city, country, id, name] | 3 |
+ +-----------------------------------------------*/
+```
+
+## `SOURCE_NODE_ID`
+
+```sql
+SOURCE_NODE_ID(edge_element)
+```
+
+**Description**
+
+Gets a unique identifier of a graph edge's source node. The unique identifier is only valid for the scope of the query where it is obtained.
+
+**Arguments**
+
++ `edge_element`: A `GRAPH_ELEMENT` value that represents an edge.
+
+**Details**
+
+Returns `NULL` if `edge_element` is `NULL`.
+
+**Return type**
+
+`STRING`
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)-[o:Owns]->(:Account)
+RETURN p.name AS name, SOURCE_NODE_ID(o) AS source_node_id
+
+/*-------------------------------------+
+ | name | source_node_id |
+ +------|------------------------------+
+ | Alex | mUZpbkdyYXBoLlBlcnNvbgB4kQI= |
+ | Dana | mUZpbkdyYXBoLlBlcnNvbgB4kQQ= |
+ | Lee | mUZpbkdyYXBoLlBlcnNvbgB4kQY= |
+ +-------------------------------------*/
+```
+
+Note that the actual identifiers obtained may be different from what's shown above.
+
+## Supplemental materials
+
+### Horizontal aggregate function calls in GQL
+
+
+In GQL, a horizontal aggregate function is an aggregate function that summarizes
+the contents of exactly one array-typed value. Because a horizontal aggregate
+function does not need to aggregate vertically across rows like a traditional
+aggregate function, you can use it like a normal function expression.
+Horizontal aggregates are only allowed in certain syntactic contexts: `LET`,
+`FILTER` statements or `WHERE` clauses.
+
+Horizontal aggregation is especially useful when paired with a
+[group variable][group-variables]. You can create a group variable inside a
+quantified path pattern in a linear graph query.
+
+#### Syntactic restrictions
+
++ The argument to the aggregate function must reference exactly one array-typed
+ value.
++ Can only be used in `LET`, `FILTER` statements or `WHERE`
+ clauses.
++ Nesting horizontal aggregates is not allowed.
++ Aggregate functions that support ordering (`ARRAY_AGG`, `STRING_AGG`,
+ `ARRAY_CONCAT_AGG`) can't be used as horizontal aggregate functions.
+
+#### Examples
+
+In the following query, the `SUM` function horizontally aggregates over an
+array (`arr`), and then produces the sum of the values in `arr`:
+
+```sql {.no-copy}
+GRAPH FinGraph
+LET arr = [1, 2, 3]
+LET total = SUM(arr)
+RETURN total
+
+/*-------+
+ | total |
+ +-------+
+ | 6 |
+ +-------*/
+```
+
+In the following query, the `SUM` function horizontally aggregates over an
+array of structs (`arr`), and then produces the sum of the `x` fields in the
+array:
+
+```sql {.no-copy}
+GRAPH FinGraph
+LET arr = [STRUCT(1 as x, 10 as y), STRUCT(2, 9), STRUCT(3, 8)]
+LET total = SUM(arr.x)
+RETURN total
+
+/*-------+
+ | total |
+ +-------+
+ | 6 |
+ +-------*/
+```
+
+In the following query, the `AVG` function horizontally aggregates over an
+array of structs (`arr`), and then produces the average of the `x` and `y`
+fields in the array:
+
+```sql {.no-copy}
+GRAPH FinGraph
+LET arr = [STRUCT(1 as x, 10 as y), STRUCT(2, 9), STRUCT(3, 8)]
+LET avg_sum = AVG(arr.x + arr.y)
+RETURN avg_sum
+
+/*---------+
+ | avg_sum |
+ +---------+
+ | 11 |
+ +---------*/
+```
+
+The following query produces an error because two arrays were passed into
+the `AVG` aggregate function:
+
+```sql {.bad}
+-- ERROR: Horizontal aggregation on more than one array-typed variable
+-- is not allowed
+GRAPH FinGraph
+LET arr1 = [1, 2, 3]
+LET arr2 = [5, 4, 3]
+LET avg_val = AVG(arr1 + arr2)
+RETURN avg_val
+```
+
+The following query demonstrates a common pitfall. Within the aggregate
+function, every reference to the array that's being horizontally aggregated
+over is treated as a single element of that array.
+
+The fix is to lift any expression that needs to use the array as a whole
+outside of the horizontal aggregation.
+
+```sql {.bad}
+-- ERROR: No matching signature for function ARRAY_LENGTH for argument types: INT64
+GRAPH FinGraph
+LET arr1 = [1, 2, 3]
+LET bad_avg_val = SUM(arr1 / ARRAY_LENGTH(arr1))
+RETURN bad_avg_val
+```
+
+The fix:
+
+```sql {.no-copy}
+GRAPH FinGraph
+LET arr1 = [1, 2, 3]
+LET len = ARRAY_LENGTH(arr1)
+LET avg_val = SUM(arr1 / len)
+RETURN avg_val
+```
+
+In the following query, the `COUNT` function counts the transfers and the
+unique transfer amounts along paths with one to three hops between a source
+account (`src`) and a destination account (`dst`):
+
+```sql {.no-copy}
+GRAPH FinGraph
+MATCH (src:Account)-[e:Transfers]->{1, 3}(dst:Account)
+WHERE src != dst
+LET num_transfers = COUNT(e)
+LET unique_amount_transfers = COUNT(DISTINCT e.amount)
+FILTER unique_amount_transfers != num_transfers
+RETURN src.id as src_id, num_transfers, unique_amount_transfers, dst.id AS destination_account_id
+
+/*---------------------------------------------------------------------------+
+ | src_id | num_transfers | unique_amount_transfers | destination_account_id |
+ +---------------------------------------------------------------------------+
+ | 7 | 3 | 2 | 16 |
+ | 20 | 3 | 2 | 16 |
+ | 7 | 2 | 1 | 20 |
+ | 16 | 3 | 2 | 20 |
+ +---------------------------------------------------------------------------*/
+```
+
+In the following query, the `SUM` function takes a group variable called
+`e` that represents an array of transfers, and then sums the amount
+for each transfer. Note that horizontal aggregation is not allowed in the
+`RETURN` statement: the `ARRAY_AGG` there is an aggregate over the result set.
+
+```sql
+GRAPH FinGraph
+MATCH (src:Account {id: 7})-[e:Transfers]->{1,2}(dst:Account)
+LET total_amount = SUM(e.amount)
+RETURN
+ src.id AS source_account_id, dst.id AS destination_account_id,
+ ARRAY_AGG(total_amount) as total_amounts_per_path
+
+/*---------------------------------------------------------------------+
+ | source_account_id | destination_account_id | total_amounts_per_path |
+ +---------------------------------------------------------------------+
+ | 7 | 16 | 300,100 |
+ | 7 | 20 | 600,400 |
+ +---------------------------------------------------------------------*/
+```
+
+[group-variables]: https://github.com/google/zetasql/blob/master/docs/graph-patterns.md#quantified_paths
+
+[functions-all]: https://github.com/google/zetasql/blob/master/docs/functions-and-operators.md
+
diff --git a/docs/graph-intro.md b/docs/graph-intro.md
new file mode 100644
index 000000000..32029cab6
--- /dev/null
+++ b/docs/graph-intro.md
@@ -0,0 +1,126 @@
+
+
+
+
+# GQL overview
+
+Graph Query Language (GQL) is a language designed to query graph data. This page
+describes the high level structure of GQL.
+
+## Statement and clause
+
+In GQL, a statement refers to a complete unit of execution, and a clause
+represents a modifier to statements. See the [statement list][statement-list] for a complete list.
+
+## Working table
+
+A working table refers to the intermediate table representing the input or
+output of a GQL statement.
+
+A GQL statement receives an incoming working table and produces an outgoing
+working table.
+
+The first incoming working table is a table with a single row. The last
+outgoing working table is returned as the query results.
+
+## Linear query statement
+
+A linear query statement consists of multiple statements from the [statement
+list][statement-list]. It always ends with a [`RETURN` statement][return-statement].
+
+Each statement generates intermediate results (the working table) and
+then passes those results to the next statement. The output of a
+linear query statement comes from the final `RETURN` statement.
+
+#### Examples
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)-[o:Owns]->(a:Account)
+FILTER p.birthday < '1990-01-10'
+RETURN p.name
+
+/*------+
+ | name |
+ +------+
+ | Dana |
+ | Lee |
+ +------*/
+```
+
+## Combining linear query statements with set operators
+
+You can use a set operator to combine multiple linear query statements into one.
+For more information, see the syntax for the [GQL set operation][set-op].
+
+#### Examples
+
+A set operator between two linear query statements with the same set of output
+column names and types but with different column orders is supported. For example:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)
+RETURN p.name, 1 AS group_id
+UNION ALL
+MATCH (p:Person)
+RETURN 2 AS group_id, p.name
+
+/*------+----------+
+ | name | group_id |
+ +------+----------+
+ | Alex | 1 |
+ | Dana | 1 |
+ | Lee | 1 |
+ | Alex | 2 |
+ | Dana | 2 |
+ | Lee | 2 |
+ +------+----------*/
+```
+
+## Chaining linear query statements with the `NEXT` statement
+
+You can use the `NEXT` keyword to chain multiple linear query statements
+into one.
+
+#### Examples
+
+The following is an example of a graph query chaining multiple linear query statements
+using `NEXT`.
+
+```sql
+GRAPH FinGraph
+
+MATCH (a:Account {is_blocked: TRUE})
+RETURN a
+UNION ALL
+MATCH (a:Account)<-[:Owns]-(p:Person {id: 2})
+RETURN a
+
+NEXT
+
+MATCH (a:Account)-[t:Transfers]->(oa:Account)
+WITH DISTINCT oa
+RETURN oa.nick_name
+
+/*----------------+
+ | nick_name |
+ +----------------+
+ | Vacation Fund |
+ | Vacation Fund |
+ | Rainy Day Fund |
+ +----------------*/
+```
+
+[supertypes]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#supertypes
+
+[match-statement]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#gql_match
+
+[return-statement]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#gql_return
+
+[statement-list]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#language_list
+
+[set-op]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#gql_set
+
+[graph-clause]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#graph_query
+
diff --git a/docs/graph-operators.md b/docs/graph-operators.md
new file mode 100644
index 000000000..0b0cb14d4
--- /dev/null
+++ b/docs/graph-operators.md
@@ -0,0 +1,427 @@
+
+
+
+
+# GQL operators
+
+Graph Query Language (GQL) supports all ZetaSQL [operators][operators],
+including the following GQL-specific operators:
+
+## Graph operators list
+
+
+
+
+ Name |
+ Summary |
+
+
+
+
+
+ Graph concatenation operator
+ |
+
+ Combines multiple graph paths into one and preserves the original order of
+ the nodes and edges.
+ |
+
+
+
+ Graph logical operators
+ |
+
+ Tests for the truth of a condition in a graph and produces either
+ TRUE or FALSE .
+ |
+
+
+
+ Graph predicates
+ |
+
+ Tests for the truth of a condition for a graph element and produces
+ TRUE , FALSE , or NULL .
+ |
+
+
+
+ IS DESTINATION predicate
+ |
+ In a graph, checks to see if a node is or isn't the destination of an edge. |
+
+
+
+ IS SOURCE predicate
+ |
+ In a graph, checks to see if a node is or isn't the source of an edge. |
+
+
+
+ PROPERTY_EXISTS predicate
+ |
+ In a graph, checks to see if a property exists for an element. |
+
+
+
+ SAME predicate
+ |
+
+ In a graph, determines if all graph elements in a list bind to the same node or edge.
+ |
+
+
+
+
+
+## Graph concatenation operator
+
+
+```sql
+graph_path || graph_path [ || ... ]
+```
+
+**Description**
+
+Combines multiple graph paths into one and preserves the original order of the
+nodes and edges.
+
+Arguments:
+
++ `graph_path`: A `GRAPH_PATH` value that represents a graph path to
+ concatenate.
+
+**Details**
+
+This operator produces an error if the last node in the first path isn't the
+same as the first node in the second path.
+
+```sql
+-- This successfully produces the concatenated path called `full_path`.
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid:Account),
+ q=(mid)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+```
+
+```sql
+-- This produces an error because the first node of the path to be concatenated
+-- (mid2) is not equal to the last node of the previous path (mid1).
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid1:Account),
+ q=(mid2:Account)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+```
+
+The first node in each subsequent path is removed from the
+concatenated path.
+
+```sql
+-- The concatenated path called `full_path` contains these elements:
+-- src, t1, mid, t2, dst.
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid:Account),
+ q=(mid)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+```
+
+If any `graph_path` is `NULL`, produces `NULL`.
+
+**Example**
+
+In the following query, the paths called `p` and `q` are concatenated. Notice
+that `mid` is used at the end of the first path and at the beginning of the
+second path. Also notice that the duplicate `mid` is removed from the
+concatenated path called `full_path`:
+
+```sql
+GRAPH FinGraph
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid:Account),
+ q = (mid)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+RETURN
+ JSON_QUERY(TO_JSON(full_path)[0], '$.labels') AS element_a,
+ JSON_QUERY(TO_JSON(full_path)[1], '$.labels') AS element_b,
+ JSON_QUERY(TO_JSON(full_path)[2], '$.labels') AS element_c,
+ JSON_QUERY(TO_JSON(full_path)[3], '$.labels') AS element_d,
+ JSON_QUERY(TO_JSON(full_path)[4], '$.labels') AS element_e,
+ JSON_QUERY(TO_JSON(full_path)[5], '$.labels') AS element_f
+
+/*-------------------------------------------------------------------------------------*
+ | element_a | element_b | element_c | element_d | element_e | element_f |
+ +-------------------------------------------------------------------------------------+
+ | ["Account"] | ["Transfers"] | ["Account"] | ["Transfers"] | ["Account"] | |
+ | ... | ... | ... | ... | ... | ... |
+ *-------------------------------------------------------------------------------------*/
+```
+
+The following query produces an error because the last node for `p` must
+be the first node for `q`:
+
+```sql
+-- Error: `mid1` and `mid2` are not equal.
+GRAPH FinGraph
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid1:Account),
+ q=(mid2:Account)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+RETURN TO_JSON(full_path) AS results
+```
+
+The following query produces an error because the path called `p` is `NULL`:
+
+```sql
+-- Error: a graph path is NULL.
+GRAPH FinGraph
+MATCH
+ p=NULL,
+ q=(mid:Account)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+RETURN TO_JSON(full_path) AS results
+```
+
+## Graph logical operators
+
+
+ZetaSQL supports the following logical operators in
+[element pattern label expressions][element-pattern-definition]:
+
+
+
+
+ Name |
+ Syntax |
+ Description |
+
+
+
+
+ NOT |
+ !X |
+
+ Returns TRUE if X is not included, otherwise,
+ returns FALSE .
+ |
+
+
+ OR |
+ X | Y |
+
+ Returns TRUE if either X or Y is
+ included, otherwise, returns FALSE .
+ |
+
+
+ AND |
+ X & Y |
+
+ Returns TRUE if both X and Y are
+ included, otherwise, returns FALSE .
+ |
+
+
+
+
+[element-pattern-definition]: https://github.com/google/zetasql/blob/master/docs/graph-patterns.md#element_pattern_definition
+
+## Graph predicates
+
+
+ZetaSQL supports the following graph-specific predicates in
+graph expressions. A predicate can produce `TRUE`, `FALSE`, or `NULL`.
+
++ [`PROPERTY_EXISTS` predicate][property-exists-predicate]
++ [`IS SOURCE` predicate][is-source-predicate]
++ [`IS DESTINATION` predicate][is-destination-predicate]
++ [`SAME` predicate][same-predicate]
+
+[property-exists-predicate]: #property_exists_predicate
+
+[is-source-predicate]: #is_source_predicate
+
+[is-destination-predicate]: #is_destination_predicate
+
+[same-predicate]: #same_predicate
+
+## `IS DESTINATION` predicate
+
+
+```sql
+node IS [ NOT ] DESTINATION [ OF ] edge
+```
+
+**Description**
+
+In a graph, checks to see if a node is or isn't the destination of an edge.
+Can produce `TRUE`, `FALSE`, or `NULL`.
+
+Arguments:
+
++ `node`: The graph pattern variable for the node element.
++ `edge`: The graph pattern variable for the edge element.
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[transfer:Transfers]-(b:Account)
+WHERE a IS DESTINATION of transfer
+RETURN a.id AS a_id, b.id AS b_id
+
+/*-------------+
+ | a_id | b_id |
+ +-------------+
+ | 16 | 7 |
+ | 16 | 7 |
+ | 20 | 16 |
+ | 7 | 20 |
+ | 16 | 20 |
+ +-------------*/
+```
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[transfer:Transfers]-(b:Account)
+WHERE b IS DESTINATION of transfer
+RETURN a.id AS a_id, b.id AS b_id
+
+/*-------------+
+ | a_id | b_id |
+ +-------------+
+ | 7 | 16 |
+ | 7 | 16 |
+ | 16 | 20 |
+ | 20 | 7 |
+ | 20 | 16 |
+ +-------------*/
+```
+
+## `IS SOURCE` predicate
+
+
+```sql
+node IS [ NOT ] SOURCE [ OF ] edge
+```
+
+**Description**
+
+In a graph, checks to see if a node is or isn't the source of an edge.
+Can produce `TRUE`, `FALSE`, or `NULL`.
+
+Arguments:
+
++ `node`: The graph pattern variable for the node element.
++ `edge`: The graph pattern variable for the edge element.
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[transfer:Transfers]-(b:Account)
+WHERE a IS SOURCE of transfer
+RETURN a.id AS a_id, b.id AS b_id
+
+/*-------------+
+ | a_id | b_id |
+ +-------------+
+ | 20 | 7 |
+ | 7 | 16 |
+ | 7 | 16 |
+ | 20 | 16 |
+ | 16 | 20 |
+ +-------------*/
+```
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[transfer:Transfers]-(b:Account)
+WHERE b IS SOURCE of transfer
+RETURN a.id AS a_id, b.id AS b_id
+
+/*-------------+
+ | a_id | b_id |
+ +-------------+
+ | 7 | 20 |
+ | 16 | 7 |
+ | 16 | 7 |
+ | 16 | 20 |
+ | 20 | 16 |
+ +-------------*/
+```
+
+## `PROPERTY_EXISTS` predicate
+
+
+```sql
+PROPERTY_EXISTS(element, element_property)
+```
+
+**Description**
+
+In a graph, checks to see if a property exists for an element.
+Can produce `TRUE`, `FALSE`, or `NULL`.
+
+Arguments:
+
++ `element`: The graph pattern variable for a node or edge element.
++ `element_property`: The name of the property to look for in `element`.
+ The property name must refer to a property in the graph. If the property
+ does not exist in the graph, an error is produced. The property name is
+ resolved in a case-insensitive manner.
+
+**Example**
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person|Account WHERE PROPERTY_EXISTS(n, name))
+RETURN n.name
+
+/*------+
+ | name |
+ +------+
+ | Alex |
+ | Dana |
+ | Lee |
+ +------*/
+```
+
+## `SAME` predicate
+
+
+```sql
+SAME (element, element[, element])
+```
+
+**Description**
+
+In a graph, determines if all graph elements in a list bind to the same node or
+edge. Can produce `TRUE`, `FALSE`, or `NULL`.
+
+Arguments:
+
++ `element`: The graph pattern variable for a node or edge element.
+
+**Example**
+
+The following query checks to see if `src` and `dest` are not the same account.
+
+```sql
+GRAPH FinGraph
+MATCH (src:Account)<-[transfer:Transfers]-(dest:Account)
+WHERE NOT SAME(src, dest)
+RETURN src.id AS source_id, dest.id AS destination_id
+
+/*----------------------------+
+ | source_id | destination_id |
+ +----------------------------+
+ | 7 | 20 |
+ | 16 | 7 |
+ | 16 | 7 |
+ | 16 | 20 |
+ | 20 | 16 |
+ +----------------------------*/
+```
+
+[operators]: https://github.com/google/zetasql/blob/master/docs/operators.md
+
diff --git a/docs/graph-patterns.md b/docs/graph-patterns.md
new file mode 100644
index 000000000..13fdc0da2
--- /dev/null
+++ b/docs/graph-patterns.md
@@ -0,0 +1,1838 @@
+
+
+
+
+# GQL patterns
+
+Graph Query Language (GQL) supports the following patterns. Patterns can
+be used in a `MATCH` statement.
+
+## Pattern list
+
+
+
+
+ Name |
+ Summary |
+
+
+
+
+
+ Graph pattern
+ |
+ A pattern to search for in a graph. |
+
+
+
+ Element pattern
+ |
+
+ Represents a node pattern or an edge pattern in a path pattern.
+ |
+
+
+
+ Subpath pattern
+ |
+ Matches a portion of a path. |
+
+
+
+ Quantified path pattern
+ |
+
+ A path pattern with a portion that can repeat within a specified range.
+ |
+
+
+
+ Label expression
+ |
+
+ An expression composed from one or more graph label names in an
+ element pattern.
+ |
+
+
+
+ Path search prefix
+ |
+
+ Restricts path pattern to return all paths, any path, or a shortest path
+ from each data partition.
+ |
+
+
+
+ Path mode
+ |
+
+ Includes or excludes paths that have repeating edges.
+ |
+
+
+
+
+
+## Graph pattern
+
+
+
+graph_pattern:
+ path_pattern_list [ where_clause ]
+
+path_pattern_list:
+ top_level_path_pattern[, ...]
+
+top_level_path_pattern:
+ [ { path_search_prefix | path_mode } ] path_pattern
+
+path_pattern:
+ path_term[ ...]
+
+subpath_pattern:
+ ( [ path_mode ] path_pattern [ where_clause ] )
+
+path_term:
+ {
+ element_pattern
+ | subpath_pattern
+ }
+
+where_clause:
+ WHERE bool_expression
+
+
+#### Description
+
+A graph pattern consists of a list of path patterns. You can optionally
+include a `WHERE` clause. For example:
+
+ ```sql {.no-copy}
+ (a:Account)-[e:Transfers]->(b:Account) -- path pattern
+ WHERE a.nick_name = b.nick_name -- WHERE clause
+ ```
+
+#### Definitions
+
++ `path_pattern_list`: A list of path patterns. For example, the
+ following list contains two path patterns:
+
+ ```sql {.no-copy}
+ (a:Account)-[t:Transfers]->(b:Account), -- path pattern 1
+ (a)<-[o:Owns]-(p:Person) -- path pattern 2
+ ```
++ `path_search_prefix`: A qualifier for a path pattern to return all paths, any
+  path, or any shortest path. For more information, see
+  [Path search prefix][search-prefix].
+
++ `path_mode`: The [path mode][path-mode] for a path pattern. Used to filter out
+ paths that have repeating edges.
++ `path_pattern`: A path pattern that matches paths in a property graph.
+ For example:
+
+ ```sql {.no-copy}
+ (a:Account)-[e:Transfers]->(b:Account)
+ ```
++ `path_term`: An [element pattern][element-pattern-definition] or a
+ [subpath pattern][graph-subpaths] in a path pattern.
++ `element_pattern`: A node pattern or an edge pattern. To learn more, see
+ [Element pattern definition][element-pattern-definition].
++ `subpath_pattern`: A path pattern enclosed in parentheses. To learn
+ more, see [Graph subpath pattern][graph-subpaths].
++ `where_clause`: A `WHERE` clause, which filters the matched results. For
+ example:
+
+ ```sql {.no-copy}
+ MATCH (a:Account)->(b:Account)
+ WHERE a.nick_name = b.nick_name
+ ```
+
+ Boolean expressions can be used in a `WHERE` clause, including
+ graph-specific [predicates][graph-predicates] and
+ [logical operators][graph-operators]. Use the
+ [field access operator][field-access-operator] to access graph properties.
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following query matches all nodes:
+
+```sql
+GRAPH FinGraph
+MATCH (n)
+RETURN n.name, n.id
+
+/*-----------+
+ | name | id |
+ +-----------+
+ | NULL | 7 |
+ | NULL | 16 |
+ | NULL | 20 |
+ | Alex | 1 |
+ | Dana | 2 |
+ | Lee | 3 |
+ +-----------*/
+```
+
+The following query matches all directed edges:
+
+```sql
+GRAPH FinGraph
+MATCH ()-[e]->()
+RETURN COUNT(e.id) AS results
+
+/*---------+
+ | results |
+ +---------+
+ | 8 |
+ +---------*/
+```
+
+The following query matches all directed edges in either direction:
+
+```sql
+GRAPH FinGraph
+MATCH ()-[e]-()
+RETURN COUNT(e.id) AS results
+
+/*---------+
+ | results |
+ +---------+
+ | 16 |
+ +---------*/
+```
+
+The following query matches paths matching two path patterns:
+
+```sql
+GRAPH FinGraph
+MATCH
+ (src:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(dst:Account),
+ (mid)<-[:Owns]-(p:Person)
+RETURN
+ p.name, src.id AS src_account_id, mid.id AS mid_account_id,
+ dst.id AS dst_account_id
+
+/*---------------------------------------------------------+
+ | name | src_account_id | mid_account_id | dst_account_id |
+ +---------------------------------------------------------+
+ | Alex | 20 | 7 | 16 |
+ | Alex | 20 | 7 | 16 |
+ | Dana | 16 | 20 | 7 |
+ | Dana | 16 | 20 | 16 |
+ | Lee | 7 | 16 | 20 |
+ | Lee | 7 | 16 | 20 |
+ | Lee | 20 | 16 | 20 |
+ +---------------------------------------------------------*/
+```
+
+[graph-subpaths]: #graph_subpaths
+
+[element-pattern-definition]: #element_pattern_definition
+
+[graph-predicates]: https://github.com/google/zetasql/blob/master/docs/operators.md#graph_predicates
+
+[graph-operators]: https://github.com/google/zetasql/blob/master/docs/operators.md#graph_logical_operators
+
+[search-prefix]: #search_prefix
+
+[path-mode]: #path_mode
+
+[gql-match]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#gql_match
+
+[horizontal-aggregation]: https://github.com/google/zetasql/blob/master/docs/graph-gql-functions.md
+
+## Element pattern
+
+
+Note: Syntax wrapped in double quotes (`""`) is required.
+
+
+element_pattern:
+ {
+ node_pattern |
+ edge_pattern
+ }
+
+node_pattern:
+ (pattern_filler)
+
+edge_pattern:
+ {
+ full_edge_any |
+ full_edge_left |
+ full_edge_right |
+ abbreviated_edge_any |
+ abbreviated_edge_left |
+ abbreviated_edge_right
+ }
+
+full_edge_any:
+ "-[" pattern_filler "]-"
+
+full_edge_left:
+ "<-[" pattern_filler "]-"
+
+full_edge_right:
+ "-[" pattern_filler "]->"
+
+abbreviated_edge_any:
+ -
+
+abbreviated_edge_left:
+ <-
+
+abbreviated_edge_right:
+ ->
+
+pattern_filler:
+ [ graph_pattern_variable ]
+ [ is_label_condition ]
+ [ { where_clause | property_filters } ]
+
+is_label_condition:
+ { IS | : } label_expression
+
+where_clause:
+ WHERE bool_expression
+
+property_filters:
+ "{" element_property[, ...] "}"
+
+element_property:
+ element_property_name : element_property_value
+
+
+#### Description
+
+An element pattern is either a node pattern or an edge pattern.
+
+#### Definitions
+
++ `node_pattern`: a pattern to match nodes in a property graph. For example:
+
+ ```sql {.no-copy}
+ (n:Person) -- Matches all Person nodes in a property graph.
+ ```
+
+ ```sql {.no-copy}
+ (c:City) -- Matches all City nodes in a property graph.
+ ```
+
+ ```sql {.no-copy}
+ () -- Matches all nodes in a property graph.
+ ```
++ `edge_pattern`: a pattern to match edges in a property graph. For example:
+
+ ```sql {.no-copy}
+ -[LivesIn]-> -- Matches all LivesIn edges in a property graph.
+ ```
+
+ ```sql {.no-copy}
+ -[]-> -- Matches all right directed edges in a property graph.
+ ```
+
+ ```sql {.no-copy}
+ (n:Person)-(c:City) -- Matches edges between Person and City nodes in any direction.
+ ```
+
+ There are several types of edge patterns:
+
+ + `full_edge_any`: Any-direction edge with an optional pattern filler.
+ + `abbreviated_edge_any`: Any-direction edge, no pattern filler.
+
+ ```sql {.no-copy}
+-[e:Located_In]- -- Any-direction full edge with filler.
+-[]- -- Any-direction full edge, no filler.
+- -- Any-direction abbreviated edge.
+ ```
+
+ + `full_edge_left`: Left-direction edge with an optional pattern filler.
+ + `abbreviated_edge_left`: Left-direction edge, no pattern filler.
+
+ ```sql {.no-copy}
+ <-[e:Located_In]- -- Left full edge with filler.
+ <-[]- -- Left full edge, no filler.
+ <- -- Left abbreviated edge.
+ ```
+
+ + `full_edge_right`: Right-direction edge with an optional pattern filler.
+ + `abbreviated_edge_right`: Right-direction edge, no pattern filler.
+
+ ```sql {.no-copy}
+ -[e:Located_In]-> -- Right full edge with filler.
+ -[]-> -- Right full edge, no filler.
+ -> -- Right abbreviated edge.
+ ```
++ `pattern_filler`: A pattern filler represents specifications on the node or
+ edge pattern that you want to match. A pattern filler can optionally contain:
+ `graph_pattern_variable`, `is_label_condition`, and
+ `where_clause`. For example:
+
+ ```sql {.no-copy}
+ (p:Person WHERE p.name = 'Kai')
+ ```
+
+
+
++ `graph_pattern_variable`: A variable for the pattern filler.
+ You can use a graph pattern variable to reference the element
+ it's bound to in a linear graph query.
+
+ `p` is the variable for the graph pattern element `p:Person` in the
+ following example:
+
+ ```sql {.no-copy}
+ (p:Person)-[:Located_In]->(c:City),
+ (p)-[:Knows]->(p:Person WHERE p.name = 'Kai')
+ ```
++ `is_label_condition`: A condition that the matched nodes and edges
+  must satisfy. The condition contains a `label_expression`. You can use
+  either `IS` or `:` to begin the condition. For example, these are the same:
+
+ ```sql {.no-copy}
+ (p IS Person)
+ ```
+
+ ```sql {.no-copy}
+ (p:Person)
+ ```
+
+ ```sql {.no-copy}
+ -[IS Knows]->
+ ```
+
+ ```sql {.no-copy}
+ -[:Knows]->
+ ```
++ `label_expression`: The expression for the label. For more information,
+ see [Label expression definition][label-expression-definition].
++ `where_clause`: A `WHERE` clause, which filters the nodes or edges that were
+ matched.
+
+ Boolean expressions are supported, including graph-specific [predicates][graph-predicates]
+ and [logical operators][graph-operators].
+
+ The `WHERE` clause can't reference properties when the graph pattern variable
+ is absent.
+
+ Use the [field access operator][field-access-operator] to access
+ graph properties.
+
+ Examples:
+
+ ```sql {.no-copy}
+ (m:MusicCreator WHERE m.name = 'Cruz Richards')
+ ```
+
+ ```sql {.no-copy}
+ (s:Singer)->(album:Album)<-(s2)
+ WHERE s.name != s2.name
+ ```
+
+ ```sql {.no-copy}
+ (s:Singer)-[has_friend:Knows]->
+ (s2:Singer WHERE s2.singer_name = 'Mahan Lomond')
+ ```
++ `property_filters`: Filters the nodes or edges that were matched. It contains
+ a key value map of element properties and their values. Property filters can
+ appear in both node and edge patterns.
+
+ Examples:
+
+ ```sql {.no-copy}
+ {name: 'Cruz Richards'}
+ ```
+
+ ```sql {.no-copy}
+ {last_name: 'Richards', albums: 2}
+ ```
++ `element_property`: An element property in `property_filters`. The same
+ element property can be included more than once in the same
+ property filter list. Element properties can be included in any order in a
+ property filter list.
+
+ + `element_property_name`: An identifier that represents the name of the
+ element property.
+
+ + `element_property_value`: A scalar expression that represents the value for
+ the element property. This value can be a `NULL` literal, but the `NULL`
+ literal is interpreted as `= NULL`, not `IS NULL` when the
+ element property filter is applied.
+
+ Examples:
+
+ ```sql {.no-copy}
+ (n:Person {age: 20})
+ ```
+
+ ```sql {.no-copy}
+ (n:Person {id: n.age})
+ ```
+
+ ```sql {.no-copy}
+ (n1:Person)-[e: Owns {since: 2023}]->(n2:Account)
+ ```
+
+ ```sql {.no-copy}
+ (:Person {id: 100, age: 20})-[e:Knows]->(n2:Person)
+ ```
+
+ ```sql {.no-copy}
+ (n:Person|Student {id: n.age + n.student_id})
+ ```
+
+ ```sql {.no-copy}
+ (n:Person {age: 20, id: 30})
+ ```
+
+ ```sql {.no-copy}
+ (n {id: 100, age: 20})
+ ```
+
+ ```sql {.no-copy}
+ (n:Person {id: 10 + n.age})-[e:Knows {since: 2023 + e.id}]
+ ```
+
+ The following are equivalent:
+
+ ```sql {.no-copy}
+ (n:Person WHERE n.id = 100 AND n.age = 20)
+ ```
+
+ ```sql {.no-copy}
+ (n:Person {id: 100, age: 20})
+ ```
+
+ The following are equivalent:
+
+ ```sql {.no-copy}
+ (a:Employee {employee_id: 10})->(:University)<-(a:Alumni {alumni_id: 20})
+ ```
+
+ ```sql {.no-copy}
+ (a:Employee&Alumni {employee_id: 10, alumni_id: 20})->
+ (:University)<-(a:Employee&Alumni {employee_id: 10, alumni_id: 20})
+ ```
+
+ Although a `NULL` literal can be used as property value in the
+ property filter, the semantics is `= NULL`, not `IS NULL`.
+ This distinction is important when you create an element pattern:
+
+ ```sql {.no-copy}
+ (n:Person {age: NULL}) -- '= NULL'
+ (n:Person WHERE n.age = NULL) -- '= NULL'
+ (n:Person WHERE n.age IS NULL) -- 'IS NULL'
+ ```
+
+ The following produce errors:
+
+ ```sql {.bad .no-copy}
+ -- Error: The property specification for n2 can't reference properties in
+ -- e and n1.
+ (n1:Person)-[e:Knows]->(n2:Person {id: e.since+n1.age})
+ ```
+
+ ```sql {.bad .no-copy}
+ -- Error: Aggregate expressions are not allowed.
+ (n:Person {id: SUM(n.age)})
+ ```
+
+ ```sql {.bad .no-copy}
+ -- Error: A property called unknown_property does not exist for Person.
+ (n:Person {unknown_property: 100})
+ ```
+
+ ```sql {.bad .no-copy}
+ -- Error: An element property filter list can't be empty
+ (n:Person {})
+ ```
+
+#### Details
+
+Nodes and edges matched by an element pattern are referred to as graph elements.
+Graph elements can be used in GQL [predicates][graph-predicates], [functions][graph-functions]
+and subqueries within GQL.
+
+Set operations support graph elements that have a common [supertype][supertypes].
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following query matches all nodes in the graph. `n` is a graph pattern
+variable that is bound to the matching nodes:
+
+```sql
+GRAPH FinGraph
+MATCH (n)
+RETURN LABELS(n) AS label
+
+/*-----------+
+ | label |
+ +-----------+
+ | [Account] |
+ | [Account] |
+ | [Account] |
+ | [Person] |
+ | [Person] |
+ | [Person] |
+ +-----------*/
+```
+
+The following query matches all edges in the graph.
+`e` is a graph pattern variable that is bound to the matching edges:
+
+```sql
+GRAPH FinGraph
+MATCH -[e]->
+RETURN e.id
+
+/*----+
+ | id |
+ +----+
+ | 20 |
+ | 7 |
+ | 7 |
+ | 20 |
+ | 16 |
+ | 1 |
+ | 3 |
+ | 2 |
+ +----*/
+```
+
+The following queries match all nodes with a given label in the graph. `n` is
+a graph pattern variable that is bound to the matching nodes:
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person)
+RETURN n.name, n.id
+
+/*-----------+
+ | name | id |
+ +-----------+
+ | Alex | 1 |
+ | Dana | 2 |
+ | Lee | 3 |
+ +-----------*/
+```
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person|Account)
+RETURN n.id, n.name, n.nick_name
+
+/*----------------------------+
+ | id | name | nick_name |
+ +----------------------------+
+ | 7 | NULL | Vacation Fund |
+ | 16 | NULL | Vacation Fund |
+ | 20 | NULL | Rainy Day Fund |
+ | 1 | Alex | NULL |
+ | 2 | Dana | NULL |
+ | 3 | Lee | NULL |
+ +----------------------------*/
+```
+
+The following query matches all edges in the graph that have the `Owns` label.
+`e` is a graph pattern variable that is bound to the matching edges:
+
+```sql
+GRAPH FinGraph
+MATCH -[e:Owns]->
+RETURN e.id
+
+/*----+
+ | id |
+ +----+
+ | 1 |
+ | 3 |
+ | 2 |
+ +----*/
+```
+
+In the following query, the `WHERE` clause is used to filter out nodes whose
+`birthday` property is no greater than `1990-01-10`:
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person WHERE n.birthday > '1990-01-10')
+RETURN n.name
+
+/*------+
+ | name |
+ +------+
+ | Alex |
+ +------*/
+```
+
+In the following query, the `WHERE` clause is used to only include edges whose
+`create_time` property is greater than `2020-01-14` and less than `2020-05-14`:
+
+```sql
+GRAPH FinGraph
+MATCH -[e:Owns WHERE e.create_time > '2020-01-14'
+ AND e.create_time < '2020-05-14']->
+RETURN e.id
+
+/*----+
+ | id |
+ +----+
+ | 2 |
+ | 3 |
+ +----*/
+```
+
+In the following query, the
+[`PROPERTY_EXISTS` predicate][graph-predicates] is used to only include nodes
+that have a `name` property:
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person|Account WHERE PROPERTY_EXISTS(n, name))
+RETURN n.id, n.name
+
+/*-----------+
+ | id | name |
+ +-----------+
+ | 1 | Alex |
+ | 2 | Dana |
+ | 3 | Lee |
+ +-----------*/
+```
+
+You can filter graph elements with property filters. The following query
+uses a property filter, `{is_blocked: false}`, to only include elements
+that have the `is_blocked` property set as `false`:
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account {is_blocked: false})
+RETURN a.id
+
+/*----+
+ | id |
+ +----+
+ | 7 |
+ | 20 |
+ +----*/
+```
+
+You can use multiple property element filters to filter results. The following
+query uses the property element filter list,
+`{is_blocked: false, nick_name: 'Vacation Fund'}`
+to only include elements that have the `is_blocked` property set as `false`
+and the `nick_name` property set as `Vacation Fund`:
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account {is_blocked: false, nick_name: 'Vacation Fund'})
+RETURN a.id
+
+/*----+
+ | id |
+ +----+
+ | 7 |
+ +----*/
+```
+
+The following query matches right directed `Transfers` edges connecting two
+`Account` nodes.
+
+```sql
+GRAPH FinGraph
+MATCH (src:Account)-[transfer:Transfers]->(dst:Account)
+RETURN src.id AS src_id, transfer.amount, dst.id AS dst_id
+
+/*--------------------------+
+ | src_id | amount | dst_id |
+ +--------------------------+
+ | 7 | 300 | 16 |
+ | 7 | 100 | 16 |
+ | 16 | 300 | 20 |
+ | 20 | 500 | 7 |
+ | 20 | 200 | 16 |
+ +--------------------------*/
+```
+
+The following query matches any direction `Transfers` edges connecting two
+`Account` nodes.
+
+```sql
+GRAPH FinGraph
+MATCH (src:Account)-[transfer:Transfers]-(dst:Account)
+RETURN src.id AS src_id, transfer.amount, dst.id AS dst_id
+
+/*--------------------------+
+ | src_id | amount | dst_id |
+ +--------------------------+
+ | 16 | 300 | 7 |
+ | 16 | 100 | 7 |
+ | 20 | 300 | 7 |
+ | 7 | 500 | 16 |
+ | 7 | 200 | 16 |
+ | 20 | 300 | 16 |
+ | 20 | 100 | 16 |
+ | 16 | 300 | 20 |
+ | 7 | 500 | 20 |
+ | 16 | 200 | 20 |
+ +--------------------------*/
+```
+
+The following query matches left directed edges connecting `Person` nodes to
+`Account` nodes, using the left directed abbreviated edge pattern.
+
+```sql
+GRAPH FinGraph
+MATCH (account:Account)<-(person:Person)
+RETURN account.id, person.name
+
+/*------------+
+ | id | name |
+ +------------+
+ | 7 | Alex |
+ | 20 | Dana |
+ | 16 | Lee |
+ +------------*/
+```
+
+You can reuse variable names in patterns. The same variable name binds to the
+same node or edge. The following query reuses a variable called `a`:
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(a:Account)
+RETURN a.id AS a_id
+
+/*------+
+ | a_id |
+ +------+
+ | 16 |
+ | 20 |
+ +------*/
+```
+
+In the following query, `a` and `a2` are different variable names but can match
+the same node:
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(a2)
+RETURN a.id AS a_id, a2.id AS a2_id
+
+/*--------------+
+ | a_id | a2_id |
+ +--------------+
+ | 20 | 16 |
+ | 20 | 16 |
+ | 7 | 20 |
+ | 7 | 20 |
+ | 20 | 20 |
+ | 16 | 7 |
+ | 16 | 16 |
+ +--------------*/
+```
+
+You need to explicitly apply the `WHERE` filter if you only want to match a path
+if `a` and `a2` are different. For example:
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[t1:Transfers]->(mid:Account)-[t2:Transfers]->(a2)
+WHERE a.id != a2.id
+RETURN a.id AS a_id, a2.id AS a2_id
+
+/*--------------+
+ | a_id | a2_id |
+ +--------------+
+ | 20 | 16 |
+ | 20 | 16 |
+ | 7 | 20 |
+ | 7 | 20 |
+ | 16 | 7 |
+ +--------------*/
+```
+
+[graph-pattern-variables]: #graph_pattern_variables
+
+[label-expression-definition]: #label_expression_definition
+
+[supertypes]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#supertypes
+
+[graph-operators]: https://github.com/google/zetasql/blob/master/docs/operators.md#graph_logical_operators
+
+[graph-predicates]: https://github.com/google/zetasql/blob/master/docs/operators.md#graph_predicates
+
+[graph-functions]: https://github.com/google/zetasql/blob/master/docs/graph-gql-functions.md
+
+[field-access-operator]: https://github.com/google/zetasql/blob/master/docs/operators.md#field_access_operator
+
+[to-json-func]: https://github.com/google/zetasql/blob/master/docs/json_functions.md#to_json
+
+## Subpath pattern
+
+
+A subpath pattern matches a portion of a path. You can create a subpath pattern
+by enclosing a portion of a path pattern within parentheses. A subpath pattern
+can contain inner subpath patterns.
+
+#### Rules
+
++ A subpath pattern can be combined with node patterns, edge patterns, or
+ other subpaths on either end.
++ The portion of a path pattern enclosed within a subpath pattern must adhere
+ to the same rules as a standard path pattern.
++ A subpath pattern can contain subpath patterns. This results in
+ outer subpath patterns and inner subpath patterns.
++ Inner subpath patterns are resolved first, followed by
+ outer subpath patterns, and then the rest of the path pattern.
++ If a variable is declared outside of a subpath pattern, it
+ can't be referenced inside the subpath pattern.
++ If a variable is declared inside of a subpath pattern, it can
+ be referenced outside of the subpath pattern.
+
+#### Details
+
+When you execute a query, an empty node pattern is added at the beginning
+and end of a subpath if the subpath doesn't already begin and end with
+node patterns. For example:
+
+
+
+
+ Before |
+ After |
+
+
+
+
+ (node edge node) |
+ (node edge node) |
+
+
+ (edge node) |
+ (empty_node edge node) |
+
+
+ (node edge) |
+ (node edge empty_node) |
+
+
+ (edge) |
+ (empty_node edge empty_node) |
+
+
+
+
+If this results in two node patterns that are
+next to each other, or a node pattern that is next to a subpath, a `SAME`
+operation is performed on the consecutive node patterns.
+
+The following are examples of subpath patterns:
+
+ ```sql {.no-copy}
+ -- Success: e and p are both declared within the same subpath pattern and
+ -- can be referenced in that subpath pattern.
+ (-[e:LocatedIn]->(p:Person)->(c:City) WHERE p.id = e.id)
+ ```
+
+ ```sql {.no-copy}
+ -- Success: e and p are both declared within the same subpath pattern
+ -- hierarchy and can be referenced inside of that subpath pattern hierarchy.
+ (-[e:LocatedIn]->((p:Person)->(c:City)) WHERE p.id = e.id)
+ ```
+
+ ```sql {.no-copy}
+ -- Error: e is declared outside of the inner subpath pattern and therefore
+ -- can't be referenced inside of the inner subpath pattern.
+ (-[e:LocatedIn]->((p:Person)->(c:City) WHERE p.id = e.id))
+ ```
+
+ ```sql {.no-copy}
+ -- Success: e and p are declared in a subpath pattern and can be used outside
+ -- of the subpath pattern.
+ (-[e:LocatedIn]->(p:Person))->(c:City) WHERE p.id = e.id
+ ```
+
+ ```sql {.no-copy}
+ -- No subpath patterns:
+ (p:Person)-[e:LocatedIn]->(c:City)-[s:StudyAt]->(u:School)
+ ```
+
+ ```sql {.no-copy}
+ -- One subpath pattern on the left:
+ ((p:Person)-[e:LocatedIn]->(c:City))-[s:StudyAt]->(u:School)
+ ```
+
+ ```sql {.no-copy}
+ -- One subpath pattern on the right:
+ (p:Person)-[e:LocatedIn]->((c:City)-[s:StudyAt]->(u:School))
+ ```
+
+ ```sql {.no-copy}
+ -- One subpath pattern around the entire path pattern:
+ ((p:Person)-[e:LocatedIn]->(c:City)-[s:StudyAt]->(u:School))
+ ```
+
+ ```sql {.no-copy}
+ -- One subpath pattern that contains only a node pattern:
+ ((p:Person))-[e:LocatedIn]->(c:City)-[s:StudyAt]->(u:School)
+ ```
+
+ ```sql {.no-copy}
+ -- One subpath pattern that contains only an edge pattern:
+ (p:Person)(-[e:LocatedIn]->)(c:City)-[s:StudyAt]->(u:School)
+ ```
+
+ ```sql {.no-copy}
+ -- Two subpath patterns, one inside the other:
+ ((p:Person)(-[e:LocatedIn]->(c:City)))-[s:StudyAt]->(u:School)
+ ```
+
+ ```sql {.no-copy}
+ -- Three consecutive subpath patterns:
+ ((p:Person))(-[e:LocatedIn]->(c:City))(-[s:StudyAt]->(u:School))
+ ```
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+In the following query, the subpath
+`(src:Account)-[t1:Transfers]->(mid:Account)` is evaluated first, then the rest
+of the path pattern:
+
+```sql
+GRAPH FinGraph
+MATCH
+ ((src:Account)-[t1:Transfers]->(mid:Account))-[t2:Transfers]->(dst:Account)
+RETURN
+ src.id AS src_account_id, mid.id AS mid_account_id, dst.id AS dst_account_id
+
+/*--------------------------------------------------+
+ | src_account_id | mid_account_id | dst_account_id |
+ +--------------------------------------------------+
+ | 20 | 7 | 16 |
+ | 20 | 7 | 16 |
+ | 7 | 16 | 20 |
+ | 7 | 16 | 20 |
+ | 20 | 16 | 20 |
+ | 16 | 20 | 7 |
+ | 16 | 20 | 16 |
+ +--------------------------------------------------*/
+```
+
+## Quantified path pattern
+
+
+Note: Syntax wrapped in double quotes (`""`) is required.
+
+
+quantified_path_primary:
+ path_primary
+ { fixed_quantifier | bounded_quantifier }
+
+fixed_quantifier:
+ "{" bound "}"
+
+bounded_quantifier:
+ "{" [ lower_bound ], upper_bound "}"
+
+
+#### Description
+
+A quantified path pattern is a path pattern with a portion that can repeat
+within a specified range. You can specify the range, using a quantifier. A
+quantified path pattern is commonly used to match variable-length paths.
+
+#### Definitions
+
++ `quantified_path_primary`: The quantified path pattern to add to the graph
+query.
+
++ `path_primary`: The portion of a path pattern to be quantified.
++ `fixed_quantifier`: The exact number of times the path pattern portion must
+ repeat.
+
+ + `bound`: A positive integer that represents the exact number of repetitions.
++ `bounded_quantifier`: The minimum and maximum number of times the path pattern
+ portion can repeat.
+
+ + `lower_bound`: A non-negative integer that represents the minimum number of
+ times that the path pattern portion must repeat. If a lower bound is not
+ provided, 0 is used by default.
+
+ + `upper_bound`: A positive integer that represents the maximum number of
+ times that the path pattern portion can repeat. This number must be
+ specified and be equal to or greater than `lower_bound`.
+
+#### Details
+
++ A path must have a _minimum node count_ greater than 0. The minimum node
+ count of a quantified portion within the path is calculated as:
+
+ ```none
+ min_node_count = lower_quantifier * node_count_of_quantified_path
+ ```
+
+ When `bound` or `lower_bound` of the quantified path pattern portion is 0,
+ the path must contain other parts with _minimum node count_ greater than 0.
++ A quantified path must have a _minimum path length_ greater than 0. The
+ minimum path length of a quantified path is calculated as:
+
+ ```none
+ min_path_length = lower_quantifier * length_of_quantified_path
+ ```
+
+ The path length of a node is 0. The path length of an edge is 1.
++ A quantified path pattern with `bounded_quantifier` matches paths of any
+ length between the lower and the upper bound. This is equivalent to unioning
+ match results from multiple quantified path patterns with `fixed_quantifier`,
+ one for each number between the lower bound and upper bound.
++ Quantification is allowed on an edge or subpath. When quantifying an edge,
+ the edge pattern is canonicalized into a subpath.
+
+ ```none
+ -[]->{1, 5}
+ ```
+
+ is canonicalized into
+
+ ```none
+ (()-[]->()){1, 5}
+ ```
++ Multiple quantifications are allowed in the same graph pattern, however,
+ quantifications may not be nested.
++ Only singleton variables can be multiply-declared. A singleton variable is a
+ variable that binds exactly to one node or edge.
+
+ In the following `MATCH` statement, the variables `p`, `knows`, and `f` are
+ singleton variables, which bind exactly to one element each.
+
+ ```sql {.no-copy}
+ MATCH (p)-[knows]->(f)
+ ```
++ Variables defined within a quantified path pattern bind to an array of
+ elements outside of the quantified path pattern and are called group
+ variables.
+
+ In the following `MATCH` statement, the path pattern has the quantifier
+  `{1, 3}`. The variables `p`, `knows`, and `f` each bind to an array of
+ elements in the `MATCH` statement result and are considered group variables:
+
+ ```sql {.no-copy}
+ MATCH ((p)-[knows]->(f)){1, 3}
+ ```
+
+ Within the quantified pattern, before the quantifier is applied, `p`, `knows`,
+ and `f` each bind to exactly one element and are considered singleton
+ variables.
+
+Examples:
+
+```sql {.no-copy}
+-- Quantified path pattern with a fixed quantifier:
+MATCH ((p:Person)-[k:Knows]->(f:Person)){2}
+
+-- Equivalent:
+MATCH ((p0:Person)-[k0:Knows]->(f0:Person)(p1:Person)-[k1:Knows]->(f1:Person))
+```
+
+```sql {.no-copy}
+-- Quantified path pattern with a bounded quantifier:
+MATCH ((p:Person)-[k:Knows]->(f:Person)){1,3}
+
+-- Equivalent:
+MATCH ((p:Person)-[k:Knows]->(f:Person)){1}
+UNION ALL
+MATCH ((p:Person)-[k:Knows]->(f:Person)){2}
+UNION ALL
+MATCH ((p:Person)-[k:Knows]->(f:Person)){3}
+```
+
+```sql {.no-copy}
+-- Quantified subpath with default lower bound (0) and an upper bound.
+-- When subpath is repeated 0 times, the path pattern is semantically equivalent
+-- to (source_person:Person)(dest_person:Person).
+MATCH (source_person:Person)((p:Person)-[k:Knows]->(f:Person)){, 4}(dest_person:Person)
+```
+
+```sql {.no-copy}
+-- Edge quantification is canonicalized into subpath quantification:
+MATCH (p:Person)-[k:Knows]->{1,2}(f:Person)
+
+-- Equivalent:
+MATCH (p:Person)(()-[k:Knows]->()){1,2}(f:Person)
+```
+
+```sql {.no-copy}
+-- ERROR: Minimum path length for the quantified path is 0.
+MATCH (p:Person){1, 3}
+```
+
+```sql {.no-copy}
+-- ERROR: Minimum node count and minimum path length for the entire path is 0.
+MATCH ((p:Person)-[k:Knows]->(f:Person)){0}
+```
+
+```sql {.no-copy}
+-- ERROR: Minimum path length for the entire path is 0 when quantified portion
+-- is repeated 0 times.
+MATCH (:Person)((p:Person)-[k:Knows]->(f:Person)){0, 3}
+```
+
+```sql {.no-copy}
+-- ERROR: `p` is declared once as a group variable and once as a singleton
+-- variable.
+MATCH (s:Person) ((p:Person)-[k:Knows]->(f:Person)){1, 3}->(p:Person)
+```
+
+```sql {.no-copy}
+-- ERROR: `p` is declared twice as a group variable.
+MATCH ((p:Person)-[k:Knows]->(f:Person)){1, 3}-[x:Knows]->((p:Person)-[z:Knows]-(d:Person)){2}
+```
+
+```sql {.no-copy}
+-- Since both declarations of `p` are within the quantifier’s pattern,
+-- they are treated as singleton variables and can be multiply-declared.
+MATCH (s:Person)((p:Person)-[k:Knows]->(p:Person)){1, 3}
+```
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following query uses a quantified path pattern to match all of the
+destination accounts that are one to three transfers away from a source account
+with `id` equal to `7`:
+
+```sql
+GRAPH FinGraph
+MATCH (src:Account {id: 7})-[e:Transfers]->{1, 3}(dst:Account)
+WHERE src != dst
+RETURN ARRAY_LENGTH(e) AS hops, dst.id AS dst_account_id
+
+/*-----------------------+
+ | hops | dst_account_id |
+ +-----------------------+
+ | 1 | 16 |
+ | 3 | 16 |
+ | 3 | 16 |
+ | 1 | 16 |
+ | 2 | 20 |
+ | 2 | 20 |
+ +-----------------------*/
+```
+
+The following query uses a quantified path pattern to match paths between
+accounts with one to two transfers through intermediate accounts that are
+blocked:
+
+```sql
+GRAPH FinGraph
+MATCH
+ (src:Account)
+ ((:Account)-[:Transfers]->(mid:Account) WHERE mid.is_blocked){1,2}
+ -[:Transfers]->(dst:Account)
+RETURN src.id AS src_account_id, dst.id AS dst_account_id
+
+/*---------------------------------+
+ | src_account_id | dst_account_id |
+ +---------------------------------+
+ | 7 | 20 |
+ | 7 | 20 |
+ | 20 | 20 |
+ +---------------------------------*/
+```
+
+In the following query, `e` is declared in a quantified path pattern. When
+referenced outside of that pattern, `e` is a group variable bound to an array
+of `Transfers`. You can use the group variable in aggregate functions such
+as `SUM` and `ARRAY_LENGTH`:
+
+```sql
+GRAPH FinGraph
+MATCH
+ (src:Account {id: 7})-[e:Transfers WHERE e.amount > 100]->{0,2}
+ (dst:Account)
+WHERE src.id != dst.id
+LET total_amount = SUM(e.amount)
+RETURN
+ src.id AS src_account_id, dst.id AS dst_account_id,
+ ARRAY_LENGTH(e) AS number_of_hops, total_amount
+
+/*-----------------------------------------------------------------+
+ | src_account_id | dst_account_id | number_of_hops | total_amount |
+ +-----------------------------------------------------------------+
+ | 7 | 16 | 1 | 300 |
+ | 7 | 20 | 2 | 600 |
+ +-----------------------------------------------------------------*/
+```
+
+[graph-pattern-definition]: #graph_pattern_definition
+
+## Label expression
+
+
+
+label_expression:
+ {
+ label_name
+ | or_expression
+ | and_expression
+ | not_expression
+ }
+
+
+
+#### Description
+
+A label expression is formed by combining one or more labels with logical
+operators (AND, OR, NOT) and parentheses for grouping.
+
+#### Definitions
+
++ `label_name`: The label to match. Use `%` to match any label in the
+ graph. For example:
+
+ ```sql {.no-copy}
+ (p:Person)
+ ```
+
+ ```sql {.no-copy}
+ (p:%)
+ ```
++ `or_expression`: [GQL logical `OR` operation][graph-operators] for
+ label expressions. For example:
+
+ ```sql {.no-copy}
+ (p:(Singer|Writer))
+ ```
+
+ ```sql {.no-copy}
+ (p:(Singer|(Producer|Writer)))
+ ```
+
+ ```sql {.no-copy}
+ (p:(Singer|(Producer&Writer)))
+ ```
++ `and_expression`: [GQL logical `AND` operation][graph-operators] for
+ label expressions. For example:
+
+ ```sql {.no-copy}
+ (p:(Singer&Producer))
+ ```
+
+ ```sql {.no-copy}
+ (p:(Singer&(Writer|Producer)))
+ ```
+
+ ```sql {.no-copy}
+ (p:(Singer&(Writer&Producer)))
+ ```
++ `not_expression`: [GQL logical `NOT` operation][graph-operators] for
+ label expressions. For example:
+
+ ```sql {.no-copy}
+ (p:!Singer)
+ ```
+
+ ```sql {.no-copy}
+ (p:(!Singer&!Writer))
+ ```
+
+ ```sql {.no-copy}
+ (p:(Singer|(!Writer&!Producer)))
+ ```
+
+#### Details
+
+A label exposes a set of properties. When a node or edge carries a certain label,
+the properties exposed by that label are accessible through the node or edge.
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following query matches all nodes with the label `Person`
+in the [`FinGraph`][fin-graph] property graph.
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person)
+RETURN n.name
+
+/*------+
+ | name |
+ +------+
+ | Alex |
+ | Dana |
+ | Lee |
+ +------*/
+```
+
+The following query matches all nodes that have either a `Person`
+or an `Account` label in the [`FinGraph`][fin-graph] property graph.
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person|Account)
+RETURN n.id
+
+/*----+
+ | id |
+ +----+
+ | 7 |
+ | 16 |
+ | 20 |
+ | 1 |
+ | 2 |
+ | 3 |
+ +----*/
+```
+
+[graph-operators]: https://github.com/google/zetasql/blob/master/docs/operators.md#graph_logical_operators
+
+## Path search prefix
+
+
+
+path_search_prefix:
+ {
+ ALL
+ | ANY
+ | ANY SHORTEST
+ }
+
+
+#### Description
+
+Restricts a path pattern to return all paths, any path, or a shortest path from
+each data partition.
+
+#### Definitions
+
++ `ALL` (default) : Returns all paths matching the path pattern. This is the
+ default value when no search prefix is specified.
++ `ANY`: Returns any path matching the path pattern from each data partition.
++ `ANY SHORTEST`: Returns a shortest path (the path with the least number of
+  edges) matching the path pattern from each data partition. If there is more
+  than one shortest path per partition, returns any one of them.
+
+#### Details
+
+The path search prefix first partitions the match results by their endpoints
+(the first and last nodes) then selects paths from each group.
+
+The `ANY` and `ANY SHORTEST` prefixes can return multiple paths, one
+for each distinct pair of endpoints.
+
+When using `ANY` or `ANY SHORTEST` prefixes in a path pattern, do not reuse
+variables defined within that pattern elsewhere in the same `MATCH` statement,
+unless the variable represents an endpoint. Each prefix needs to operate
+independently on its associated path pattern.
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following query matches a shortest path between each pair of `[a, b]`.
+
+```sql {.no-copy}
+GRAPH FinGraph
+MATCH ANY SHORTEST (a:Account)-[t:Transfers]->{1, 4} (b:Account)
+WHERE a.is_blocked
+LET total = SUM(t.amount)
+RETURN a.id AS a_id, total, b.id AS b_id
+
+/*------+-------+------+
+ | a_id | total | b_id |
+ +------+-------+------+
+ | 16 | 500 | 16 |
+ | 16 | 800 | 7 |
+ | 16 | 300 | 20 |
+ +------+-------+------*/
+```
+
+The following query matches any path between each pair of `[a, b]`.
+
+```sql {.no-copy}
+GRAPH FinGraph
+MATCH ANY (a:Account)->(mid:Account)->(b:Account)
+WHERE a.is_blocked
+RETURN a.id AS a_id, mid.id AS mid_id, b.id AS b_id
+
+/*------+--------+------+
+ | a_id | mid_id | b_id |
+ +------+--------+------+
+ | 16 | 20 | 16 |
+ | 16 | 20 | 7 |
+ +------+--------+------*/
+```
+
+The following query matches all paths between each pair of `[a, b]`. The `ALL`
+prefix does not filter out any result.
+
+```sql {.no-copy}
+GRAPH FinGraph
+MATCH ALL (a:Account {id: 20})-[t:Transfers]->(b:Account)
+RETURN a.id AS a_id, t.amount, b.id AS b_id
+
+-- Equivalent:
+GRAPH FinGraph
+MATCH (a:Account {id: 20})-[t:Transfers]->(b:Account)
+RETURN a.id AS a_id, t.amount, b.id AS b_id
+
+/*------+--------+------+
+ | a_id | amount | b_id |
+ +------+--------+------+
+ | 20 | 500 | 7 |
+ | 20 | 200 | 16 |
+ +------+--------+------*/
+```
+
+The following query finds the middle account of any two-hop loop that starts and
+ends with the same account with `id = 20`, and gets the middle account's owner.
+
+```sql {.no-copy}
+GRAPH FinGraph
+MATCH ANY (a:Account {id: 20})->(mid:Account)->(a:Account)
+MATCH ALL (p:Person)->(mid)
+RETURN p.name, mid.id
+
+/*------+----+
+ | name | id |
+ +------+----+
+ | Lee | 16 |
+ +------+----*/
+```
+
+The following query produces an error because `mid`, defined within a path
+pattern with the `ANY` prefix, can't be reused outside that pattern in the
+same `MATCH` statement. This is not permitted because `mid` is not an endpoint.
+
+```sql {.bad .no-copy}
+-- Error
+GRAPH FinGraph
+MATCH
+ ANY (a:Account {id: 20})->(mid:Account)->(a:Account)->(mid:Account)->(a:Account),
+ ALL (p:Person)->(mid)
+RETURN p.name
+```
+
+The following query succeeds because `a`, even though defined in a path pattern
+with the `ANY` path search prefix, can be reused outside of the path pattern
+within the same `MATCH` statement, since `a` is an endpoint.
+
+```sql
+-- Succeeds
+GRAPH FinGraph
+MATCH
+ ANY (a:Account {id: 20})->(mid:Account)->(a:Account)->(mid:Account)->(a:Account),
+ ALL (p:Person)->(a)
+RETURN p.name
+
+/*------+
+ | name |
+ +------+
+ | Dana |
+ +------*/
+```
+
+The following query succeeds because `mid` is not reused outside of the path
+pattern with the `ANY` prefix in the same `MATCH` statement.
+
+```sql
+-- Succeeds
+GRAPH FinGraph
+MATCH ANY (a:Account {id: 20})->(mid:Account)->(a:Account)->(mid:Account)->(a:Account)
+MATCH ALL (p:Person)->(mid)
+RETURN p.name
+
+/*------+
+ | name |
+ +------+
+ | Lee |
+ +------*/
+```
+
+All rules for [quantified path patterns][quantified-path-pattern] apply. In the
+following examples, although `p` is on the boundary of the first path, it is a
+group variable and still not allowed to be declared again outside its parent
+quantified path:
+
+```sql {.bad .no-copy}
+-- Error
+GRAPH FinGraph
+MATCH ANY ((p:Person)->(f:Person)){1, 3},
+ ALL ->(p)->
+RETURN p.name
+```
+
+```sql {.bad .no-copy}
+-- Error
+GRAPH FinGraph
+MATCH ANY ((p:Person)->(f:Person)){1, 3},
+ ALL ((p)->){1, 3}
+RETURN p.name
+```
+
+[quantified-path-pattern]: #quantified_paths
+
+## Path mode
+
+
+
+path_mode:
+ {
+ WALK [ PATH | PATHS ]
+ | TRAIL [ PATH | PATHS ]
+ }
+
+
+#### Description
+
+Includes or excludes paths that have repeating edges based on the specified
+mode.
+
+#### Definitions
+
++ `WALK` (default) : Keeps all paths. If the path mode is not present, `WALK`
+ is used by default.
++ `TRAIL`: Filters out paths that have repeating edges.
+
+#### Details
+
+A path mode is typically added in order to filter out paths with duplicate
+edges. It can be applied to any path or subpath pattern.
+
+A path mode is applied to the whole path or subpath pattern that it restricts,
+regardless of whether other modes are used on subpath patterns.
+
+A path mode is applied to path patterns only, not graph patterns.
+
+A path can have either a path mode or a [path search prefix][search-prefix], but
+not both.
+
+Keywords `PATH` and `PATHS` are syntactic sugar and have no effect on execution.
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following query demonstrates the use of the `WALK` path mode on a
+non-quantified path pattern. The first path on the results uses the same edge
+for `t1` and `t3`. Notice that results use property `id` as a proxy for the
+edge's id for illustration purposes.
+
+```sql {.no-copy}
+GRAPH FinGraph
+MATCH
+ WALK (a1:Account)-[t1:Transfers]->(a2:Account)-[t2:Transfers]->
+ (a3:Account)-[t3:Transfers]->(a4:Account)
+WHERE a1.id < a4.id
+RETURN
+ t1.id as t1_id, t2.id as t2_id, t3.id as t3_id
+
+/*-------+-------+-------+
+ | t1_id | t2_id | t3_id |
+ +-------+-------+-------+
+ | 16 | 20 | 16 |
+ | 7 | 16 | 20 |
+ | 7 | 16 | 20 |
+ +-------+-------+-------*/
+```
+
+The following query demonstrates the use of the `TRAIL` path mode on a
+non-quantified path pattern. Notice that the path whose `t1` and `t3` edges are
+equal has been filtered out.
+
+```sql {.no-copy}
+GRAPH FinGraph
+MATCH
+ TRAIL (a1:Account)-[t1:Transfers]->(a2:Account)-[t2:Transfers]->
+ (a3:Account)-[t3:Transfers]->(a4:Account)
+WHERE a1.id < a4.id
+RETURN
+ t1.id as t1_id, t2.id as t2_id, t3.id as t3_id
+
+/*-------+-------+-------+
+ | t1_id | t2_id | t3_id |
+ +-------+-------+-------+
+ | 7 | 16 | 20 |
+ | 7 | 16 | 20 |
+ +-------+-------+-------*/
+```
+
+The following query demonstrates that path modes are applied on path patterns
+and not on graph patterns. Notice that, if `TRAIL` was applied on the graph
+pattern then there would be zero results returned since edge `t1` is explicitly
+duplicated. Instead, it is only applied on the path pattern `(a1)-[t1]-(a2)`.
+
+```sql {.no-copy}
+GRAPH FinGraph
+MATCH TRAIL (a1)-[t1]-(a2), (a2)-[t1]-(a3)
+RETURN COUNT(1) as num_paths
+
+/*-----------+
+ | num_paths |
+ +-----------+
+ | 16 |
+ +-----------*/
+
+GRAPH FinGraph
+MATCH TRAIL (a1)-[t1]-(a2)-[t1]-(a3)
+RETURN COUNT(1) as num_paths
+
+/*-----------+
+ | num_paths |
+ +-----------+
+ | 0 |
+ +-----------*/
+```
+
+The following query demonstrates the use of the `TRAIL` path mode on a
+quantified path pattern. Notice that `TRAIL` is applied on a path pattern that
+is the concatenation of four subpath patterns of the form `()-[:Transfers]->()`.
+
+```sql {.no-copy}
+GRAPH FinGraph
+MATCH TRAIL (a1:Account)-[t1:Transfers]->{4}(a5:Account)
+RETURN COUNT(1) as num_paths
+
+/*-----------+
+ | num_paths |
+ +-----------+
+ | 6 |
+ +-----------*/
+```
+
+The following query demonstrates that path modes are applied individually on the
+path or subpath pattern in which they are defined. In this example, the
+existence of `WALK` does not negate the semantics of the outer `TRAIL`. Notice
+that the result is the same as in the previous example where `WALK` is not
+present.
+
+```sql {.no-copy}
+GRAPH FinGraph
+MATCH TRAIL (WALK (a1:Account)-[t1:Transfers]->{4}(a5:Account))
+RETURN COUNT(1) as num_paths
+
+/*-----------+
+ | num_paths |
+ +-----------+
+ | 6 |
+ +-----------*/
+```
+
+The following query demonstrates the use of the `TRAIL` path mode on a subpath
+pattern. Notice that `TRAIL` is applied on a path pattern that is the
+concatenation of three subpath patterns of the form `()-[:Transfers]->()`. Since
+edge `t4` is outside this path pattern, it can be equal to any of the edges on
+it. Compare this result with the result of the previous query.
+
+```sql {.no-copy}
+GRAPH FinGraph
+MATCH
+ (TRAIL (a1:Account)-[t1:Transfers]->{3}(a4:Account))
+ -[t4:Transfers]->(a5:Account)
+RETURN COUNT(1) as num_paths
+
+/*-----------+
+ | num_paths |
+ +-----------+
+ | 14 |
+ +-----------*/
+```
+
+The following query demonstrates the use of the `TRAIL` path mode within a
+quantified path pattern. Notice that the resulting path is the concatenation of
+two subpaths of the form
+`()-[:Transfers]->()-[:Transfers]->()-[:Transfers]->()`. Therefore each path
+includes six edges in total. `TRAIL` is applied separately on the edges of each
+of the two subpaths. Specifically, the three edges on the first subpath must be
+distinct from each other. Similarly, the three edges on the second subpath must
+also be distinct from each other. However, there may be edges that are equal
+between the two subpaths.
+
+```sql {.no-copy}
+GRAPH FinGraph
+MATCH (TRAIL -[t1:Transfers]->()-[t2:Transfers]->()-[t3:Transfers]->){2}
+RETURN COUNT(1) as num_paths
+
+/*-----------+
+ | num_paths |
+ +-----------+
+ | 26 |
+ +-----------*/
+```
+
+The following query demonstrates that there are no paths of length six with
+non-repeating edges.
+
+```sql {.no-copy}
+GRAPH FinGraph
+MATCH TRAIL -[:Transfers]->{6}
+RETURN COUNT(1) as num_paths
+
+/*-----------+
+ | num_paths |
+ +-----------+
+ | 0 |
+ +-----------*/
+```
+
+The following query demonstrates that a path can't have both a path mode and a
+[path search prefix][search-prefix]:
+
+```sql {.bad .no-copy}
+-- Error
+GRAPH FinGraph
+MATCH ANY SHORTEST TRAIL ->{1,4}
+RETURN COUNT(1) as num_paths
+```
+
+The following query demonstrates that path modes can coexist with
+[path search prefixes][search-prefix] when the path mode is placed on a subpath.
+
+```sql {.no-copy}
+GRAPH FinGraph
+MATCH ANY SHORTEST (TRAIL ->{1,4})
+RETURN COUNT(1) as num_paths
+
+/*-----------+
+ | num_paths |
+ +-----------+
+ | 18 |
+ +-----------*/
+```
+
+[quantified-path-pattern]: #quantified_paths
+
+[search-prefix]: #search_prefix
+
diff --git a/docs/graph-query-statements.md b/docs/graph-query-statements.md
new file mode 100644
index 000000000..05e6d9a59
--- /dev/null
+++ b/docs/graph-query-statements.md
@@ -0,0 +1,1857 @@
+
+
+
+
+# GQL query statements
+
+Graph Query Language (GQL) lets you execute multiple linear
+graph queries in one query. Each linear graph query generates results
+(the working table) and then passes those results to the next.
+
+GQL supports the following building blocks, which can be composited into a
+GQL query based on the [syntax rules][gql_syntax].
+
+## Language list
+
+
+
+
+ Name |
+ Summary |
+
+
+
+
+
+ GQL syntax
+ |
+ Creates a graph query with the GQL syntax. |
+
+
+
+ GRAPH clause
+ |
+
+ Specifies a property graph to query.
+ |
+
+
+
+ FILTER statement
+ |
+
+ Filters out rows in the query results that do not satisfy a specified
+ condition.
+ |
+
+
+
+ FOR statement
+ |
+
+ Unnests an ARRAY -typed expression.
+ |
+
+
+
+ LET statement
+ |
+
+ Defines variables and assigns values for later use in the current linear
+ query statement.
+ |
+
+
+
+ LIMIT statement
+ |
+ Limits the number of query results. |
+
+
+
+ MATCH statement
+ |
+ Matches data described by a graph pattern. |
+
+
+
+ NEXT statement
+ |
+ Chains multiple linear query statements together. |
+
+
+
+ OFFSET statement
+ |
+ Skips a specified number of rows in the query results. |
+
+
+
+ ORDER BY statement
+ |
+ Orders the query results. |
+
+
+
+ RETURN statement
+ |
+ Marks the end of a linear query statement and returns the results. |
+
+
+
+ SKIP statement
+ |
+ Synonym for the OFFSET statement. |
+
+
+
+ WITH statement
+ |
+
+ Passes on the specified columns, optionally filtering, renaming, and
+ transforming those results.
+ |
+
+
+
+ Set operation
+ |
+ Combines a sequence of linear query statements with a set operation. |
+
+
+
+
+
+## GQL syntax
+
+
+
+graph_query:
+ GRAPH clause
+ multi_linear_query_statement
+
+multi_linear_query_statement:
+ linear_query_statement
+ [
+ NEXT
+ linear_query_statement
+ ]
+ [...]
+
+linear_query_statement:
+ {
+ simple_linear_query_statement
+ | composite_linear_query_statement
+ }
+
+composite_linear_query_statement:
+ simple_linear_query_statement
+ set_operator simple_linear_query_statement
+ [...]
+
+simple_linear_query_statement:
+ primitive_query_statement
+ [...]
+
+
+#### Description
+
+Creates a graph query with the GQL syntax. The syntax rules define how
+to composite the building blocks of GQL into a query.
+
+#### Definitions
+
++ `primitive_query_statement`: A statement in [Query statements][language-list]
+ except for the `NEXT` statement.
++ `simple_linear_query_statement`: A list of `primitive_query_statement`s that
+ ends with a [`RETURN` statement][return].
++ `composite_linear_query_statement`: A list of
+ `simple_linear_query_statement`s composited with the [set operators][set-op].
++ `linear_query_statement`: A statement that is either a
+ `simple_linear_query_statement` or a `composite_linear_query_statement`.
++ `multi_linear_query_statement`: A list of `linear_query_statement`s chained
+ together with the [`NEXT` statement][next].
++ `graph_query`: A GQL query that starts with a [`GRAPH` clause][graph-clause],
+ then follows with a `multi_linear_query_statement`.
+
+[language-list]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#language-list
+
+[graph-clause]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#graph_query
+
+[set-op]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#gql_set
+
+[return]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#gql_return
+
+[next]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#gql_next
+
+[graph-clause]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#graph_query
+
+## `GRAPH` clause
+
+
+
+GRAPH property_graph_name
+multi_linear_query_statement
+
+
+#### Description
+
+Specifies a property graph to query. This clause must be added before the first
+linear query statement in a graph query.
+
+#### Definitions
+
++ `property_graph_name`: The name of the property graph to query.
++ `multi_linear_query_statement`: A multi linear query statement. For more
+ information, see `multi_linear_query_statement` in [GQL syntax][gql_syntax].
+
+#### Examples
+
+The following example queries the [`FinGraph`][fin-graph] property graph to find
+accounts with incoming transfers and looks up their owners:
+
+```sql
+GRAPH FinGraph
+MATCH (:Account)-[:Transfers]->(account:Account)
+RETURN account, COUNT(*) AS num_incoming_transfers
+GROUP BY account
+
+NEXT
+
+MATCH (account:Account)<-[:Owns]-(owner:Person)
+RETURN
+ account.id AS account_id, owner.name AS owner_name,
+ num_incoming_transfers
+
+/*--------------------------------------------------+
+ | account_id | owner_name | num_incoming_transfers |
+ +--------------------------------------------------+
+ | 7 | Alex | 1 |
+ | 20 | Dana | 1 |
+ | 6 | Lee | 3 |
+ +--------------------------------------------------*/
+```
+
+[graph-query-statements]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md
+
+[gql_syntax]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#gql_syntax
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+[next]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#gql_next
+
+## `FILTER` statement
+
+
+
+FILTER [ WHERE ] bool_expression
+
+
+#### Description
+
+Filters out rows in the query results that do not satisfy a specified condition.
+
+#### Definitions
+
++ `bool_expression`: A boolean expression. Only rows whose `bool_expression`
+ evaluates to `TRUE` are included. Rows whose `bool_expression` evaluates to
+ `NULL` or `FALSE` are discarded.
+
+#### Details
+
+The `FILTER` statement can reference columns in the working table.
+
+The syntax for the `FILTER` statement is similar to the syntax for the
+[graph pattern `WHERE` clause][graph-pattern-definition], but they are evaluated
+differently. The `FILTER` statement is evaluated after the previous statement.
+The `WHERE` clause is evaluated as part of the containing statement.
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+In the following query, only people who were born before `1990-01-10`
+are included in the results table:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)-[o:Owns]->(a:Account)
+FILTER p.birthday < '1990-01-10'
+RETURN p.name
+
+/*------+
+ | name |
+ +------+
+ | Dana |
+ | Lee |
+ +------*/
+```
+
+`WHERE` is an optional keyword that you can include in a `FILTER` statement.
+The following query is semantically identical to the previous query:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)-[o:Owns]->(a:Account)
+FILTER WHERE p.birthday < '1990-01-10'
+RETURN p.name
+
+/*------+
+ | name |
+ +------+
+ | Dana |
+ | Lee |
+ +------*/
+```
+
+In the following example, `FILTER` follows an aggregation step with
+grouping. Semantically, it's similar to the `HAVING` clause in SQL:
+
+```sql
+GRAPH FinGraph
+MATCH (source:Account)-[e:Transfers]->(dest:Account)
+RETURN source, dest, SUM(e.amount) AS total_amount
+GROUP BY source, dest
+
+NEXT
+
+FILTER WHERE total_amount < 400
+RETURN source.id AS source_id, dest.id AS destination_id, total_amount
+
+/*-------------------------------------------+
+ | source_id | destination_id | total_amount |
+ +-------------------------------------------+
+ | 16 | 20 | 300 |
+ | 20 | 16 | 200 |
+ +-------------------------------------------*/
+```
+
+In the following example, an error is produced because `FILTER` references
+`m`, which is not in the working table:
+
+```sql {.bad}
+-- Error: m does not exist
+GRAPH FinGraph
+MATCH (p:Person)-[o:Owns]->(a:Account)
+FILTER WHERE m.birthday < '1990-01-10'
+RETURN p.name
+```
+
+In the following example, an error is produced because even though `p` is in the
+working table, `p` does not have a property called `date_of_birth`:
+
+```sql {.bad}
+-- ERROR: date_of_birth is not a property of p
+GRAPH FinGraph
+MATCH (p:Person)-[o:Owns]->(a:Account)
+FILTER WHERE p.date_of_birth < '1990-01-10'
+RETURN p.name
+```
+
+[where-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#where_clause
+
+[having-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#having_clause
+
+[graph-pattern-definition]: https://github.com/google/zetasql/blob/master/docs/graph-patterns.md#graph_pattern_definition
+
+[horizontal-aggregation]: https://github.com/google/zetasql/blob/master/docs/graph-gql-functions.md
+
+## `FOR` statement
+
+
+
+FOR element_name IN array_expression
+ [ with_offset_clause ]
+
+with_offset_clause:
+ WITH OFFSET [ AS offset_name ]
+
+
+#### Description
+
+Unnests an `ARRAY`-typed expression and joins the result with the current working table.
+
+#### Definitions
+
++ `array_expression`: An `ARRAY`-typed expression.
++ `element_name`: The name of the element column. The name can't be the name
+ of a column that already exists in the current linear query statement.
++ `offset_name`: The name of the offset column. The name can't be the name of
+ a column that already exists in the current linear query statement. If not
+ specified, the default is `offset`.
+
+#### Details
+
+The `FOR` statement expands the working table by defining a new column for the
+elements of `array_expression`, with an optional offset column. The cardinality
+of the working table might change as a result.
+
+The `FOR` statement can reference columns in the working table.
+
+The `FOR` statement evaluation is similar to the [`UNNEST`][unnest-operator] operator.
+
+The `FOR` statement does not preserve order.
+
+An empty or `NULL` `array_expression` produces zero rows.
+
+The keyword `WITH` following the `FOR` statement is always interpreted as the
+beginning of `with_offset_clause`. If you want to use the `WITH` statement
+following the `FOR` statement, you should fully qualify the `FOR` statement with
+`with_offset_clause`, or use the `RETURN` statement instead of the `WITH`
+statement.
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+In the following query, there are three rows in the working table prior to the
+`FOR` statement. After the `FOR` statement, each row is expanded into two rows,
+one per `element` value from the array.
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)-[o:Owns]->(a:Account)
+FOR element in ["all","some"] WITH OFFSET
+RETURN p.name, element as alert_type, offset
+ORDER BY p.name, element, offset
+
+/*----------------------------+
+ | name | alert_type | offset |
+ +----------------------------+
+ | Alex | all | 0 |
+ | Alex | some | 1 |
+ | Dana | all | 0 |
+ | Dana | some | 1 |
+ | Lee | all | 0 |
+ | Lee | some | 1 |
+ +----------------------------*/
+```
+
+In the following query, there are two rows in the working table prior to the
+`FOR` statement. After the `FOR` statement, each row is expanded into a
+different number of rows, based on the value of `array_expression` for that row.
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)-[o:Owns]->(a:Account)
+FILTER p.name != "Alex"
+FOR element in GENERATE_ARRAY(1,LENGTH(p.name))
+RETURN p.name, element
+ORDER BY p.name, element
+
+/*----------------+
+ | name | element |
+ +----------------+
+ | Dana | 1 |
+ | Dana | 2 |
+ | Dana | 3 |
+ | Dana | 4 |
+ | Lee | 1 |
+ | Lee | 2 |
+ | Lee | 3 |
+ +----------------*/
+```
+
+In the following query, there are three rows in the working table prior to the
+`FOR` statement. After the `FOR` statement, no row is produced because
+`array_expression` is an empty array.
+
+```sql
+-- No rows produced
+GRAPH FinGraph
+MATCH (p:Person)
+FOR element in [] WITH OFFSET AS off
+RETURN p.name, element, off
+```
+
+In the following query, there are three rows in the working table prior to the
+`FOR` statement. After the `FOR` statement, no row is produced because
+`array_expression` is a `NULL` array.
+
+```sql
+-- No rows produced
+GRAPH FinGraph
+MATCH (p:Person)
+FOR element in CAST(NULL AS ARRAY<INT64>) WITH OFFSET
+RETURN p.name, element, offset
+```
+
+In the following example, an error is produced because `WITH` is used directly
+after the `FOR` statement. The query can be fixed by adding `WITH OFFSET` after
+the `FOR` statement, or by using `RETURN` directly instead of `WITH`.
+
+```sql {.bad}
+-- Error: Expected keyword OFFSET but got identifier "element"
+GRAPH FinGraph
+FOR element in [1,2,3]
+WITH element as col
+RETURN col
+ORDER BY col
+```
+
+```sql
+GRAPH FinGraph
+FOR element in [1,2,3] WITH OFFSET
+WITH element as col
+RETURN col
+ORDER BY col
+
+/*-----+
+ | col |
+ +-----+
+ | 1 |
+ | 2 |
+ | 3 |
+ +-----*/
+```
+
+[unnest-operator]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#unnest_operator
+
+## `LET` statement
+
+
+
+LET linear_graph_variable[, ...]
+
+linear_graph_variable:
+ variable_name = value
+
+
+#### Description
+
+Defines variables and assigns values to them for later use in the current
+linear query statement.
+
+#### Definitions
+
++ `linear_graph_variable`: The variable to define.
++ `variable_name`: The name of the variable.
++ `value`: A scalar expression that represents the value of the variable.
+ The names referenced by this expression must be in the incoming working
+ table.
+
+#### Details
+
+`LET` does not change the cardinality of the working table nor modify its
+existing columns.
+
+The variable can only be used in the current linear query statement. To use it
+in a following linear query statement, you must include it in the `RETURN`
+statement as a column.
+
+You can't define and reference a variable within the same `LET` statement.
+
+You can't redefine a variable with the same name.
+
+You can use horizontal aggregate functions in this statement. To learn more, see
+[Horizontal aggregate function calls in GQL][horizontal-aggregation].
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+In the following graph query, the variable `a` is defined and then referenced
+later:
+
+```sql
+GRAPH FinGraph
+MATCH (source:Account)-[e:Transfers]->(destination:Account)
+LET a = source
+RETURN a.id AS a_id
+
+/*------+
+ | a_id |
+ +------+
+ | 20 |
+ | 7 |
+ | 7 |
+ | 20 |
+ | 16 |
+ +------*/
+```
+
+The following `LET` statement in the second linear query statement is valid
+because `a` is defined and returned from the first linear query statement:
+
+```sql
+GRAPH FinGraph
+MATCH (source:Account)-[e:Transfers]->(destination:Account)
+LET a = source
+RETURN a
+
+NEXT
+
+LET b = a -- Valid: 'a' is defined and returned from the linear query statement above.
+RETURN b.id AS b_id
+
+/*------+
+ | b_id |
+ +------+
+ | 20 |
+ | 7 |
+ | 7 |
+ | 20 |
+ | 16 |
+ +------*/
+```
+
+The following `LET` statement in the second linear query statement is invalid
+because `a` is not returned from the first linear query statement.
+
+```sql {.bad}
+GRAPH FinGraph
+MATCH (source:Account)-[e:Transfers]->(destination:Account)
+LET a = source
+RETURN source.id
+
+NEXT
+
+LET b = a -- ERROR: 'a' does not exist.
+RETURN b.id AS b_id
+```
+
+The following `LET` statement is invalid because `a` is defined and then
+referenced in the same `LET` statement:
+
+```sql {.bad}
+GRAPH FinGraph
+MATCH (source:Account)-[e:Transfers]->(destination:Account)
+LET a = source, b = a -- ERROR: Can't define and reference 'a' in the same operation.
+RETURN a
+```
+
+The following `LET` statement is valid because `a` is defined first and then
+referenced afterwards:
+
+```sql
+GRAPH FinGraph
+MATCH (source:Account)-[e:Transfers]->(destination:Account)
+LET a = source
+LET b = a
+RETURN b.id AS b_id
+
+/*------+
+ | b_id |
+ +------+
+ | 20 |
+ | 7 |
+ | 7 |
+ | 20 |
+ | 16 |
+ +------*/
+```
+
+In the following examples, the `LET` statements are invalid because `a` is
+redefined:
+
+```sql {.bad}
+GRAPH FinGraph
+MATCH (source:Account)-[e:Transfers]->(destination:Account)
+LET a = source, a = destination -- ERROR: 'a' has already been defined.
+RETURN a.id AS a_id
+```
+
+```sql {.bad}
+GRAPH FinGraph
+MATCH (source:Account)-[e:Transfers]->(destination:Account)
+LET a = source
+LET a = destination -- ERROR: 'a' has already been defined.
+RETURN a.id AS a_id
+```
+
+In the following examples, the `LET` statements are invalid because `b` is
+redefined:
+
+```sql {.bad}
+GRAPH FinGraph
+MATCH (source:Account)-[e:Transfers]->(destination:Account)
+LET a = source
+LET b = destination
+RETURN a, b
+
+NEXT
+
+MATCH (a)
+LET b = a -- ERROR: 'b' has already been defined.
+RETURN b.id
+```
+
+The following `LET` statement is valid because although `b` is defined in the
+first linear query statement, it's not passed to the second linear query
+statement:
+
+```sql
+GRAPH FinGraph
+MATCH (source:Account)-[e:Transfers]->(destination:Account)
+LET a = source
+LET b = destination
+RETURN a
+
+NEXT
+
+MATCH (a)
+LET b = a
+RETURN b.id AS b_id
+
+/*------+
+ | b_id |
+ +------+
+ | 20 |
+ | 7 |
+ | 7 |
+ | 20 |
+ | 16 |
+ +------*/
+```
+
+[horizontal-aggregation]: https://github.com/google/zetasql/blob/master/docs/graph-gql-functions.md
+
+## `LIMIT` statement
+
+
+
+LIMIT count
+
+
+#### Description
+
+Limits the number of query results.
+
+#### Definitions
+
++ `count`: A non-negative `INT64` value that represents the number of
+ results to produce. For more information,
+ see the [`LIMIT` and `OFFSET` clauses][limit-and-offset-clause].
+
+#### Details
+
+The `LIMIT` statement can appear before the `RETURN` statement. You can also use
+it as a qualifying clause in the [`RETURN` statement][gql-return].
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following example uses the `LIMIT` statement to limit the query results to
+three rows:
+
+```sql
+GRAPH FinGraph
+MATCH (source:Account)-[e:Transfers]->(destination:Account)
+ORDER BY source.nick_name
+LIMIT 3
+RETURN source.nick_name
+
+/*----------------+
+ | nick_name |
+ +----------------+
+ | Rainy day fund |
+ | Rainy day fund |
+ | Vacation fund |
+ +----------------*/
+```
+
+The following query finds the account and its owner with the largest outgoing
+transfer to a blocked account:
+
+```sql
+GRAPH FinGraph
+MATCH (src_account:Account)-[transfer:Transfers]->(dst_account:Account)
+WHERE dst_account.is_blocked
+ORDER BY transfer.amount DESC
+LIMIT 1
+MATCH (src_account:Account)<-[owns:Owns]-(owner:Person)
+RETURN src_account.id AS account_id, owner.name AS owner_name
+
+/*-------------------------+
+ | account_id | owner_name |
+ +-------------------------+
+ | 7 | Alex |
+ +-------------------------*/
+```
+
+[gql-return]: #gql_return
+
+[limit-and-offset-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#limit_and_offset_clause
+
+## `MATCH` statement
+
+
+
+[ OPTIONAL ] MATCH graph_pattern
+
+
+#### Description
+
+Matches data described by a graph pattern. You can have zero or more `MATCH`
+statements in a linear query statement.
+
+#### Definitions
+
++ `MATCH graph_pattern`: The graph pattern to match. For more information,
+ see [`MATCH` graph pattern definition][graph-pattern-definition].
++ `OPTIONAL MATCH graph_pattern`: The graph pattern to optionally match. If there
+ are missing parts in the pattern, the missing parts are represented by `NULL`
+ values. For more information, see
+ [`OPTIONAL MATCH` graph pattern definition][graph-pattern-definition].
+
+#### Details
+
+The `MATCH` statement joins the incoming working table with the matched
+result with either `INNER JOIN` or `CROSS JOIN` semantics.
+
+The `INNER JOIN` semantics is used when the working table and matched result
+have variables in common. In the following example, the `INNER JOIN`
+semantics is used because `friend` is produced by both `MATCH` statements:
+
+```sql
+MATCH (person:Person)-[:knows]->(friend:Person)
+MATCH (friend)-[:knows]->(otherFriend:Person)
+```
+
+The `CROSS JOIN` semantics is used when the incoming working table and matched
+result have no variables in common. In the following example, the `CROSS JOIN`
+semantics is used because `person1` and `friend` exist in the result of the
+first `MATCH` statement, but not the second one:
+
+```sql
+MATCH (person1:Person)-[:knows]->(friend:Person)
+MATCH (person2:Person)-[:knows]->(otherFriend:Person)
+```
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following query matches all `Person` nodes and returns the name and ID of
+each person:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)
+RETURN p.name, p.id
+
+/*-----------+
+ | name | id |
+ +-----------+
+ | Alex | 1 |
+ | Dana | 2 |
+ | Lee | 3 |
+ +-----------*/
+```
+
+The following query matches all `Person` and `Account` nodes and returns their
+labels and ID:
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person|Account)
+RETURN LABELS(n) AS label, n.id
+
+/*----------------+
+ | label | id |
+ +----------------+
+ | [Account] | 7 |
+ | [Account] | 16 |
+ | [Account] | 20 |
+ | [Person] | 1 |
+ | [Person] | 2 |
+ | [Person] | 3 |
+ +----------------*/
+```
+
+The following query matches all `Account` nodes that are not blocked:
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account {is_blocked: false})
+RETURN a.id
+
+/*----+
+ | id |
+ +----+
+ | 7 |
+ | 20 |
+ +----*/
+```
+
+The following query matches all `Person` nodes that have a `birthday` less than
+`1990-01-10`:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person WHERE p.birthday < '1990-01-10')
+RETURN p.name
+
+/*------+
+ | name |
+ +------+
+ | Dana |
+ | Lee |
+ +------*/
+```
+
+The following query matches all `Owns` edges:
+
+```sql
+GRAPH FinGraph
+MATCH -[e:Owns]->
+RETURN e.id
+
+/*----+
+ | id |
+ +----+
+ | 1 |
+ | 3 |
+ | 2 |
+ +----*/
+```
+
+The following query matches all `Owns` edges created within a specific period of
+time:
+
+```sql
+GRAPH FinGraph
+MATCH -[e:Owns WHERE e.create_time > '2020-01-14' AND e.create_time < '2020-05-14']->
+RETURN e.id
+
+/*----+
+ | id |
+ +----+
+ | 2 |
+ | 3 |
+ +----*/
+```
+
+The following query matches all `Transfers` edges where a blocked account is
+involved in any direction:
+
+```sql
+GRAPH FinGraph
+MATCH (account:Account)-[transfer:Transfers]-(:Account)
+WHERE account.is_blocked
+RETURN transfer.order_number, transfer.amount
+
+/*--------------------------+
+ | order_number | amount |
+ +--------------------------+
+ | 304330008004315 | 300 |
+ | 304120005529714 | 100 |
+ | 103650009791820 | 300 |
+ | 302290001255747 | 200 |
+ +--------------------------*/
+```
+
+The following query matches all `Transfers` initiated from an `Account` owned by
+`Person` with `id` equal to `2`:
+
+```sql
+GRAPH FinGraph
+MATCH
+ (p:Person {id: 2})-[:Owns]->(account:Account)-[t:Transfers]->
+ (to_account:Account)
+RETURN p.id AS sender_id, to_account.id AS to_id
+
+/*-------------------+
+ | sender_id | to_id |
+ +-------------------+
+ | 2 | 7 |
+ | 2 | 16 |
+ +-------------------*/
+```
+
+The following query matches all the destination `Accounts` one to three
+transfers away from a source `Account` with `id` equal to `7`, other than the
+source itself:
+
+```sql
+GRAPH FinGraph
+MATCH (src:Account {id: 7})-[e:Transfers]->{1, 3}(dst:Account)
+WHERE src != dst
+RETURN ARRAY_LENGTH(e) AS hops, dst.id AS destination_account_id
+
+/*-------------------------------+
+ | hops | destination_account_id |
+ +-------------------------------+
+ | 1 | 16 |
+ | 3 | 16 |
+ | 3 | 16 |
+ | 1 | 16 |
+ | 2 | 20 |
+ | 2 | 20 |
+ +-------------------------------*/
+```
+
+The following query matches paths between `Account` nodes with one to two
+`Transfers` edges through intermediate accounts that are blocked:
+
+```sql
+GRAPH FinGraph
+MATCH
+ (src:Account)
+ ((:Account)-[:Transfers]->(interm:Account) WHERE interm.is_blocked){1,2}
+ -[:Transfers]->(dst:Account)
+RETURN src.id AS source_account_id, dst.id AS destination_account_id
+
+/*--------------------------------------------+
+ | source_account_id | destination_account_id |
+ +--------------------------------------------+
+ | 7 | 20 |
+ | 7 | 20 |
+ | 20 | 20 |
+ +--------------------------------------------*/
+```
+
+The following query finds unique reachable accounts which are one or two
+transfers away from a given `Account` node:
+
+```sql
+GRAPH FinGraph
+MATCH ANY (src:Account {id: 7})-[e:Transfers]->{1,2}(dst:Account)
+LET ids_in_path = ARRAY(SELECT e.to_id FROM UNNEST(e) AS e)
+RETURN src.id AS source_account_id, dst.id AS destination_account_id, ids_in_path
+
+/*----------------------------------------------------------+
+ | source_account_id | destination_account_id | ids_in_path |
+ +----------------------------------------------------------+
+ | 7 | 16 | 16 |
+ | 7 | 20 | 16,20 |
+ +----------------------------------------------------------*/
+```
+
+The following query matches all `Person` nodes and optionally matches the
+blocked `Account` owned by the `Person`. The missing blocked `Account` is
+represented as `NULL`:
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person)
+OPTIONAL MATCH (n:Person)-[:Owns]->(a:Account {is_blocked: TRUE})
+RETURN n.name, a.id AS blocked_account_id
+
+/*---------------------------+
+ | name | blocked_account_id |
+ +---------------------------+
+ | Lee  | 16                 |
+ | Alex | NULL               |
+ | Dana | NULL               |
+ +---------------------------*/
+```
+
+[graph-pattern-definition]: https://github.com/google/zetasql/blob/master/docs/graph-patterns.md#graph_pattern_definition
+
+## `NEXT` statement
+
+
+
+NEXT
+
+
+#### Description
+
+Chains multiple linear query statements together.
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following linear query statements are chained by the `NEXT` statement:
+
+```sql
+GRAPH FinGraph
+MATCH (:Account)-[:Transfers]->(account:Account)
+RETURN account, COUNT(*) AS num_incoming_transfers
+GROUP BY account
+
+NEXT
+
+MATCH (account:Account)<-[:Owns]-(owner:Person)
+RETURN
+ account.id AS account_id, owner.name AS owner_name,
+ num_incoming_transfers
+
+NEXT
+
+FILTER num_incoming_transfers < 2
+RETURN account_id, owner_name
+UNION ALL
+RETURN "Bob" AS owner_name, 100 AS account_id
+
+/*-------------------------+
+ | account_id | owner_name |
+ +-------------------------+
+ | 7 | Alex |
+ | 20 | Dana |
+ | 100 | Bob |
+ | 100 | Bob |
+ | 100 | Bob |
+ +-------------------------*/
+```
+
+## `OFFSET` statement
+
+
+
+OFFSET count
+
+
+#### Description
+
+Skips a specified number of rows in the query results.
+
+#### Definitions
+
++ `count`: A non-negative `INT64` value that represents the number of
+ rows to skip. For more information,
+ see the [`LIMIT` and `OFFSET` clauses][limit-and-offset-clause].
+
+#### Details
+
+The `OFFSET` statement can appear anywhere in a linear query statement before
+the `RETURN` statement.
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+In the following example, the first two rows are not included in the results:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)
+OFFSET 2
+RETURN p.name, p.id
+
+/*-----------+
+ | name | id |
+ +-----------+
+ | Lee | 3 |
+ +-----------*/
+```
+
+[limit-and-offset-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#limit_and_offset_clause
+
+## `ORDER BY` statement
+
+
+
+ORDER BY order_by_specification[, ...]
+
+order_by_specification:
+ expression
+ [ COLLATE collation_specification ]
+ [ { ASC | ASCENDING | DESC | DESCENDING } ]
+ [ { NULLS FIRST | NULLS LAST } ]
+
+
+#### Description
+
+Orders the query results.
+
+#### Definitions
+
++ `expression`: The sort criterion for the result set. For more information,
+ see the [`ORDER BY` clause][order-by-clause].
++ `COLLATE collation_specification`: The collation specification for
+ `expression`. For more information, see the
+ [`ORDER BY` clause][order-by-clause].
++ `ASC | ASCENDING | DESC | DESCENDING`: The sort order, which can be either
+ ascending or descending. The following options are synonymous:
+
+ + `ASC` and `ASCENDING`
+
+ + `DESC` and `DESCENDING`
+
+ For more information about sort order, see the
+ [`ORDER BY` clause][order-by-clause].
++ `NULLS FIRST | NULLS LAST`: Determines how `NULL` values are sorted for
+ `expression`. For more information, see the
+ [`ORDER BY` clause][order-by-clause].
+
+#### Details
+
+Ordinals are not supported in the `ORDER BY` statement.
+
+The `ORDER BY` statement is ignored unless it is immediately followed by the
+`LIMIT` or `OFFSET` statement.
+
+If you would like to apply `ORDER BY` to the results of the `RETURN` statement,
+use the `ORDER BY` clause in the `RETURN` statement. For more information, see
+[`RETURN` statement][return-statement].
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following query sorts the results by the `transfer.amount`
+values in descending order:
+
+```sql
+GRAPH FinGraph
+MATCH (src_account:Account)-[transfer:Transfers]->(dst_account:Account)
+ORDER BY transfer.amount DESC
+LIMIT 3
+RETURN src_account.id AS account_id, transfer.amount AS transfer_amount
+
+/*------------------------------+
+ | account_id | transfer_amount |
+ +------------------------------+
+ | 20 | 500 |
+ | 7 | 300 |
+ | 16 | 300 |
+ +------------------------------*/
+```
+
+```sql
+GRAPH FinGraph
+MATCH (src_account:Account)-[transfer:Transfers]->(dst_account:Account)
+ORDER BY transfer.amount DESC
+OFFSET 1
+RETURN src_account.id AS account_id, transfer.amount AS transfer_amount
+
+/*------------------------------+
+ | account_id | transfer_amount |
+ +------------------------------+
+ | 7 | 300 |
+ | 16 | 300 |
+ | 20 | 200 |
+ | 7 | 100 |
+ +------------------------------*/
+```
+
+If you don't include the `LIMIT` or `OFFSET` statement right after the
+`ORDER BY` statement, the effect of `ORDER BY` is discarded and the result is
+unordered.
+
+```sql
+-- Warning: The transfer.amount values are not sorted because the
+-- LIMIT statement is missing.
+GRAPH FinGraph
+MATCH (src_account:Account)-[transfer:Transfers]->(dst_account:Account)
+ORDER BY transfer.amount DESC
+RETURN src_account.id AS account_id, transfer.amount AS transfer_amount
+
+/*------------------------------+
+ | account_id | transfer_amount |
+ +------------------------------+
+ | 7 | 300 |
+ | 7 | 100 |
+ | 16 | 300 |
+ | 20 | 500 |
+ | 20 | 200 |
+ +------------------------------*/
+```
+
+```sql
+-- Warning: Using the LIMIT clause in the RETURN statement, but not immediately
+-- after the ORDER BY statement, also returns the unordered transfer.amount
+-- values.
+GRAPH FinGraph
+MATCH (src_account:Account)-[transfer:Transfers]->(dst_account:Account)
+ORDER BY transfer.amount DESC
+RETURN src_account.id AS account_id, transfer.amount AS transfer_amount
+LIMIT 10
+
+/*------------------------------+
+ | account_id | transfer_amount |
+ +------------------------------+
+ | 7 | 300 |
+ | 7 | 100 |
+ | 16 | 300 |
+ | 20 | 500 |
+ | 20 | 200 |
+ +------------------------------*/
+```
+
+[order-by-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#order_by_clause
+
+[return-statement]: #gql_return
+
+## `RETURN` statement
+
+
+
+RETURN *
+
+
+
+RETURN
+ [ { ALL | DISTINCT } ]
+ return_item[, ... ]
+ [ group_by_clause ]
+ [ order_by_clause ]
+ [ limit_and_offset_clauses ]
+
+return_item:
+ { expression [ AS alias ] | * }
+
+limit_and_offset_clauses:
+ {
+ limit_clause
+ | offset_clause
+ | offset_clause limit_clause
+ }
+
+
+#### Description
+
+Marks the end of a linear query statement and returns the results. Only one `RETURN`
+statement is allowed in a linear query statement.
+
+#### Definitions
+
++ `*`: Returns all columns in the current working table.
++ `return_item`: A column to include in the results.
++ `ALL`: Returns all rows. This is equivalent to not using any prefix.
++ `DISTINCT`: Duplicate rows are discarded and only the remaining distinct
+ rows are returned. This deduplication takes place after any aggregation
+ is performed.
++ `expression`: An expression that represents a column to produce.
+ Aggregation is supported.
++ `alias`: An alias for `expression`.
++ `group_by_clause`: Groups the current rows of the working table, using the
+ [`GROUP BY` clause][group-by-clause]. If
+ `GROUP BY ALL` is applied, the groupable items from the
+ `return_item` list are used to group the rows.
++ `order_by_clause`: Orders the current rows in a
+ linear query statement, using the [`ORDER BY` clause][order-by-clause].
++ `limit_clause`: Limits the number of current rows in a
+ linear query statement, using the [`LIMIT` clause][limit-and-offset-clause].
++ `offset_clause`: Skips a specified number of rows in a linear query statement,
+ using the [`OFFSET` clause][limit-and-offset-clause].
+
+#### Details
+
+If any expression performs aggregation, and no `GROUP BY` clause is
+specified, all groupable items from the return list are used implicitly as
+grouping keys (This is
+equivalent to `GROUP BY ALL`).
+
+Ordinals are not supported in the `ORDER BY` and `GROUP BY` clauses.
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following query returns `p.name` and `p.id`:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)
+RETURN p.name, p.id
+
+/*-----------+
+ | name | id |
+ +-----------+
+ | Alex | 1 |
+ | Dana | 2 |
+ | Lee | 3 |
+ +-----------*/
+```
+
+In the following example, the first linear query statement returns all columns
+including `p`, `a`, `b`, and `c`. The second linear query statement returns the
+specified `p.name` and `d` columns:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)
+LET a = 1, b = 2, c = 3
+RETURN *
+
+NEXT
+
+RETURN p.name, (a + b + c) AS d
+
+/*----------+
+ | name | d |
+ +----------+
+ | Alex | 6 |
+ | Dana | 6 |
+ | Lee | 6 |
+ +----------*/
+```
+
+The following query returns distinct rows:
+
+```sql
+GRAPH FinGraph
+MATCH (src:Account {id: 7})-[e:Transfers]->{1, 3}(dst:Account)
+RETURN DISTINCT ARRAY_LENGTH(e) AS hops, dst.id AS destination_account_id
+
+/*-------------------------------+
+ | hops | destination_account_id |
+ +-------------------------------+
+ | 3 | 7 |
+ | 1 | 16 |
+ | 3 | 16 |
+ | 2 | 20 |
+ +-------------------------------*/
+```
+
+In the following example, the first linear query statement returns `account` and
+aggregated `num_incoming_transfers` per account. The second statement returns
+the sorted results.
+
+```sql
+GRAPH FinGraph
+MATCH (:Account)-[:Transfers]->(account:Account)
+RETURN account, COUNT(*) AS num_incoming_transfers
+GROUP BY account
+
+NEXT
+
+MATCH (account:Account)<-[:Owns]-(owner:Person)
+RETURN owner.name AS owner_name, num_incoming_transfers
+ORDER BY num_incoming_transfers DESC
+
+/*-------------------------------------+
+ | owner_name | num_incoming_transfers |
+ +-------------------------------------+
+ | Lee | 3 |
+ | Alex | 1 |
+ | Dana | 1 |
+ +-------------------------------------*/
+```
+
+In the following example, the `GROUP BY ALL` clause groups rows by inferring
+grouping keys from the return items in the `RETURN` statement.
+
+```sql
+GRAPH FinGraph
+MATCH (:Account)-[:Transfers]->(account:Account)
+RETURN account, COUNT(*) AS num_incoming_transfers
+GROUP BY ALL
+ORDER BY num_incoming_transfers DESC
+
+NEXT
+
+MATCH (account:Account)<-[:Owns]-(owner:Person)
+RETURN owner.name AS owner_name, num_incoming_transfers
+
+/*-------------------------------------+
+ | owner_name | num_incoming_transfers |
+ +-------------------------------------+
+ | Alex | 1 |
+ | Dana | 1 |
+ | Lee | 3 |
+ +-------------------------------------*/
+```
+
+In the following example, the `LIMIT` clause in the `RETURN` statement
+reduces the results to one row:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)
+RETURN p.name, p.id
+LIMIT 1
+
+/*-----------+
+ | name | id |
+ +-----------+
+ | Alex | 1 |
+ +-----------*/
+```
+
+In the following example, the `OFFSET` clause in the `RETURN` statement
+skips the first row:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)
+RETURN p.name, p.id
+OFFSET 1
+
+/*-----------+
+ | name | id |
+ +-----------+
+ | Dana | 2 |
+ | Lee | 3 |
+ +-----------*/
+```
+
+In the following example, the `OFFSET` clause in the `RETURN` statement
+skips the first row, then the `LIMIT` clause reduces the
+results to one row:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)
+RETURN p.name, p.id
+OFFSET 1
+LIMIT 1
+
+/*-----------+
+ | name | id |
+ +-----------+
+ | Dana | 2 |
+ +-----------*/
+```
+
+In the following example, an error is produced because the `OFFSET` clause must
+come before the `LIMIT` clause when they are both used in the
+`RETURN` statement:
+
+```sql {.bad}
+-- Error: The LIMIT clause must come after the OFFSET clause in a
+-- RETURN operation.
+GRAPH FinGraph
+MATCH (p:Person)
+RETURN p.name, p.id
+LIMIT 1
+OFFSET 1
+```
+
+In the following example, the `ORDER BY` clause in the `RETURN` statement sorts
+the results by `hops` and then `destination_account_id`:
+
+```sql
+GRAPH FinGraph
+MATCH (src:Account {id: 7})-[e:Transfers]->{1, 3}(dst:Account)
+RETURN DISTINCT ARRAY_LENGTH(e) AS hops, dst.id AS destination_account_id
+ORDER BY hops, destination_account_id
+
+/*-------------------------------+
+ | hops | destination_account_id |
+ +-------------------------------+
+ | 1 | 16 |
+ | 2 | 20 |
+ | 3 | 7 |
+ | 3 | 16 |
+ +-------------------------------*/
+```
+
+[group-by-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#group_by_clause
+
+[order-by-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#order_by_clause
+
+[limit-and-offset-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#limit_and_offset_clause
+
+## `SKIP` statement
+
+
+
+SKIP count
+
+
+#### Description
+
+Synonym for the [`OFFSET` statement][gql-offset].
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+`SKIP` is a synonym for `OFFSET`. Therefore, these queries are equivalent:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)
+SKIP 2
+RETURN p.name, p.id
+
+/*-----------+
+ | name | id |
+ +-----------+
+ | Lee | 3 |
+ +-----------*/
+```
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)
+OFFSET 2
+RETURN p.name, p.id
+
+/*-----------+
+ | name | id |
+ +-----------+
+ | Lee | 3 |
+ +-----------*/
+```
+
+[gql-offset]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#gql_offset
+
+## `WITH` statement
+
+
+
+WITH
+ [ { ALL | DISTINCT } ]
+ return_item[, ... ]
+ [ group_by_clause ]
+
+return_item:
+ { expression [ AS alias ] | * }
+
+
+#### Description
+
+Passes on the specified columns, optionally filtering, renaming, and
+transforming those results.
+
+#### Definitions
+
++ `*`: Returns all columns in the current working table.
++ `ALL`: Returns all rows. This is equivalent to not using any prefix.
++ `DISTINCT`: Returns distinct rows. Deduplication takes place after
+ aggregations are performed.
++ `return_item`: A column to include in the results.
++ `expression`: An expression that represents a column to produce. Aggregation
+ is supported.
++ `alias`: An alias for `expression`.
++ `group_by_clause`: Groups the current rows of the working table, using the
+ [`GROUP BY` clause][group-by-clause]. If `GROUP BY ALL`
+ is applied, the groupable items from the `return_item` list are used to
+ group the rows.
+
+#### Details
+
+If any expression performs aggregation, and no `GROUP BY` clause is
+specified, all groupable items from the return list are implicitly used as
+grouping keys (This is equivalent to `GROUP BY ALL`).
+
+Window functions are not supported in `expression`.
+
+Ordinals are not supported in the `GROUP BY` clause.
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following query returns all distinct destination account IDs:
+
+```sql
+GRAPH FinGraph
+MATCH (src:Account)-[transfer:Transfers]->(dst:Account)
+WITH DISTINCT dst
+RETURN dst.id AS destination_id
+
+/*----------------+
+ | destination_id |
+ +----------------+
+ | 7 |
+ | 16 |
+ | 20 |
+ +----------------*/
+```
+
+The following query uses `*` to carry over the existing columns of
+the working table in addition to defining a new one for the destination
+account id.
+
+```sql
+GRAPH FinGraph
+MATCH (src:Account)-[transfer:Transfers]->(dst:Account)
+WITH *, dst.id
+RETURN dst.id AS destination_id
+
+/*----------------+
+ | destination_id |
+ +----------------+
+ | 7 |
+ | 16 |
+ | 16 |
+ | 16 |
+ | 20 |
+ +----------------*/
+```
+
+In the following example, aggregation is performed implicitly because the
+`WITH` statement has an aggregate expression but does not specify a `GROUP BY`
+clause. All groupable items from the return item list are used as grouping keys
+ (This is equivalent to `GROUP BY ALL`).
+In this case, the grouping keys inferred are `src.id` and `dst.id`.
+Therefore, this query returns the number of transfers for each
+distinct combination of `src.id` and `dst.id`.
+
+```sql
+GRAPH FinGraph
+MATCH (src:Account)-[transfer:Transfers]->(dst:Account)
+WITH COUNT(*) AS transfer_total, src.id AS source_id, dst.id AS destination_id
+RETURN transfer_total, destination_id, source_id
+
+/*---------------------------------------------+
+ | transfer_total | destination_id | source_id |
+ +---------------------------------------------+
+ | 2 | 16 | 7 |
+ | 1 | 20 | 16 |
+ | 1 | 7 | 20 |
+ | 1 | 16 | 20 |
+ +---------------------------------------------*/
+```
+
+In the following example, an error is produced because the `WITH` statement only
+contains `dst`. `src` is not visible after the `WITH` statement in the `RETURN`
+statement.
+
+```sql {.bad}
+-- Error: src does not exist
+GRAPH FinGraph
+MATCH (src:Account)-[transfer:Transfers]->(dst:Account)
+WITH dst
+RETURN src.id AS source_id
+```
+
+[group-by-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#group_by_clause
+
+## Set operation
+
+
+
+linear_query_statement
+set_operator
+linear_query_statement
+[
+ set_operator
+  linear_query_statement
+][...]
+
+set_operator:
+ {
+ UNION ALL
+ | UNION DISTINCT
+ | INTERSECT ALL
+ | INTERSECT DISTINCT
+ | EXCEPT ALL
+ | EXCEPT DISTINCT
+ }
+
+
+#### Description
+
+Combines a sequence of linear query statements with a set operation.
+Only one type of set operation is allowed per set operation.
+
+#### Definitions
+
++ `linear_query_statement`: A [linear query statement][gql_syntax] to
+ include in the set operation.
+
+#### Details
+
+Each linear query statement in the same set operation shares the same working table.
+
+Most of the rules for GQL set operators are the same as those for
+SQL [set operators][set-op], but there are some differences:
+
++ A GQL set operator does not support hints, or the `CORRESPONDING` keyword.
+ Since each set operation input (a linear query statement) only
+ produces columns with names, the default behavior of GQL set operations
+ requires all inputs to have the same set of column names and all
+ paired columns to share the same [supertype][supertypes].
++ GQL does not allow chaining different kinds of set operations in the same
+ set operation.
++ GQL does not allow using parentheses to separate different set operations.
++ The results produced by the linear query statements are combined in a left
+ associative order.
+
+#### Examples
+
+A set operation between two linear query statements with the same set of
+output column names and types but with different column orders is supported.
+For example:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)
+RETURN p.name, 1 AS group_id
+UNION ALL
+MATCH (p:Person)
+RETURN 2 AS group_id, p.name
+
+/*------+----------+
+ | name | group_id |
+ +------+----------+
+ | Alex | 1 |
+ | Dana | 1 |
+ | Lee | 1 |
+ | Alex | 2 |
+ | Dana | 2 |
+ | Lee | 2 |
+ +------+----------*/
+```
+
+In a set operation, chaining the same kind of set operation is supported, but
+chaining different kinds of set operations is not.
+For example:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)
+RETURN p.name, 1 AS group_id
+UNION ALL
+MATCH (p:Person)
+RETURN 2 AS group_id, p.name
+UNION ALL
+MATCH (p:Person)
+RETURN 3 AS group_id, p.name
+
+/*------+----------+
+ | name | group_id |
+ +------+----------+
+ | Alex | 1 |
+ | Dana | 1 |
+ | Lee | 1 |
+ | Alex | 2 |
+ | Dana | 2 |
+ | Lee | 2 |
+ | Alex | 3 |
+ | Dana | 3 |
+ | Lee | 3 |
+ +------+----------*/
+```
+
+```sql {.bad}
+-- ERROR: GQL does not allow chaining EXCEPT DISTINCT with UNION ALL
+GRAPH FinGraph
+MATCH (p:Person)
+RETURN p.name, 1 AS group_id
+UNION ALL
+MATCH (p:Person)
+RETURN 2 AS group_id, p.name
+EXCEPT DISTINCT
+MATCH (p:Person)
+RETURN 3 AS group_id, p.name
+```
+
+[set-op]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#set_operators
+
+[supertypes]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#supertypes
+
+[gql_syntax]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#gql_syntax
+
diff --git a/docs/graph-schema-statements.md b/docs/graph-schema-statements.md
new file mode 100644
index 000000000..d2d742363
--- /dev/null
+++ b/docs/graph-schema-statements.md
@@ -0,0 +1,453 @@
+
+
+
+
+# GQL schema statements
+
+Graph Query Language (GQL) supports all ZetaSQL DDL statements,
+including the following GQL-specific DDL statements:
+
+## Statement list
+
+
+
+## `CREATE PROPERTY GRAPH` statement {: #gql_create_graph}
+
+### Property graph definition
+
+
+
+CREATE
+ [ OR REPLACE ]
+ PROPERTY GRAPH
+ [ IF NOT EXISTS ]
+ property_graph_name
+ property_graph_content
+ [ OPTIONS (key=value, ...) ];
+
+property_graph_content:
+ node_tables
+ [ edge_tables ]
+
+node_tables:
+ NODE TABLES element_list
+
+edge_tables:
+ EDGE TABLES element_list
+
+element_list:
+ (element[, ...])
+
+
+**Description**
+
+Creates a property graph.
+
+Note: all GQL examples in the GQL reference use the
+[`FinGraph`][fin-graph] property graph example.
+
+**Definitions**
+
++ `OR REPLACE`: Replaces any property graph with the same name if it exists.
+ If the property graph does not exist, creates the property graph. Can't
+ appear with `IF NOT EXISTS`.
++ `IF NOT EXISTS`: If any property graph exists with the same name, the
+ `CREATE` statement has no effect. Can't appear with `OR REPLACE`.
++ `OPTIONS`: If you have schema options, you can add them when you create
+  the property graph. These options are system-specific and follow the
+  ZetaSQL [`HINT` syntax][hints].
++ `property_graph_name`: The name of the property graph. This name can be a
+ path expression. This name must not conflict with the name of an existing
+ table, view, or property graph.
++ `property_graph_content`: Add the definitions for the nodes and edges in the
+ property graph.
++ `node_tables`: A collection of node definitions. A node definition defines a
+ new type of node in the graph.
+
+ The following example represents three node definitions:
+ `Account`, `Customer`, and `GeoLocation`.
+
+ ```sql
+ NODE TABLES (
+ Account,
+ Customer
+ LABEL Client
+ PROPERTIES (cid, name),
+ Location AS GeoLocation
+ DEFAULT LABEL
+ PROPERTIES ALL COLUMNS
+ )
+ ```
++ `edge_tables`: A collection of edge definitions. An edge definition defines
+ a new type of edge in the graph. An edge is directed and connects a source and
+ a destination node.
+
+ The following example represents two edge definitions:
+ `Own` and `Transfer`.
+
+ ```sql
+ EDGE TABLES (
+ Own
+ SOURCE KEY (cid) REFERENCES Customer (cid)
+ DESTINATION KEY (aid) REFERENCES Account
+ NO PROPERTIES,
+ Transfer
+ SOURCE KEY (from_id) REFERENCES Account (aid)
+ DESTINATION KEY (to_id) REFERENCES Account (aid)
+ LABEL Transfer NO PROPERTIES
+ )
+ ```
++ `element_list`: A list of element (node or edge) definitions.
++ `element`: Refer to [Element definition][element-definition] for details.
+
+### Element definition
+
+
+
+element:
+ element_name
+ [ AS element_alias ]
+ [ element_keys ]
+ [ { label_and_properties_list | element_properties } ]
+
+element_keys:
+ { node_element_key | edge_element_keys }
+
+node_element_key:
+ element_key
+
+edge_element_keys:
+ element_key
+ source_key
+ destination_key
+
+element_key:
+ KEY column_name_list
+
+source_key:
+ SOURCE KEY column_name_list
+ REFERENCES element_alias_reference [ column_name_list ]
+
+destination_key:
+ DESTINATION KEY column_name_list
+ REFERENCES element_alias_reference [ column_name_list ]
+
+column_name_list:
+ (column_name[, ...])
+
+
+**Description**
+
+Adds an element definition to the property graph. For example:
+
+```sql
+Customer
+ LABEL Client
+ PROPERTIES (cid, name)
+```
+
+In a graph, labels and properties are uniquely identified by their names. Labels
+and properties with the same name can appear in multiple node or edge
+definitions. However, labels and properties with the same name must follow these
+rules:
+
++ Properties with the same name must have the same value type.
++ Labels with the same name must expose the same set of properties.
+
+**Definitions**
+
++ `element_name`: The name of the input table from which elements are created.
++ `element_alias`: An optional alias. You must use an alias if you use an input
+ table for more than one element definition.
++ `element_keys`: The key for a graph element. This uniquely identifies a graph
+ element.
+
+ + By default, the element key is the primary key of the input table.
+
+ + Element keys can be explicitly defined with the `KEY` clause.
+
+
++ `node_element_key`: The element key for a node.
+
+ ```sql
+ KEY (item1_column, item2_column)
+ ```
++ `edge_element_keys`: The element key, source key, and destination key
+ for an edge.
+
+ ```sql
+ KEY (item1_column, item2_column)
+ SOURCE KEY (item1_column) REFERENCES item_node (item_node_column)
+ DESTINATION KEY (item2_column) REFERENCES item_node (item_node_column)
+ ```
++ `element_key`: The key that identifies the node or edge element.
+
+ ```sql
+ KEY (item1_column, item2_column)
+ ```
++ `source_key`: The key for the source node of the edge.
+
+ ```sql
+ SOURCE KEY (item1_column) REFERENCES item_node (item_node_column)
+ ```
++ `destination_key`: The key for the destination node of the edge.
+
+ ```sql
+ DESTINATION KEY (item2_column) REFERENCES item_node (item_node_column)
+ ```
++ `column_name_list`: One or more columns to assign to a key.
+
+ In `column_name_list`, column names must be unique.
+
+ If `column_name_list` is used in `REFERENCES` of `source_key` or
+ `destination_key`, the columns they reference must exist in the corresponding
+ node tables.
+
+ If `column_name_list` is absent in `REFERENCES` of `source_key` or
+ `destination_key`, a foreign key constraint must exist on the edge table's
+ `SOURCE KEY` columns or `DESTINATION KEY` columns to their corresponding node
+ tables.
++ `element_alias_reference`: The alias of another element to reference.
++ `label_and_properties_list`: The list of labels and properties to add to
+  an element. For more information, see
+  [Label and properties list definition][label-property-definition].
+
+### Label and properties list definition
+
+
+
+label_and_properties_list:
+ label_and_properties[...]
+
+label_and_properties:
+ element_label
+ [ element_properties ]
+
+element_label:
+ {
+ LABEL label_name |
+ DEFAULT LABEL
+ }
+
+
+
+**Description**
+
+Adds a list of labels and properties to an element.
+
+**Definitions**
+
++ `label_and_properties`: The label to add to the element and the properties
+ exposed by that label. For example:
+
+ ```sql
+ LABEL Tourist PROPERTIES (home_city, home_country)
+ ```
+
+ When `label_and_properties` is not specified, the following is
+ applied implicitly:
+
+ ```sql
+ DEFAULT LABEL PROPERTIES ARE ALL COLUMNS
+ ```
+
+ A property must be unique in `label_and_properties`.
++ `element_label`: Add a custom label or use the default label for the
+ element. `label_name` must be unique in `element`.
+
+  If you use `DEFAULT LABEL`, `label_name` is the same as `element_alias`.
++ `element_properties`: The properties associated with a label. A property
+ can't be used more than once for a specific label. For more information, see
+ [Element properties definition][element-table-property-definition].
+
+### Element properties definition
+
+
+
+element_properties:
+ {
+ NO PROPERTIES |
+ properties_are |
+ derived_property_list
+ }
+
+properties_are:
+ PROPERTIES [ ARE ] ALL COLUMNS [ EXCEPT column_name_list ]
+
+column_name_list:
+ (column_name[, ...])
+
+derived_property_list:
+ PROPERTIES derived_property[, ...]
+
+derived_property:
+ value_expression [ AS property_name ]
+
+
+**Description**
+
+Adds properties associated with a label.
+
+**Definitions**
+
++ `NO PROPERTIES`: The element doesn't have properties.
++ `properties_are`: Define which columns to include as element
+ properties.
+
+ If you don't include this definition, all columns are included by
+ default, and the following definition is applied implicitly:
+
+ ```sql
+ PROPERTIES ARE ALL COLUMNS
+ ```
+
+ In the following examples, all columns in a table are included as
+ element properties:
+
+ ```sql
+ PROPERTIES ARE ALL COLUMNS
+ ```
+
+ ```sql
+ PROPERTIES ALL COLUMNS
+ ```
+
+ In the following example, all columns in a table except for `home_city` and
+ `home_country` are included as element properties:
+
+ ```sql
+ PROPERTIES ARE ALL COLUMNS EXCEPT (home_city, home_country)
+ ```
++ `column_name_list`: A list of columns to exclude as element properties.
+
+ Column names in the `EXCEPT column_name_list` must be unique.
++ `derived_property_list`: A list of element property definitions.
++ `derived_property`: An expression that defines a property and can optionally
+ reference the input table columns.
+
+  In the following example, the `id` and `name` columns are included as
+  properties. Additionally, the result of the `salary + bonus` expression is
+  included as the `income` property:
+
+ ```sql
+ PROPERTIES (id, name, salary + bonus AS income)
+ ```
+
+ A derived property includes:
+
+ + `value_expression`: An expression that can be represented by simple constructs
+ such as column references and functions. Subqueries are excluded.
+
+ + `AS property_name`: Alias to assign to the value expression. This is
+ optional unless `value_expression` is a function.
+
+ If `derived_property` has any column reference in `value_expression`, that
+ column reference must refer to a column of the underlying table.
+
+ If `derived_property` doesn't define `property_name`, `value_expression`
+ must be a column reference and the implicit `property_name` is the
+ column name.
+
+### `FinGraph` Example
+
+
+The following property graph, `FinGraph`, contains two node
+definitions (`Account` and `Person`) and two edge definitions
+(`PersonOwnAccount` and `AccountTransferAccount`).
+
+Note: all GQL examples in the GQL reference use the
+[`FinGraph`][fin-graph] property graph example.
+
+```sql
+CREATE OR REPLACE PROPERTY GRAPH FinGraph
+ NODE TABLES (
+ Account,
+ Person
+ )
+ EDGE TABLES (
+ PersonOwnAccount
+ SOURCE KEY (id) REFERENCES Person (id)
+ DESTINATION KEY (account_id) REFERENCES Account (id)
+ LABEL Owns,
+ AccountTransferAccount
+ SOURCE KEY (id) REFERENCES Account (id)
+ DESTINATION KEY (to_id) REFERENCES Account (id)
+ LABEL Transfers
+ );
+```
+
+Once the property graph is created, you can use it in [GQL][gql] queries. For
+example, the following query matches all nodes labeled `Person` and then returns
+the `name` values in the results.
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)
+RETURN p.name
+
+/*---------+
+ | name |
+ +---------+
+ | Alex |
+ | Dana |
+ | Lee |
+ +---------*/
+```
+
+[hints]: https://github.com/google/zetasql/blob/master/docs/lexical.md#hints
+
+[element-definition]: #element_definition
+
+[label-property-definition]: #label_property_definition
+
+[element-table-property-definition]: #element_table_property_definition
+
+[fin-graph]: #fin_graph
+
+[gql]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md
+
+## `DROP PROPERTY GRAPH` statement
+
+
+
+DROP PROPERTY GRAPH [ IF EXISTS ] property_graph_name;
+
+
+**Description**
+
+Deletes a property graph.
+
+**Definitions**
+
++ `IF EXISTS`: If a property graph of the specified name doesn't exist, then the
+ DROP statement has no effect and no error is generated.
++ `property_graph_name`: The name of the property graph to drop.
+
+**Example**
+
+```sql
+DROP PROPERTY GRAPH FinGraph;
+```
+
diff --git a/docs/graph-sql-queries.md b/docs/graph-sql-queries.md
new file mode 100644
index 000000000..1b64a86e9
--- /dev/null
+++ b/docs/graph-sql-queries.md
@@ -0,0 +1,140 @@
+
+
+
+
+# GQL within SQL
+
+ZetaSQL supports the following syntax to use GQL
+within SQL queries.
+
+## Language list
+
+
+
+
+ Name |
+ Summary |
+
+
+
+
+
+ GRAPH_TABLE operator
+ |
+
+ Performs an operation on a graph in the FROM clause of a SQL
+ query and then produces a table with the results.
+ |
+
+
+
+
+
+## `GRAPH_TABLE` operator
+
+
+
+FROM GRAPH_TABLE (
+ property_graph_name
+ multi_linear_query_statement
+) [ [ AS ] alias ]
+
+
+#### Description
+
+Performs an operation on a graph in the `FROM` clause of a SQL query and then
+produces a table with the results.
+
+With the `GRAPH_TABLE` operator, you can use the [GQL syntax][graph-query-statements]
+to query a property graph. The result of this operation is produced as a table that
+you can use in the rest of the query.
+
+#### Definitions
+
++ `property_graph_name`: The name of the property graph to query for patterns.
++ `multi_linear_query_statement`: You can use GQL to query a property graph for
+ patterns. For more information, see [Graph query language][graph-query-statements].
++ `alias`: An optional alias, which you can use to refer to the table
+ produced by the `GRAPH_TABLE` operator elsewhere in the query.
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+You can use the `RETURN` statement to return specific node and edge properties.
+For example:
+
+```sql
+SELECT name, id
+FROM GRAPH_TABLE(
+ FinGraph
+ MATCH (n)
+ RETURN n.name AS name, n.id AS id
+);
+
+/*-----------+
+ | name | id |
+ +-----------+
+ | NULL | 7 |
+ | NULL | 16 |
+ | NULL | 20 |
+ | Alex | 1 |
+ | Dana | 2 |
+ | Lee | 3 |
+ +-----------*/
+```
+
+You can use the `RETURN` statement to produce output with graph pattern
+variables. These variables can be referenced outside `GRAPH_TABLE`. For example,
+
+```sql
+SELECT n.name, n.id
+FROM GRAPH_TABLE(
+ FinGraph
+ MATCH (n)
+ RETURN n
+);
+
+/*-----------+
+ | name | id |
+ +-----------+
+ | NULL | 7 |
+ | NULL | 16 |
+ | NULL | 20 |
+ | Alex | 1 |
+ | Dana | 2 |
+ | Lee | 3 |
+ +-----------*/
+```
+
+The following query produces an error because `id` is not
+included in the `RETURN` statement, even though this property exists for
+element `n`:
+
+```sql {.bad}
+SELECT name, id
+FROM GRAPH_TABLE(
+ FinGraph
+ MATCH (n)
+ RETURN n.name
+);
+```
+
+The following query produces an error because `n` is a graph element and
+graph elements can't be included as query output:
+
+```sql {.bad}
+-- Error
+SELECT n
+FROM GRAPH_TABLE(
+ FinGraph
+ MATCH (n)
+ RETURN n
+);
+```
+
+[graph-query-statements]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md
+
diff --git a/docs/graph-subqueries.md b/docs/graph-subqueries.md
new file mode 100644
index 000000000..d25c605dc
--- /dev/null
+++ b/docs/graph-subqueries.md
@@ -0,0 +1,298 @@
+
+
+
+
+# GQL subqueries
+
+The following subqueries are supported in GQL query statements:
+
+## Subquery list
+
+
+
+
+ Name |
+ Summary |
+
+
+
+
+
+ ARRAY subquery
+ |
+ Subquery expression that produces an array. |
+
+
+
+ EXISTS subquery
+ |
+ Checks if a subquery produces at least one row. |
+
+
+
+ IN subquery
+ |
+ Checks if a subquery produces a specified value. |
+
+
+
+ VALUE subquery
+ |
+ Subquery expression that produces a scalar value. |
+
+
+
+
+
+## `ARRAY` subquery
+
+
+
+ARRAY { gql_query_expr }
+
+
+#### Description
+
+Subquery expression that produces an array. If the subquery produces zero rows,
+an empty array is produced. Never produces a `NULL` array. This can be used
+wherever a query expression is supported in a GQL query statement.
+
+#### Definitions
+
++ `gql_query_expr`: A GQL query expression.
+
+#### Return type
+
+`ARRAY`
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+In the following query, an array of transfer amounts is produced for each
+`Account` owned by each `Person` node:
+
+```sql
+GRAPH FinGraph
+MATCH (p:Person)-[:Owns]->(account:Account)
+RETURN
+ p.name, account.id AS account_id,
+ ARRAY {
+ MATCH (a:Account)-[transfer:Transfers]->(:Account)
+ WHERE a = account
+ RETURN transfer.amount AS transfers
+ } AS transfers;
+
+/*-------------------------------+
+ | name | account_id | transfers |
+ +-------------------+-----------+
+ | Alex | 7 | [300,100] |
+ | Dana | 20 | [500,200] |
+ | Lee | 16 | [300] |
+ +-------------------------------*/
+```
+
+## `EXISTS` subquery
+
+
+
+EXISTS { gql_query_expr }
+
+
+
+EXISTS { match_statement }
+
+
+
+EXISTS { graph_pattern }
+
+
+#### Description
+
+Checks if the subquery produces at least one row. Returns `TRUE` if
+at least one row is produced, otherwise returns `FALSE`. Never produces `NULL`.
+
+#### Definitions
+
++ `gql_query_expr`: A GQL query expression.
++ `match_statement`: A pattern matching operation to perform on a graph.
+ For more information, see [`MATCH` statement][match-statement].
++ `graph_pattern`: A pattern to match in a graph.
+ For more information, see [graph pattern definition][graph-pattern-definition].
+
+#### Return type
+
+`BOOL`
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following query checks whether any `Person` whose name starts with the
+letter `'D'` owns an `Account`. The subquery contains a graph query expression.
+
+```sql
+GRAPH FinGraph
+RETURN EXISTS {
+ MATCH (p:Person)-[o:Owns]->(a:Account)
+ WHERE p.Name LIKE 'D%'
+ RETURN p.Name
+ LIMIT 1
+} AS results;
+
+/*---------+
+ | results |
+ +---------+
+ | true |
+ +---------*/
+```
+
+You can include a `MATCH` statement or a graph pattern in an `EXISTS`
+subquery. The following examples include two ways to construct the subquery
+and produce similar results:
+
+```sql
+GRAPH FinGraph
+RETURN EXISTS {
+ MATCH (p:Person)-[o:Owns]->(a:Account)
+ WHERE p.Name LIKE 'D%'
+} AS results;
+
+/*---------+
+ | results |
+ +---------+
+ | true |
+ +---------*/
+```
+
+```sql
+GRAPH FinGraph
+RETURN EXISTS {
+ (p:Person)-[o:Owns]->(a:Account) WHERE p.Name LIKE 'D%'
+} AS results;
+
+/*---------+
+ | results |
+ +---------+
+ | true |
+ +---------*/
+```
+
+[match-statement]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#gql_match
+
+[graph-pattern-definition]: https://github.com/google/zetasql/blob/master/docs/graph-patterns.md#graph_pattern_definition
+
+## `IN` subquery
+
+
+
+value [ NOT ] IN { gql_query_expr }
+
+
+#### Description
+
+Checks if `value` is present in the subquery result. Returns `TRUE` if the
+result contains the `value`, otherwise returns `FALSE`.
+
+#### Definitions
+
++ `value`: The value to look for in the subquery result.
++ `IN`: `TRUE` if the value is in the subquery result, otherwise
+ `FALSE`.
++ `NOT IN`: `FALSE` if the value is in the subquery result,
+ otherwise `TRUE`.
++ `gql_query_expr`: A GQL query expression.
+
+#### Details
+
+The subquery result must have a single column and that column type must
+be comparable to the `value` type. If not, an error is returned.
+
+#### Return type
+
+`BOOL`
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following query checks if `'Dana'` is a name of a person who owns an
+account.
+
+```sql
+GRAPH FinGraph
+RETURN 'Dana' IN {
+ MATCH (p:Person)-[o:Owns]->(a:Account)
+ RETURN p.name
+} AS results;
+
+/*---------+
+ | results |
+ +---------+
+ | true |
+ +---------*/
+```
+
+## `VALUE` subquery
+
+
+
+VALUE { gql_query_expr }
+
+
+#### Description
+
+A subquery expression that produces a scalar value.
+
+#### Definitions
+
++ `gql_query_expr`: A GQL query expression.
+
+#### Details
+
+The result of the subquery must have a single column. If the subquery returns
+more than one column, the query fails with an analysis error. The result type of
+the subquery expression is the produced column type. If the subquery produces
+exactly one row, that single value is the subquery expression result. If the
+subquery returns zero rows, the subquery expression result is `NULL`. If the
+subquery returns more than one row, the query fails with a runtime error.
+
+#### Return type
+
+The same as the column type in the subquery result.
+
+#### Examples
+
+Note: The examples in this section reference a property graph called
+[`FinGraph`][fin-graph].
+
+[fin-graph]: https://github.com/google/zetasql/blob/master/docs/graph-schema-statements.md#fin_graph
+
+The following query returns the name of any `Person` whose name contains the
+character `'e'`:
+
+```sql
+GRAPH FinGraph
+RETURN VALUE {
+ MATCH (p:Person)
+ WHERE p.name LIKE '%e%'
+ RETURN p.name
+ LIMIT 1
+} AS results;
+
+/*---------+
+ | results |
+ +---------+
+ | Alex    |
+ +---------*/
+```
+
diff --git a/docs/hash_functions.md b/docs/hash_functions.md
index ac34eb715..6c16ad3e1 100644
--- a/docs/hash_functions.md
+++ b/docs/hash_functions.md
@@ -6,7 +6,7 @@
ZetaSQL supports the following hash functions.
-### Function list
+## Function list
@@ -18,8 +18,7 @@ ZetaSQL supports the following hash functions.
- FARM_FINGERPRINT
-
+ | FARM_FINGERPRINT
|
Computes the fingerprint of a STRING or
@@ -28,8 +27,7 @@ ZetaSQL supports the following hash functions.
|
- MD5
-
+ | MD5
|
Computes the hash of a STRING or
@@ -38,8 +36,7 @@ ZetaSQL supports the following hash functions.
|
- SHA1
-
+ | SHA1
|
Computes the hash of a STRING or
@@ -48,8 +45,7 @@ ZetaSQL supports the following hash functions.
|
- SHA256
-
+ | SHA256
|
Computes the hash of a STRING or
@@ -58,8 +54,7 @@ ZetaSQL supports the following hash functions.
|
- SHA512
-
+ | SHA512
|
Computes the hash of a STRING or
@@ -70,7 +65,7 @@ ZetaSQL supports the following hash functions.
|
-### `FARM_FINGERPRINT`
+## `FARM_FINGERPRINT`
```
FARM_FINGERPRINT(value)
@@ -111,7 +106,7 @@ FROM example;
[hash-link-to-farmhash-github]: https://github.com/google/farmhash
-### `MD5`
+## `MD5`
```
MD5(input)
@@ -146,7 +141,7 @@ SELECT MD5("Hello World") as md5;
[hash-link-to-md5-wikipedia]: https://en.wikipedia.org/wiki/MD5
-### `SHA1`
+## `SHA1`
```
SHA1(input)
@@ -181,7 +176,7 @@ SELECT SHA1("Hello World") as sha1;
[hash-link-to-sha-1-wikipedia]: https://en.wikipedia.org/wiki/SHA-1
-### `SHA256`
+## `SHA256`
```
SHA256(input)
@@ -207,7 +202,7 @@ SELECT SHA256("Hello World") as sha256;
[hash-link-to-sha-2-wikipedia]: https://en.wikipedia.org/wiki/SHA-2
-### `SHA512`
+## `SHA512`
```
SHA512(input)
diff --git a/docs/hll_functions.md b/docs/hll_functions.md
index b08f213be..bf27c2b8e 100644
--- a/docs/hll_functions.md
+++ b/docs/hll_functions.md
@@ -24,7 +24,7 @@ and custom precision.
ZetaSQL supports the following HLL++ functions:
-### Function list
+## Function list
@@ -36,8 +36,7 @@ ZetaSQL supports the following HLL++ functions:
- HLL_COUNT.EXTRACT
-
+ | HLL_COUNT.EXTRACT
|
Extracts a cardinality estimate of an HLL++ sketch.
@@ -45,8 +44,7 @@ ZetaSQL supports the following HLL++ functions:
|
- HLL_COUNT.INIT
-
+ | HLL_COUNT.INIT
|
Aggregates values of the same underlying type into a new HLL++ sketch.
@@ -54,8 +52,7 @@ ZetaSQL supports the following HLL++ functions:
|
- HLL_COUNT.MERGE
-
+ | HLL_COUNT.MERGE
|
Merges HLL++ sketches of the same underlying type into a new sketch, and
@@ -64,8 +61,7 @@ ZetaSQL supports the following HLL++ functions:
|
- HLL_COUNT.MERGE_PARTIAL
-
+ | HLL_COUNT.MERGE_PARTIAL
|
Merges HLL++ sketches of the same underlying type into a new sketch.
@@ -75,7 +71,7 @@ ZetaSQL supports the following HLL++ functions:
|
-### `HLL_COUNT.EXTRACT`
+## `HLL_COUNT.EXTRACT`
```
HLL_COUNT.EXTRACT(sketch)
@@ -133,7 +129,7 @@ FROM
[hll-link-to-research-whitepaper]: https://research.google.com/pubs/pub40671.html
-### `HLL_COUNT.INIT`
+## `HLL_COUNT.INIT`
```
HLL_COUNT.INIT(input [, precision])
@@ -205,7 +201,7 @@ GROUP BY country;
[precision_hll]: https://github.com/google/zetasql/blob/master/docs/sketches.md#precision_hll
-### `HLL_COUNT.MERGE`
+## `HLL_COUNT.MERGE`
```
HLL_COUNT.MERGE(sketch)
@@ -267,7 +263,7 @@ FROM
[hll-link-to-research-whitepaper]: https://research.google.com/pubs/pub40671.html
-### `HLL_COUNT.MERGE_PARTIAL`
+## `HLL_COUNT.MERGE_PARTIAL`
```
HLL_COUNT.MERGE_PARTIAL(sketch)
diff --git a/docs/interval_functions.md b/docs/interval_functions.md
index 016a93c7f..0504a572a 100644
--- a/docs/interval_functions.md
+++ b/docs/interval_functions.md
@@ -6,7 +6,7 @@
ZetaSQL supports the following interval functions.
-### Function list
+## Function list
@@ -18,8 +18,7 @@ ZetaSQL supports the following interval functions.
- EXTRACT
-
+ | EXTRACT
|
Extracts part of an INTERVAL value.
@@ -27,8 +26,7 @@ ZetaSQL supports the following interval functions.
|
- JUSTIFY_DAYS
-
+ | JUSTIFY_DAYS
|
Normalizes the day part of an INTERVAL value.
@@ -36,8 +34,7 @@ ZetaSQL supports the following interval functions.
|
- JUSTIFY_HOURS
-
+ | JUSTIFY_HOURS
|
Normalizes the time part of an INTERVAL value.
@@ -45,8 +42,7 @@ ZetaSQL supports the following interval functions.
|
- JUSTIFY_INTERVAL
-
+ | JUSTIFY_INTERVAL
|
Normalizes the day and time parts of an INTERVAL value.
@@ -54,8 +50,7 @@ ZetaSQL supports the following interval functions.
|
- MAKE_INTERVAL
-
+ | MAKE_INTERVAL
|
Constructs an INTERVAL value.
@@ -65,7 +60,7 @@ ZetaSQL supports the following interval functions.
|
-### `EXTRACT`
+## `EXTRACT`
```sql
EXTRACT(part FROM interval_expression)
@@ -141,7 +136,7 @@ FROM
*------+--------*/
```
-### `JUSTIFY_DAYS`
+## `JUSTIFY_DAYS`
```sql
JUSTIFY_DAYS(interval_expression)
@@ -173,7 +168,7 @@ SELECT
*--------------+--------------+-------------+---------------+--------------*/
```
-### `JUSTIFY_HOURS`
+## `JUSTIFY_HOURS`
```sql
JUSTIFY_HOURS(interval_expression)
@@ -204,7 +199,7 @@ SELECT
*--------------+--------------+--------------+-----------------*/
```
-### `JUSTIFY_INTERVAL`
+## `JUSTIFY_INTERVAL`
```sql
JUSTIFY_INTERVAL(interval_expression)
@@ -230,7 +225,7 @@ SELECT JUSTIFY_INTERVAL(INTERVAL '29 49:00:00' DAY TO SECOND) AS i
*-------------*/
```
-### `MAKE_INTERVAL`
+## `MAKE_INTERVAL`
```sql
MAKE_INTERVAL(
diff --git a/docs/json_functions.md b/docs/json_functions.md
index dfbd81577..14c4e871b 100644
--- a/docs/json_functions.md
+++ b/docs/json_functions.md
@@ -7,7 +7,7 @@
ZetaSQL supports the following functions, which can retrieve and
transform JSON data.
-### Categories
+## Categories
The JSON functions are grouped into the following categories based on their
behavior:
@@ -201,6 +201,9 @@ behavior:
TO_JSON
+ SAFE_TO_JSON
+
+
TO_JSON_STRING
@@ -263,7 +266,7 @@ behavior:
-### Function list
+## Function list
@@ -275,25 +278,28 @@ behavior:
- BOOL
-
+ | BOOL
|
Converts a JSON boolean to a SQL BOOL value.
+
|
- BOOL
-
+ | BOOL_ARRAY
|
- Converts a JSON array of booleans to a SQL ARRAY<BOOL> value. |
+
+ Converts a JSON array of booleans to a
+ SQL ARRAY<BOOL> value.
+
+ |
- DOUBLE
+ DOUBLE
|
@@ -306,7 +312,7 @@ behavior:
- DOUBLE_ARRAY
+ DOUBLE_ARRAY
|
@@ -316,7 +322,7 @@ behavior:
- FLOAT
+ FLOAT
|
@@ -326,7 +332,7 @@ behavior:
- FLOAT_ARRAY
+ FLOAT_ARRAY
|
@@ -334,61 +340,71 @@ behavior:
- INT32
-
+ | INT32
|
Converts a JSON number to a SQL INT32 value.
+
|
- INT32_ARRAY
-
+ | INT32_ARRAY
|
- Converts a JSON number to a SQL ARRAY<INT32> value. |
+
+ Converts a JSON number to a SQL ARRAY<INT32> value.
+
+ |
- INT64
-
+ | INT64
|
Converts a JSON number to a SQL INT64 value.
+
|
- INT64_ARRAY
-
+ | INT64_ARRAY
|
- Converts a JSON array of numbers to a SQL ARRAY<INT64> value. |
+
+ Converts a JSON array of numbers to a
+ SQL ARRAY<INT64> value.
+
+ |
- JSON_ARRAY
-
+ | JSON_ARRAY
|
- Creates a JSON array. |
+
+ Creates a JSON array.
+
+ |
- JSON_ARRAY_APPEND
-
+ | JSON_ARRAY_APPEND
|
- Appends JSON data to the end of a JSON array. |
+
+ Appends JSON data to the end of a JSON array.
+
+ |
- JSON_ARRAY_INSERT
-
+ | JSON_ARRAY_INSERT
|
- Inserts JSON data into a JSON array. |
+
+ Inserts JSON data into a JSON array.
+
+ |
- JSON_EXTRACT
-
+ | JSON_EXTRACT
|
(Deprecated)
@@ -402,8 +418,7 @@ behavior:
|
- JSON_EXTRACT_ARRAY
-
+ | JSON_EXTRACT_ARRAY
|
(Deprecated)
@@ -413,12 +428,12 @@ behavior:
ARRAY<JSON>
value.
+
|
- JSON_EXTRACT_SCALAR
-
+ | JSON_EXTRACT_SCALAR
|
(Deprecated)
@@ -428,26 +443,24 @@ behavior:
|
- JSON_EXTRACT_STRING_ARRAY
-
+ | JSON_EXTRACT_STRING_ARRAY
|
(Deprecated)
Extracts a JSON array of scalar values and converts it to a SQL
ARRAY<STRING> value.
+
|
- JSON_OBJECT
-
+ | JSON_OBJECT
|
Creates a JSON object. |
- JSON_QUERY
-
+ | JSON_QUERY
|
Extracts a JSON value and converts it to a SQL
@@ -460,8 +473,7 @@ behavior:
|
- JSON_QUERY_ARRAY
-
+ | JSON_QUERY_ARRAY
|
Extracts a JSON array and converts it to
@@ -470,33 +482,30 @@ behavior:
ARRAY<JSON>
value.
+
|
- JSON_REMOVE
-
+ | JSON_REMOVE
|
Produces JSON with the specified JSON data removed. |
- JSON_SET
-
+ | JSON_SET
|
Inserts or replaces JSON data. |
- JSON_STRIP_NULLS
-
+ | JSON_STRIP_NULLS
|
Removes JSON nulls from JSON objects and JSON arrays. |
- JSON_TYPE
-
+ | JSON_TYPE
|
Gets the JSON type of the outermost JSON value and converts the name of
@@ -505,8 +514,7 @@ behavior:
|
- JSON_VALUE
-
+ | JSON_VALUE
|
Extracts a JSON scalar value and converts it to a SQL
@@ -515,238 +523,282 @@ behavior:
|
- JSON_VALUE_ARRAY
-
+ | JSON_VALUE_ARRAY
|
Extracts a JSON array of scalar values and converts it to a SQL
ARRAY<STRING> value.
+
|
- LAX_BOOL
-
+ | LAX_BOOL
|
Attempts to convert a JSON value to a SQL BOOL value.
+
|
- LAX_BOOL_ARRAY
-
+ | LAX_BOOL_ARRAY
|
- Attempts to convert a JSON value to a SQL ARRAY<BOOL> value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<BOOL> value.
+
+ |
- LAX_DOUBLE
+ LAX_DOUBLE
|
Attempts to convert a JSON value to a
SQL DOUBLE value.
+
|
- LAX_DOUBLE_ARRAY
+ LAX_DOUBLE_ARRAY
|
- Attempts to convert a JSON value to a SQL ARRAY<DOUBLE> value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<DOUBLE> value.
+
+ |
- LAX_FLOAT
+ LAX_FLOAT
|
- Attempts to convert a JSON value to a SQL FLOAT value. |
+
+ Attempts to convert a JSON value to a
+ SQL FLOAT value.
+
+ |
- LAX_FLOAT_ARRAY
+ LAX_FLOAT_ARRAY
|
- Attempts to convert a JSON value to a SQL ARRAY>FLOAT< value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<FLOAT> value.
+
+ |
- LAX_INT32
-
+ | LAX_INT32
|
Attempts to convert a JSON value to a SQL INT32 value.
+
|
- LAX_INT32_ARRAY
-
+ | LAX_INT32_ARRAY
|
- Attempts to convert a JSON value to a SQL ARRAY<INT32> value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<INT32> value.
+
+ |
- LAX_INT64
-
+ | LAX_INT64
|
Attempts to convert a JSON value to a SQL INT64 value.
+
|
- LAX_INT64_ARRAY
-
+ | LAX_INT64_ARRAY
|
- Attempts to convert a JSON value to a SQL ARRAY<INT64> value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<INT64> value.
+
+ |
- LAX_STRING
-
+ | LAX_STRING
|
Attempts to convert a JSON value to a SQL STRING value.
+
|
- LAX_STRING_ARRAY
-
+ | LAX_STRING_ARRAY
|
- Attempts to convert a JSON value to a SQL ARRAY<STRING> value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<STRING> value.
+
+ |
- LAX_UINT32
-
+ | LAX_UINT32
|
Attempts to convert a JSON value to a SQL UINT32 value.
+
|
- LAX_UINT32_ARRAY
-
+ | LAX_UINT32_ARRAY
|
- Attempts to convert a JSON value to a SQL ARRAY<UINT32> value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<UINT32> value.
+
+ |
- LAX_UINT64
-
+ | LAX_UINT64
|
Attempts to convert a JSON value to a SQL UINT64 value.
+
|
- LAX_UINT64_ARRAY
-
+ | LAX_UINT64_ARRAY
|
- Attempts to convert a JSON value to a SQL ARRAY<UINT64> value. |
+
+ Attempts to convert a JSON value to a
+ SQL ARRAY<UINT64> value.
+
+ |
- PARSE_JSON
-
+ | PARSE_JSON
|
Converts a JSON-formatted STRING value to a
JSON value.
+
|
- STRING
-
+ | SAFE_TO_JSON
|
- Converts a JSON string to a SQL STRING value.
+ Similar to the `TO_JSON` function, but for each unsupported field in the
+ input argument, produces a JSON null instead of an error.
|
+ STRING (JSON)
+ |
- STRING_ARRAY
-
+ Converts a JSON string to a SQL STRING value.
+
|
+
+
+
+ STRING_ARRAY
+ |
Converts a JSON array of strings to a SQL ARRAY<STRING>
value.
+
|
- TO_JSON
-
+ | TO_JSON
|
Converts a SQL value to a JSON value.
+
|
- TO_JSON_STRING
-
+ | TO_JSON_STRING
|
Converts a SQL value to a JSON-formatted STRING value.
+
|
- UINT32
-
+ | UINT32
|
Converts a JSON number to a SQL UINT32 value.
+
|
- UINT32_ARRAY
-
+ | UINT32_ARRAY
|
- Converts a JSON number to a SQL ARRAY<UINT32> value. |
+
+ Converts a JSON number to a
+ SQL ARRAY<UINT32> value.
+
+ |
- UINT64
-
+ | UINT64
|
Converts a JSON number to a SQL UINT64 value.
+
|
- UINT64_ARRAY
-
+ | UINT64_ARRAY
|
- Converts a JSON number to a SQL ARRAY<UINT64> value. |
+
+ Converts a JSON number to a SQL ARRAY<UINT64> value.
+
+ |
-### `BOOL`
+## `BOOL`
```sql
@@ -803,7 +855,7 @@ SELECT BOOL(JSON 'null') AS result; -- Throws an error
SELECT SAFE.BOOL(JSON '123') AS result; -- Returns a SQL NULL
```
-### `BOOL_ARRAY`
+## `BOOL_ARRAY`
```sql
@@ -850,7 +902,7 @@ SELECT BOOL_ARRAY(JSON '[null]') AS result; -- Throws an error
SELECT BOOL_ARRAY(JSON 'null') AS result; -- Throws an error
```
-### `DOUBLE`
+## `DOUBLE`
```sql
@@ -949,7 +1001,7 @@ SELECT DOUBLE(JSON '18446744073709551615', wide_number_mode=>'exact') as result;
SELECT SAFE.DOUBLE(JSON '"strawberry"') AS result;
```
-### `DOUBLE_ARRAY`
+## `DOUBLE_ARRAY`
```sql
@@ -1036,7 +1088,7 @@ SELECT DOUBLE_ARRAY(JSON '[123.4]', wide_number_mode=>'exac') as result;
SELECT DOUBLE_ARRAY(JSON '[18446744073709551615]', wide_number_mode=>'exact') as result;
```
-### `FLOAT`
+## `FLOAT`
```sql
@@ -1135,7 +1187,7 @@ SELECT FLOAT(JSON '16777217', wide_number_mode=>'exact') as result;
SELECT SAFE.FLOAT(JSON '"strawberry"') AS result;
```
-### `FLOAT_ARRAY`
+## `FLOAT_ARRAY`
```sql
@@ -1222,7 +1274,7 @@ SELECT FLOAT_ARRAY(JSON '[123.4]', wide_number_mode=>'exac') as result;
SELECT FLOAT_ARRAY(JSON '[16777217]', wide_number_mode=>'exact') as result;
```
-### `INT32`
+## `INT32`
```sql
@@ -1291,7 +1343,7 @@ SELECT INT32(JSON 'null') AS result; -- Throws an error
SELECT SAFE.INT32(JSON '"strawberry"') AS result; -- Returns a SQL NULL
```
-### `INT32_ARRAY`
+## `INT32_ARRAY`
```sql
@@ -1350,7 +1402,7 @@ SELECT INT32_ARRAY(JSON '[null]') AS result; -- Throws an error
SELECT INT32_ARRAY(JSON 'null') AS result; -- Throws an error
```
-### `INT64`
+## `INT64`
```sql
@@ -1419,7 +1471,7 @@ SELECT INT64(JSON 'null') AS result; -- Throws an error
SELECT SAFE.INT64(JSON '"strawberry"') AS result; -- Returns a SQL NULL
```
-### `INT64_ARRAY`
+## `INT64_ARRAY`
```sql
@@ -1478,7 +1530,7 @@ SELECT INT64_ARRAY(JSON '[null]') AS result; -- Throws an error
SELECT INT64_ARRAY(JSON 'null') AS result; -- Throws an error
```
-### `JSON_ARRAY`
+## `JSON_ARRAY`
```sql
JSON_ARRAY([value][, ...])
@@ -1575,7 +1627,7 @@ SELECT JSON_ARRAY() AS json_data
*-----------*/
```
-### `JSON_ARRAY_APPEND`
+## `JSON_ARRAY_APPEND`
```sql
JSON_ARRAY_APPEND(
@@ -1766,7 +1818,7 @@ SELECT JSON_ARRAY_APPEND(JSON '{"a": 1}', '$.b', 2) AS json_data
*-----------*/
```
-### `JSON_ARRAY_INSERT`
+## `JSON_ARRAY_INSERT`
```sql
JSON_ARRAY_INSERT(
@@ -1953,7 +2005,7 @@ SELECT JSON_ARRAY_INSERT(JSON '1', '$[0]', 'r1') AS json_data
*-----------*/
```
-### `JSON_EXTRACT`
+## `JSON_EXTRACT`
Note: This function is deprecated. Consider using [JSON_QUERY][json-query].
@@ -2199,7 +2251,7 @@ SELECT JSON_EXTRACT(JSON '{"a": null}', "$.b"); -- Returns a SQL NULL
[differences-json-and-string]: #differences_json_and_string
-### `JSON_EXTRACT_ARRAY`
+## `JSON_EXTRACT_ARRAY`
Note: This function is deprecated. Consider using
[JSON_QUERY_ARRAY][json-query-array].
@@ -2403,7 +2455,7 @@ SELECT JSON_EXTRACT_ARRAY('{"a": "foo", "b": []}', '$.b') AS result;
[differences-json-and-string]: #differences_json_and_string
-### `JSON_EXTRACT_SCALAR`
+## `JSON_EXTRACT_SCALAR`
Note: This function is deprecated. Consider using [JSON_VALUE][json-value].
@@ -2513,7 +2565,7 @@ SELECT JSON_EXTRACT_SCALAR('{"a.b": {"c": "world"}}', "$['a.b'].c") AS hello;
[differences-json-and-string]: #differences_json_and_string
-### `JSON_EXTRACT_STRING_ARRAY`
+## `JSON_EXTRACT_STRING_ARRAY`
Note: This function is deprecated. Consider using
[JSON_VALUE_ARRAY][json-value-array].
@@ -2744,7 +2796,7 @@ SELECT JSON_EXTRACT_STRING_ARRAY('["world", 1, null]') AS result;
[differences-json-and-string]: #differences_json_and_string
-### `JSON_OBJECT`
+## `JSON_OBJECT`
+ [Signature 1](#json_object_signature1):
`JSON_OBJECT([json_key, json_value][, ...])`
@@ -3001,7 +3053,7 @@ SELECT JSON_OBJECT(['a', 'b'], CAST(NULL AS ARRAY)) AS json_data
[json-encodings]: #json_encodings
-### `JSON_QUERY`
+## `JSON_QUERY`
```sql
JSON_QUERY(json_string_expr, json_path)
@@ -3253,7 +3305,7 @@ SELECT JSON_QUERY(JSON '{"a": null}', "$.b"); -- Returns a SQL NULL
[JSONPath-mode]: #JSONPath_mode
-### `JSON_QUERY_ARRAY`
+## `JSON_QUERY_ARRAY`
```sql
JSON_QUERY_ARRAY(json_string_expr[, json_path])
@@ -3449,7 +3501,7 @@ SELECT JSON_QUERY_ARRAY('{"a": "foo", "b": []}', '$.b') AS result;
[differences-json-and-string]: #differences_json_and_string
-### `JSON_REMOVE`
+## `JSON_REMOVE`
```sql
JSON_REMOVE(json_expr, json_path[, ...])
@@ -3614,7 +3666,7 @@ SELECT JSON_REMOVE(JSON 'null', '$.a.b') AS json_data
*-----------*/
```
-### `JSON_SET`
+## `JSON_SET`
```sql
JSON_SET(
@@ -3904,7 +3956,7 @@ SELECT JSON_SET(
*---------------------------------------------------*/
```
-### `JSON_STRIP_NULLS`
+## `JSON_STRIP_NULLS`
```sql
JSON_STRIP_NULLS(
@@ -4088,7 +4140,7 @@ SELECT JSON_STRIP_NULLS(JSON 'null') AS json_data
*-----------*/
```
-### `JSON_TYPE`
+## `JSON_TYPE`
```sql
@@ -4146,7 +4198,7 @@ FROM
*----------------------------------+---------*/
```
-### `JSON_VALUE`
+## `JSON_VALUE`
```sql
JSON_VALUE(json_string_expr[, json_path])
@@ -4252,7 +4304,7 @@ SELECT JSON_VALUE('{"a.b": {"c": "world"}}', '$."a.b".c') AS hello;
[differences-json-and-string]: #differences_json_and_string
-### `JSON_VALUE_ARRAY`
+## `JSON_VALUE_ARRAY`
```sql
JSON_VALUE_ARRAY(json_string_expr[, json_path])
@@ -4479,7 +4531,7 @@ SELECT JSON_VALUE_ARRAY('["world", null, 1]') AS result;
[differences-json-and-string]: #differences_json_and_string
-### `LAX_BOOL`
+## `LAX_BOOL`
```sql
@@ -4632,7 +4684,7 @@ SELECT LAX_BOOL(JSON '-1.1') AS result;
*--------*/
```
-### `LAX_BOOL_ARRAY`
+## `LAX_BOOL_ARRAY`
```sql
@@ -4771,7 +4823,7 @@ SELECT LAX_BOOL_ARRAY(JSON 'true') AS result;
*--------*/
```
-### `LAX_DOUBLE`
+## `LAX_DOUBLE`
```sql
@@ -4994,7 +5046,7 @@ SELECT LAX_DOUBLE(JSON '"foo"') AS result;
*--------*/
```
-### `LAX_DOUBLE_ARRAY`
+## `LAX_DOUBLE_ARRAY`
```sql
@@ -5176,7 +5228,7 @@ SELECT LAX_DOUBLE_ARRAY(JSON '9.8') AS result;
*--------*/
```
-### `LAX_FLOAT`
+## `LAX_FLOAT`
```sql
@@ -5398,7 +5450,7 @@ SELECT LAX_FLOAT(JSON '"foo"') AS result;
*--------*/
```
-### `LAX_FLOAT_ARRAY`
+## `LAX_FLOAT_ARRAY`
```sql
@@ -5589,7 +5641,7 @@ SELECT LAX_FLOAT_ARRAY(JSON '9.8') AS result;
*--------*/
```
-### `LAX_INT32`
+## `LAX_INT32`
```sql
@@ -5801,7 +5853,7 @@ SELECT LAX_INT32(JSON '"foo"') AS result;
*--------*/
```
-### `LAX_INT32_ARRAY`
+## `LAX_INT32_ARRAY`
```sql
@@ -5960,7 +6012,7 @@ SELECT LAX_INT32_ARRAY(JSON '9.8') AS result;
*--------*/
```
-### `LAX_INT64`
+## `LAX_INT64`
```sql
@@ -6172,7 +6224,7 @@ SELECT LAX_INT64(JSON '"foo"') AS result;
*--------*/
```
-### `LAX_INT64_ARRAY`
+## `LAX_INT64_ARRAY`
```sql
@@ -6331,7 +6383,7 @@ SELECT LAX_INT64_ARRAY(JSON '9.8') AS result;
*--------*/
```
-### `LAX_STRING`
+## `LAX_STRING`
```sql
@@ -6469,7 +6521,7 @@ SELECT LAX_STRING(JSON '1e100') AS result;
*--------*/
```
-### `LAX_STRING_ARRAY`
+## `LAX_STRING_ARRAY`
```sql
@@ -6598,7 +6650,7 @@ SELECT LAX_STRING_ARRAY(JSON '9.8') AS result;
*--------*/
```
-### `LAX_UINT32`
+## `LAX_UINT32`
```sql
@@ -6820,7 +6872,7 @@ SELECT LAX_UINT32(JSON '"foo"') AS result;
*--------*/
```
-### `LAX_UINT32_ARRAY`
+## `LAX_UINT32_ARRAY`
```sql
@@ -6981,7 +7033,7 @@ SELECT LAX_UINT32_ARRAY(JSON '9.8') AS result;
*--------*/
```
-### `LAX_UINT64`
+## `LAX_UINT64`
```sql
@@ -7203,7 +7255,7 @@ SELECT LAX_UINT64(JSON '"foo"') AS result;
*--------*/
```
-### `LAX_UINT64_ARRAY`
+## `LAX_UINT64_ARRAY`
```sql
@@ -7362,7 +7414,7 @@ SELECT LAX_UINT64_ARRAY(JSON '9.8') AS result;
*--------*/
```
-### `PARSE_JSON`
+## `PARSE_JSON`
```sql
PARSE_JSON(
@@ -7467,7 +7519,76 @@ SELECT PARSE_JSON('"red"') AS json_data;
*------------------------------*/
```
-### `STRING`
+## `SAFE_TO_JSON`
+
+```sql
+SAFE_TO_JSON(sql_value)
+```
+
+**Description**
+
+Similar to the `TO_JSON` function, but for each unsupported field in the
+input argument, produces a JSON null instead of an error.
+
+Arguments:
+
++ `sql_value`: The SQL value to convert to a JSON value. You can review the
+ ZetaSQL data types that this function supports and their
+ [JSON encodings][json-encodings].
+
+**Return type**
+
+`JSON`
+
+**Example**
+
+The following queries are functionally the same, except that `SAFE_TO_JSON`
+produces a JSON null instead of an error when a hypothetical unsupported
+data type is encountered:
+
+```sql
+-- Produces a JSON null.
+SELECT SAFE_TO_JSON(CAST(b'' AS UNSUPPORTED_TYPE)) as result;
+```
+
+```sql
+-- Produces an error.
+SELECT TO_JSON(CAST(b'' AS UNSUPPORTED_TYPE), stringify_wide_numbers=>TRUE) as result;
+```
+
+In the following query, the value for `ut` is ignored because the value is an
+unsupported type:
+
+```sql
+SELECT SAFE_TO_JSON(STRUCT(CAST(b'' AS UNSUPPORTED_TYPE) AS ut)) AS result;
+
+/*--------------*
+ | result |
+ +--------------+
+ | {"ut": null} |
+ *--------------*/
+```
+
+The following array produces a JSON null instead of an error because the data
+type for the array is not supported.
+
+```sql
+SELECT SAFE_TO_JSON([
+ CAST(b'' AS UNSUPPORTED_TYPE),
+ CAST(b'' AS UNSUPPORTED_TYPE),
+ CAST(b'' AS UNSUPPORTED_TYPE),
+ ]) AS result;
+
+/*------------*
+ | result |
+ +------------+
+ | null |
+ *------------*/
+```
+
+[json-encodings]: #json_encodings
+
+## `STRING`
```sql
@@ -7524,7 +7645,7 @@ SELECT STRING(JSON 'null') AS result; -- Throws an error
SELECT SAFE.STRING(JSON '123') AS result; -- Returns a SQL NULL
```
-### `STRING_ARRAY`
+## `STRING_ARRAY`
```sql
@@ -7571,7 +7692,7 @@ SELECT STRING_ARRAY(JSON '[null]') AS result; -- Throws an error
SELECT STRING_ARRAY(JSON 'null') AS result; -- Throws an error
```
-### `TO_JSON`
+## `TO_JSON`
```sql
TO_JSON(
@@ -7707,7 +7828,7 @@ FROM T1 AS t;
[json-encodings]: #json_encodings
-### `TO_JSON_STRING`
+## `TO_JSON_STRING`
```sql
TO_JSON_STRING(value[, pretty_print])
@@ -7763,7 +7884,7 @@ SELECT TO_JSON_STRING(STRUCT(1 AS id, [10,20] AS coordinates), true) AS json_dat
[json-encodings]: #json_encodings
-### `UINT32`
+## `UINT32`
```sql
@@ -7833,7 +7954,7 @@ SELECT UINT32(JSON 'null') AS result; -- Throws an error
SELECT SAFE.UINT32(JSON '"strawberry"') AS result; -- Returns a SQL NULL
```
-### `UINT32_ARRAY`
+## `UINT32_ARRAY`
```sql
@@ -7893,7 +8014,7 @@ SELECT UINT32_ARRAY(JSON '[null]') AS result; -- Throws an error
SELECT UINT32_ARRAY(JSON 'null') AS result; -- Throws an error
```
-### `UINT64`
+## `UINT64`
```sql
@@ -7963,7 +8084,7 @@ SELECT UINT64(JSON 'null') AS result; -- Throws an error
SELECT SAFE.UINT64(JSON '"strawberry"') AS result; -- Returns a SQL NULL
```
-### `UINT64_ARRAY`
+## `UINT64_ARRAY`
```sql
@@ -8023,6 +8144,106 @@ SELECT UINT64_ARRAY(JSON '[null]') AS result; -- Throws an error
SELECT UINT64_ARRAY(JSON 'null') AS result; -- Throws an error
```
+## Supplemental materials
+
+### Differences between the JSON and JSON-formatted STRING types
+
+
+Many JSON functions accept two input types:
+
++ [`JSON`][JSON-type] type
++ `STRING` type
+
+The `STRING` version of the extraction functions behaves differently than the
+`JSON` version, mainly because `JSON` type values are always validated whereas
+JSON-formatted `STRING` type values are not.
+
+#### Non-validation of `STRING` inputs
+
+The following `STRING` is invalid JSON because it is missing a trailing `}`:
+
+```
+{"hello": "world"
+```
+
+The JSON function reads the input from the beginning and stops as soon as the
+field to extract is found, without reading the remainder of the input. A parsing
+error is not produced.
+
+With the `JSON` type, however, `JSON '{"hello": "world"'` returns a parsing
+error.
+
+For example:
+
+```sql
+SELECT JSON_VALUE('{"hello": "world"', "$.hello") AS hello;
+
+/*-------*
+ | hello |
+ +-------+
+ | world |
+ *-------*/
+```
+
+```sql
+SELECT JSON_VALUE(JSON '{"hello": "world"', "$.hello") AS hello;
+-- An error is returned: Invalid JSON literal: syntax error while parsing
+-- object - unexpected end of input; expected '}'
+```
+
+#### No strict validation of extracted values
+
+In the following examples, duplicated keys are not removed when using a
+JSON-formatted string. Similarly, keys order is preserved. For the `JSON`
+type, `JSON '{"key": 1, "key": 2}'` will result in `JSON '{"key":1}'` during
+parsing.
+
+```sql
+SELECT JSON_QUERY('{"key": 1, "key": 2}', "$") AS string;
+
+/*-------------------*
+ | string |
+ +-------------------+
+ | {"key":1,"key":2} |
+ *-------------------*/
+```
+
+```sql
+SELECT JSON_QUERY(JSON '{"key": 1, "key": 2}', "$") AS json;
+
+/*-----------*
+ | json |
+ +-----------+
+ | {"key":1} |
+ *-----------*/
+```
+
+#### JSON `null`
+
+When using a JSON-formatted `STRING` type in a JSON function, a JSON `null`
+value is extracted as a SQL `NULL` value.
+
+When using a JSON type in a JSON function, a JSON `null` value returns a JSON
+`null` value.
+
+```sql
+WITH t AS (
+ SELECT '{"name": null}' AS json_string, JSON '{"name": null}' AS json)
+SELECT JSON_QUERY(json_string, "$.name") AS name_string,
+ JSON_QUERY(json_string, "$.name") IS NULL AS name_string_is_null,
+ JSON_QUERY(json, "$.name") AS name_json,
+ JSON_QUERY(json, "$.name") IS NULL AS name_json_is_null
+FROM t;
+
+/*-------------+---------------------+-----------+-------------------*
+ | name_string | name_string_is_null | name_json | name_json_is_null |
+ +-------------+---------------------+-----------+-------------------+
+ | NULL | true | null | false |
+ *-------------+---------------------+-----------+-------------------*/
+```
+
+[JSON-type]: https://github.com/google/zetasql/blob/master/docs/data-types.md#json_type
+
### JSON encodings
@@ -8162,6 +8383,26 @@ The following SQL to JSON encodings are supported:
+
+ INTERVAL |
+ string |
+ SQL input: INTERVAL '10:20:30.52' HOUR TO SECOND
+ JSON output: "PT10H20M30.52S"
+
+ SQL input: INTERVAL 1 SECOND
+ JSON output: "PT1S"
+
+ SQL input: INTERVAL -25 MONTH
+ JSON output: "P-2Y-1M"
+
+ SQL input: INTERVAL '1 5:30' DAY TO MINUTE
+ JSON output: "P1DT5H30M"
+ |
+
+
+
+
+
NUMERIC
@@ -8476,6 +8717,54 @@ The following SQL to JSON encodings are supported:
+ |
+ GRAPH_ELEMENT |
+
+ object
+
+ The object can contain zero or more key-value pairs.
+ Each value is formatted according to its type.
+
+
+ For TO_JSON , graph
+ element (node or edge) objects are supported.
+
+
+ -
+ The graph element identifier is only valid within the scope of the
+ same query response and cannot be used to correlate entities across
+ different queries.
+
+ -
+ Field names that aren't valid UTF-8 might result in unparseable
+ JSON.
+
+ -
+ The result may include internal key-value pairs that are not defined
+ by the users.
+
+ -
+ The conversion can fail if the object contains values of unsupported
+ types.
+
+
+ |
+
+ SQL:
+
+GRAPH FinGraph
+MATCH (p:Person WHERE p.name = 'Dana')
+RETURN TO_JSON(p) AS dana_json;
+
+ JSON output (truncated):
+
+{"identifier":"ZGFuYQ==","kind":"node","labels":["Person"],"properties":{"id":2,"name":"Dana"}}
+ |
+
+
+
+
+
RANGE |
@@ -8618,105 +8907,3 @@ The JSONPath format supports these operators:
-### Differences between the JSON and JSON-formatted STRING types
-
-
-Many JSON functions accept two input types:
-
-+ [`JSON`][JSON-type] type
-+ `STRING` type
-
-The `STRING` version of the extraction functions behaves differently than the
-`JSON` version, mainly because `JSON` type values are always validated whereas
-JSON-formatted `STRING` type values are not.
-
-#### Non-validation of `STRING` inputs
-
-The following `STRING` is invalid JSON because it is missing a trailing `}`:
-
-```
-{"hello": "world"
-```
-
-The JSON function reads the input from the beginning and stops as soon as the
-field to extract is found, without reading the remainder of the input. A parsing
-error is not produced.
-
-With the `JSON` type, however, `JSON '{"hello": "world"'` returns a parsing
-error.
-
-For example:
-
-```sql
-SELECT JSON_VALUE('{"hello": "world"', "$.hello") AS hello;
-
-/*-------*
- | hello |
- +-------+
- | world |
- *-------*/
-```
-
-```sql
-SELECT JSON_VALUE(JSON '{"hello": "world"', "$.hello") AS hello;
--- An error is returned: Invalid JSON literal: syntax error while parsing
--- object - unexpected end of input; expected '}'
-```
-
-#### No strict validation of extracted values
-
-In the following examples, duplicated keys are not removed when using a
-JSON-formatted string. Similarly, keys order is preserved. For the `JSON`
-type, `JSON '{"key": 1, "key": 2}'` will result in `JSON '{"key":1}'` during
-parsing.
-
-```sql
-SELECT JSON_QUERY('{"key": 1, "key": 2}', "$") AS string;
-
-/*-------------------*
- | string |
- +-------------------+
- | {"key":1,"key":2} |
- *-------------------*/
-```
-
-```sql
-SELECT JSON_QUERY(JSON '{"key": 1, "key": 2}', "$") AS json;
-
-/*-----------*
- | json |
- +-----------+
- | {"key":1} |
- *-----------*/
-```
-
-#### JSON `null`
-
-When using a JSON-formatted `STRING` type in a JSON function, a JSON `null`
-value is extracted as a SQL `NULL` value.
-
-When using a JSON type in a JSON function, a JSON `null` value returns a JSON
-`null` value.
-
-```sql
-WITH t AS (
- SELECT '{"name": null}' AS json_string, JSON '{"name": null}' AS json)
-SELECT JSON_QUERY(json_string, "$.name") AS name_string,
- JSON_QUERY(json_string, "$.name") IS NULL AS name_string_is_null,
- JSON_QUERY(json, "$.name") AS name_json,
- JSON_QUERY(json, "$.name") IS NULL AS name_json_is_null
-FROM t;
-
-/*-------------+---------------------+-----------+-------------------*
- | name_string | name_string_is_null | name_json | name_json_is_null |
- +-------------+---------------------+-----------+-------------------+
- | NULL | true | null | false |
- *-------------+---------------------+-----------+-------------------*/
-```
-
-[JSONPath-format]: #JSONPath_format
-
-[JSON-type]: https://github.com/google/zetasql/blob/master/docs/data-types.md#json_type
-
-[JSONPath-mode]: #JSONPath_mode
-
diff --git a/docs/lexical.md b/docs/lexical.md
index fb3d7b4bf..59915cb14 100644
--- a/docs/lexical.md
+++ b/docs/lexical.md
@@ -1179,7 +1179,7 @@ ZetaSQL follows these rules for case sensitivity:
| String values |
Yes |
- Includes enum value strings
+ Any value of type STRING preserves its case. For example, the result of an expression that produces a STRING value or a column value that's of type STRING .
|
@@ -1206,6 +1206,22 @@ ZetaSQL follows these rules for case sensitivity:
|
+
+ Property graph names |
+ No |
+ |
+
+
+ Property graph label names |
+ No |
+ |
+
+
+ Property graph property names |
+ No |
+ |
+
+
@@ -1262,7 +1278,7 @@ FOLLOWING
FOR
FROM
FULL
-
+GRAPH_TABLE
GROUP
GROUPING
GROUPS
diff --git a/docs/mathematical_functions.md b/docs/mathematical_functions.md
index d6d6170ad..5660216a7 100644
--- a/docs/mathematical_functions.md
+++ b/docs/mathematical_functions.md
@@ -10,7 +10,7 @@ All mathematical functions have the following behaviors:
+ They return `NULL` if any of the input parameters is `NULL`.
+ They return `NaN` if any of the arguments is `NaN`.
-### Categories
+## Categories
@@ -155,7 +155,7 @@ All mathematical functions have the following behaviors:
-### Function list
+## Function list
@@ -167,8 +167,7 @@ All mathematical functions have the following behaviors:
- ABS
-
+ | ABS
|
Computes the absolute value of X .
@@ -176,8 +175,7 @@ All mathematical functions have the following behaviors:
|
- ACOS
-
+ | ACOS
|
Computes the inverse cosine of X .
@@ -185,8 +183,7 @@ All mathematical functions have the following behaviors:
|
- ACOSH
-
+ | ACOSH
|
Computes the inverse hyperbolic cosine of X .
@@ -194,8 +191,7 @@ All mathematical functions have the following behaviors:
|
- ASIN
-
+ | ASIN
|
Computes the inverse sine of X .
@@ -203,8 +199,7 @@ All mathematical functions have the following behaviors:
|
- ASINH
-
+ | ASINH
|
Computes the inverse hyperbolic sine of X .
@@ -212,8 +207,7 @@ All mathematical functions have the following behaviors:
|
- ATAN
-
+ | ATAN
|
Computes the inverse tangent of X .
@@ -221,8 +215,7 @@ All mathematical functions have the following behaviors:
|
- ATAN2
-
+ | ATAN2
|
Computes the inverse tangent of X/Y , using the signs of
@@ -231,8 +224,7 @@ All mathematical functions have the following behaviors:
|
- ATANH
-
+ | ATANH
|
Computes the inverse hyperbolic tangent of X .
@@ -240,17 +232,38 @@ All mathematical functions have the following behaviors:
|
- CBRT
+ | AVG
+ |
+
+ Gets the average of non-NULL values.
+ For more information, see Aggregate functions.
+ |
+
+
+
+ AVG (Differential Privacy)
|
- Computes the cube root of X .
+ DIFFERENTIAL_PRIVACY -supported AVG .
+ Gets the differentially-private average of non-NULL ,
+ non-NaN values in a query with a
+ DIFFERENTIAL_PRIVACY clause.
+
For more information, see Differential privacy functions.
+
|
- CEIL
+ | CBRT
+ |
+
+ Computes the cube root of X .
+ |
+
+
+ CEIL
|
Gets the smallest integral value that is not less than X .
@@ -258,8 +271,7 @@ All mathematical functions have the following behaviors:
|
- CEILING
-
+ | CEILING
|
Synonym of CEIL .
@@ -267,8 +279,7 @@ All mathematical functions have the following behaviors:
|
- COS
-
+ | COS
|
Computes the cosine of X .
@@ -276,8 +287,7 @@ All mathematical functions have the following behaviors:
|
- COSH
-
+ | COSH
|
Computes the hyperbolic cosine of X .
@@ -285,15 +295,13 @@ All mathematical functions have the following behaviors:
|
- COSINE_DISTANCE
-
+ | COSINE_DISTANCE
|
Computes the cosine distance between two vectors. |
- COT
-
+ | COT
|
Computes the cotangent of X .
@@ -301,8 +309,7 @@ All mathematical functions have the following behaviors:
|
- COTH
-
+ | COTH
|
Computes the hyperbolic cotangent of X .
@@ -310,8 +317,7 @@ All mathematical functions have the following behaviors:
|
- CSC
-
+ | CSC
|
Computes the cosecant of X .
@@ -319,8 +325,7 @@ All mathematical functions have the following behaviors:
|
- CSCH
-
+ | CSCH
|
Computes the hyperbolic cosecant of X .
@@ -328,8 +333,7 @@ All mathematical functions have the following behaviors:
|
- DIV
-
+ | DIV
|
Divides integer X by integer Y .
@@ -337,8 +341,7 @@ All mathematical functions have the following behaviors:
|
- EXP
-
+ | EXP
|
Computes e to the power of X .
@@ -346,15 +349,13 @@ All mathematical functions have the following behaviors:
|
- EUCLIDEAN_DISTANCE
-
+ | EUCLIDEAN_DISTANCE
|
Computes the Euclidean distance between two vectors. |
- FLOOR
-
+ | FLOOR
|
Gets the largest integral value that is not greater than X .
@@ -362,8 +363,7 @@ All mathematical functions have the following behaviors:
|
- GREATEST
-
+ | GREATEST
|
Gets the greatest value among X1,...,XN .
@@ -371,8 +371,7 @@ All mathematical functions have the following behaviors:
|
- IEEE_DIVIDE
-
+ | IEEE_DIVIDE
|
Divides X by Y , but does not generate errors for
@@ -381,8 +380,7 @@ All mathematical functions have the following behaviors:
|
- IS_INF
-
+ | IS_INF
|
Checks if X is positive or negative infinity.
@@ -390,8 +388,7 @@ All mathematical functions have the following behaviors:
|
- IS_NAN
-
+ | IS_NAN
|
Checks if X is a NaN value.
@@ -399,8 +396,7 @@ All mathematical functions have the following behaviors:
|
- LEAST
-
+ | LEAST
|
Gets the least value among X1,...,XN .
@@ -408,8 +404,7 @@ All mathematical functions have the following behaviors:
|
- LN
-
+ | LN
|
Computes the natural logarithm of X .
@@ -417,8 +412,7 @@ All mathematical functions have the following behaviors:
|
- LOG
-
+ | LOG
|
Computes the natural logarithm of X or the logarithm of
@@ -427,8 +421,7 @@ All mathematical functions have the following behaviors:
|
- LOG10
-
+ | LOG10
|
Computes the natural logarithm of X to base 10.
@@ -436,8 +429,17 @@ All mathematical functions have the following behaviors:
|
- MOD
+ | MAX
+ |
+
+ Gets the maximum non-NULL value.
+ For more information, see Aggregate functions.
+
+ |
+
+
+ MOD
|
Gets the remainder of the division of X by Y .
@@ -445,8 +447,7 @@ All mathematical functions have the following behaviors:
|
- PI
-
+ | PI
|
Produces the mathematical constant π as a
@@ -455,8 +456,7 @@ All mathematical functions have the following behaviors:
|
- PI_BIGNUMERIC
-
+ | PI_BIGNUMERIC
|
Produces the mathematical constant π as a BIGNUMERIC value.
@@ -464,8 +464,7 @@ All mathematical functions have the following behaviors:
|
- PI_NUMERIC
-
+ | PI_NUMERIC
|
Produces the mathematical constant π as a NUMERIC value.
@@ -473,8 +472,7 @@ All mathematical functions have the following behaviors:
|
- POW
-
+ | POW
|
Produces the value of X raised to the power of Y .
@@ -482,8 +480,7 @@ All mathematical functions have the following behaviors:
|
- POWER
-
+ | POWER
|
Synonym of POW .
@@ -491,8 +488,7 @@ All mathematical functions have the following behaviors:
|
- RAND
-
+ | RAND
|
Generates a pseudo-random value of type
@@ -502,18 +498,17 @@ All mathematical functions have the following behaviors:
|
- RANGE_BUCKET
-
+ | RANGE_BUCKET
|
Scans through a sorted array and returns the 0-based position
of a point's upper bound.
+
|
- ROUND
-
+ | ROUND
|
Rounds X to the nearest integer or rounds X
@@ -522,8 +517,7 @@ All mathematical functions have the following behaviors:
|
- SAFE_ADD
-
+ | SAFE_ADD
|
Equivalent to the addition operator (X + Y ), but returns
@@ -532,8 +526,7 @@ All mathematical functions have the following behaviors:
|
- SAFE_DIVIDE
-
+ | SAFE_DIVIDE
|
Equivalent to the division operator (X / Y ), but returns
@@ -542,8 +535,7 @@ All mathematical functions have the following behaviors:
|
- SAFE_MULTIPLY
-
+ | SAFE_MULTIPLY
|
Equivalent to the multiplication operator (X * Y ),
@@ -552,8 +544,7 @@ All mathematical functions have the following behaviors:
|
- SAFE_NEGATE
-
+ | SAFE_NEGATE
|
Equivalent to the unary minus operator (-X ), but returns
@@ -562,8 +553,7 @@ All mathematical functions have the following behaviors:
|
- SAFE_SUBTRACT
-
+ | SAFE_SUBTRACT
|
Equivalent to the subtraction operator (X - Y ), but
@@ -572,8 +562,7 @@ All mathematical functions have the following behaviors:
|
- SEC
-
+ | SEC
|
Computes the secant of X .
@@ -581,8 +570,7 @@ All mathematical functions have the following behaviors:
|
- SECH
-
+ | SECH
|
Computes the hyperbolic secant of X .
@@ -590,8 +578,7 @@ All mathematical functions have the following behaviors:
|
- SIGN
-
+ | SIGN
|
Produces -1 , 0, or +1 for negative, zero, and positive arguments
@@ -600,8 +587,7 @@ All mathematical functions have the following behaviors:
|
- SIN
-
+ | SIN
|
Computes the sine of X .
@@ -609,8 +595,7 @@ All mathematical functions have the following behaviors:
|
- SINH
-
+ | SINH
|
Computes the hyperbolic sine of X .
@@ -618,8 +603,7 @@ All mathematical functions have the following behaviors:
|
- SQRT
-
+ | SQRT
|
Computes the square root of X .
@@ -627,17 +611,38 @@ All mathematical functions have the following behaviors:
|
- TAN
+ | SUM
+ |
+
+ Gets the sum of non-NULL values.
+ For more information, see Aggregate functions.
+ |
+
+
+
+ SUM (Differential Privacy)
|
- Computes the tangent of X .
+ DIFFERENTIAL_PRIVACY -supported SUM .
+ Gets the differentially-private sum of non-NULL ,
+ non-NaN values in a query with a
+ DIFFERENTIAL_PRIVACY clause.
+
For more information, see Differential privacy functions.
+
|
- TANH
+ | TAN
+ |
+
+ Computes the tangent of X .
+ |
+
+
+ TANH
|
Computes the hyperbolic tangent of X .
@@ -645,8 +650,7 @@ All mathematical functions have the following behaviors:
|
- TRUNC
-
+ | TRUNC
|
Rounds a number like ROUND(X) or ROUND(X, N) ,
@@ -657,7 +661,7 @@ All mathematical functions have the following behaviors:
|
-### `ABS`
+## `ABS`
```
ABS(X)
@@ -711,7 +715,7 @@ largest negative input value, which has no positive representation.
-### `ACOS`
+## `ACOS`
```
ACOS(X)
@@ -754,7 +758,7 @@ range [-1, 1].
-### `ACOSH`
+## `ACOSH`
```
ACOSH(X)
@@ -792,7 +796,7 @@ less than 1.
-### `ASIN`
+## `ASIN`
```
ASIN(X)
@@ -835,7 +839,7 @@ the range [-1, 1].
-### `ASINH`
+## `ASINH`
```
ASINH(X)
@@ -868,7 +872,7 @@ Computes the inverse hyperbolic sine of X. Does not fail.
-### `ATAN`
+## `ATAN`
```
ATAN(X)
@@ -902,7 +906,7 @@ in the range [-π/2,π/2]. Does not fail.
-### `ATAN2`
+## `ATAN2`
```
ATAN2(X, Y)
@@ -986,7 +990,7 @@ the two arguments to determine the quadrant. The return value is in the range
-### `ATANH`
+## `ATANH`
```
ATANH(X)
@@ -1028,7 +1032,7 @@ of the range (-1, 1).
-### `CBRT`
+## `CBRT`
```
CBRT(X)
@@ -1089,7 +1093,7 @@ SELECT CBRT(27) AS cube_root;
[conversion-rules]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#conversion_rules
-### `CEIL`
+## `CEIL`
```
CEIL(X)
@@ -1169,7 +1173,7 @@ Returns the smallest integral value that is not less than X.
-### `CEILING`
+## `CEILING`
```
CEILING(X)
@@ -1179,7 +1183,7 @@ CEILING(X)
Synonym of CEIL(X)
-### `COS`
+## `COS`
```
COS(X)
@@ -1212,7 +1216,7 @@ Computes the cosine of X where X is specified in radians. Never fails.
-### `COSH`
+## `COSH`
```
COSH(X)
@@ -1246,7 +1250,7 @@ Generates an error if overflow occurs.
-### `COSINE_DISTANCE`
+## `COSINE_DISTANCE`
```sql
COSINE_DISTANCE(vector1, vector2)
@@ -1319,7 +1323,7 @@ Computes the [cosine distance][wiki-cosine-distance] between two vectors.
```sql
-- sparse vector ARRAY>
- [(1, 10.0), (2: 30.0), (5, 40.0)]
+ [(1, 10.0), (2, 30.0), (5, 40.0)]
```
```sql
@@ -1331,11 +1335,11 @@ Computes the [cosine distance][wiki-cosine-distance] between two vectors.
particular order. The following sparse vectors are equivalent:
```sql
- [('a', 10.0), ('b': 30.0), ('d': 40.0)]
+ [('a', 10.0), ('b', 30.0), ('d', 40.0)]
```
```sql
- [('d': 40.0), ('a', 10.0), ('b': 30.0)]
+ [('d', 40.0), ('a', 10.0), ('b', 30.0)]
```
+ Both non-sparse vectors
in this function must share the same dimensions, and if they don't, an error
@@ -1437,7 +1441,7 @@ SELECT COSINE_DISTANCE(
[wiki-cosine-distance]: https://en.wikipedia.org/wiki/Cosine_similarity#Cosine_distance
-### `COT`
+## `COT`
```
COT(X)
@@ -1499,7 +1503,7 @@ SELECT COT(1) AS a, SAFE.COT(0) AS b;
[conversion-rules]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#conversion_rules
-### `COTH`
+## `COTH`
```
COTH(X)
@@ -1561,7 +1565,7 @@ SELECT COTH(1) AS a, SAFE.COTH(0) AS b;
[conversion-rules]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#conversion_rules
-### `CSC`
+## `CSC`
```
CSC(X)
@@ -1623,7 +1627,7 @@ SELECT CSC(100) AS a, CSC(-1) AS b, SAFE.CSC(0) AS c;
[conversion-rules]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#conversion_rules
-### `CSCH`
+## `CSCH`
```
CSCH(X)
@@ -1685,7 +1689,7 @@ SELECT CSCH(0.5) AS a, CSCH(-2) AS b, SAFE.CSCH(0) AS c;
[conversion-rules]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#conversion_rules
-### `DIV`
+## `DIV`
```
DIV(X, Y)
@@ -1755,7 +1759,7 @@ table.
-### `EXP`
+## `EXP`
```
EXP(X)
@@ -1805,7 +1809,7 @@ result overflows.
-### `EUCLIDEAN_DISTANCE`
+## `EUCLIDEAN_DISTANCE`
```sql
EUCLIDEAN_DISTANCE(vector1, vector2)
@@ -1878,7 +1882,7 @@ Computes the [Euclidean distance][wiki-euclidean-distance] between two vectors.
```sql
-- sparse vector ARRAY>
- [(1, 10.0), (2: 30.0), (5, 40.0)]
+ [(1, 10.0), (2, 30.0), (5, 40.0)]
```
```sql
@@ -1890,11 +1894,11 @@ Computes the [Euclidean distance][wiki-euclidean-distance] between two vectors.
particular order. The following sparse vectors are equivalent:
```sql
- [('a', 10.0), ('b': 30.0), ('d': 40.0)]
+ [('a', 10.0), ('b', 30.0), ('d', 40.0)]
```
```sql
- [('d': 40.0), ('a', 10.0), ('b': 30.0)]
+ [('d', 40.0), ('a', 10.0), ('b', 30.0)]
```
+ Both non-sparse vectors
in this function must share the same dimensions, and if they don't, an error
@@ -1983,7 +1987,7 @@ SELECT EUCLIDEAN_DISTANCE(
[wiki-euclidean-distance]: https://en.wikipedia.org/wiki/Euclidean_distance
-### `FLOOR`
+## `FLOOR`
```
FLOOR(X)
@@ -2063,7 +2067,7 @@ Returns the largest integral value that is not greater than X.
-### `GREATEST`
+## `GREATEST`
```
GREATEST(X1,...,XN)
@@ -2101,7 +2105,7 @@ This function supports specifying [collation][collation].
Data type of the input values.
-### `IEEE_DIVIDE`
+## `IEEE_DIVIDE`
```
IEEE_DIVIDE(X, Y)
@@ -2172,7 +2176,7 @@ this function does not generate errors for division by zero or overflow.
-### `IS_INF`
+## `IS_INF`
```
IS_INF(X)
@@ -2205,7 +2209,7 @@ Returns `TRUE` if the value is positive or negative infinity.
-### `IS_NAN`
+## `IS_NAN`
```
IS_NAN(X)
@@ -2234,7 +2238,7 @@ Returns `TRUE` if the value is a `NaN` value.
-### `LEAST`
+## `LEAST`
```
LEAST(X1,...,XN)
@@ -2272,7 +2276,7 @@ This function supports specifying [collation][collation].
Data type of the input values.
-### `LN`
+## `LN`
```
LN(X)
@@ -2300,7 +2304,7 @@ equal to zero.
+inf |
- X < 0 |
+ X <= 0 |
Error |
@@ -2321,7 +2325,7 @@ equal to zero.
-### `LOG`
+## `LOG`
```
LOG(X [, Y])
@@ -2406,7 +2410,7 @@ If only X is present, `LOG` is a synonym of `LN`. If Y is also present,
-### `LOG10`
+## `LOG10`
```
LOG10(X)
@@ -2458,7 +2462,7 @@ Similar to `LOG`, but computes logarithm to base 10.
-### `MOD`
+## `MOD`
```
MOD(X, Y)
@@ -2512,7 +2516,7 @@ table.
-### `PI`
+## `PI`
```sql
PI()
@@ -2539,7 +2543,7 @@ SELECT PI() AS pi
*--------------------*/
```
-### `PI_BIGNUMERIC`
+## `PI_BIGNUMERIC`
```sql
PI_BIGNUMERIC()
@@ -2565,7 +2569,7 @@ SELECT PI_BIGNUMERIC() AS pi
*-----------------------------------------*/
```
-### `PI_NUMERIC`
+## `PI_NUMERIC`
```sql
PI_NUMERIC()
@@ -2591,7 +2595,7 @@ SELECT PI_NUMERIC() AS pi
*-------------*/
```
-### `POW`
+## `POW`
```
POW(X, Y)
@@ -2714,7 +2718,7 @@ table.
-### `POWER`
+## `POWER`
```
POWER(X, Y)
@@ -2726,7 +2730,7 @@ Synonym of [`POW(X, Y)`][pow].
[pow]: #pow
-### `RAND`
+## `RAND`
```
RAND()
@@ -2737,7 +2741,7 @@ RAND()
Generates a pseudo-random value of type `DOUBLE` in
the range of [0, 1), inclusive of 0 and exclusive of 1.
-### `RANGE_BUCKET`
+## `RANGE_BUCKET`
```sql
RANGE_BUCKET(point, boundaries_array)
@@ -2852,7 +2856,7 @@ GROUP BY 1
[data-type-properties]: https://github.com/google/zetasql/blob/master/docs/data-types.md#data_type_properties
-### `ROUND`
+## `ROUND`
```
ROUND(X [, N [, rounding_mode]])
@@ -2983,7 +2987,7 @@ then the function generates an error.
[round-half-even]: https://en.wikipedia.org/wiki/Rounding#Rounding_half_to_even
-### `SAFE_ADD`
+## `SAFE_ADD`
```
SAFE_ADD(X, Y)
@@ -3033,7 +3037,7 @@ Equivalent to the addition operator (`+`), but returns
-### `SAFE_DIVIDE`
+## `SAFE_DIVIDE`
```
SAFE_DIVIDE(X, Y)
@@ -3093,7 +3097,7 @@ Equivalent to the division operator (`X / Y`), but returns
-### `SAFE_MULTIPLY`
+## `SAFE_MULTIPLY`
```
SAFE_MULTIPLY(X, Y)
@@ -3143,7 +3147,7 @@ Equivalent to the multiplication operator (`*`), but returns
-### `SAFE_NEGATE`
+## `SAFE_NEGATE`
```
SAFE_NEGATE(X)
@@ -3192,7 +3196,7 @@ Equivalent to the unary minus operator (`-`), but returns
-### `SAFE_SUBTRACT`
+## `SAFE_SUBTRACT`
```
SAFE_SUBTRACT(X, Y)
@@ -3243,7 +3247,7 @@ Equivalent to the subtraction operator (`-`), but returns
-### `SEC`
+## `SEC`
```
SEC(X)
@@ -3300,7 +3304,7 @@ SELECT SEC(100) AS a, SEC(-1) AS b;
[conversion-rules]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#conversion_rules
-### `SECH`
+## `SECH`
```
SECH(X)
@@ -3358,7 +3362,7 @@ SELECT SECH(0.5) AS a, SECH(-2) AS b, SECH(100) AS c;
[conversion-rules]: https://github.com/google/zetasql/blob/master/docs/conversion_rules.md#conversion_rules
-### `SIGN`
+## `SIGN`
```
SIGN(X)
@@ -3412,7 +3416,7 @@ between positive and negative zero.
-### `SIN`
+## `SIN`
```
SIN(X)
@@ -3445,7 +3449,7 @@ Computes the sine of X where X is specified in radians. Never fails.
-### `SINH`
+## `SINH`
```
SINH(X)
@@ -3479,7 +3483,7 @@ an error if overflow occurs.
-### `SQRT`
+## `SQRT`
```
SQRT(X)
@@ -3527,7 +3531,7 @@ Computes the square root of X. Generates an error if X is less than 0.
-### `TAN`
+## `TAN`
```
TAN(X)
@@ -3561,7 +3565,7 @@ overflow occurs.
-### `TANH`
+## `TANH`
```
TANH(X)
@@ -3595,7 +3599,7 @@ fail.
-### `TRUNC`
+## `TRUNC`
```
TRUNC(X [, N])
diff --git a/docs/modules.md b/docs/modules.md
deleted file mode 100644
index 14aa8bd00..000000000
--- a/docs/modules.md
+++ /dev/null
@@ -1,372 +0,0 @@
-
-
-
-
-# Modules
-
-In ZetaSQL, a module is a collection of reusable [data definition
-language (DDL)][ddl] statements. The purpose of a module is to keep related
-logic in one location that you and other users can maintain centrally and
-reference repeatedly.
-
-Modules have the following key characteristics:
-
-+ Modules can't create permanent objects, such as tables and views, or use
- [data manipulation language (DML)][dml] statements to insert or modify data.
-+ Each module has its own namespace. This namespace is empty until `CREATE`
- statements in the module add objects to the module namespace.
-+ Modules allow public and private object definitions for proper
- encapsulation.
-
-ZetaSQL limits the duration and side effects of module objects to the
-invoking *session*. In the context of ZetaSQL modules, a session is a
-set of related statements and objects that form a [unit of work][unit-of-work]{: .external}.
-
-## Create a module
-
-To create a module, you create a file that contains a `MODULE` statement and
-subsequent `IMPORT` and `CREATE` statements. The file extension `.sqlm` must
-appear at the end of the filename of the module file.
-
-Modules support the following statements:
-
-+ `MODULE`
-+ `IMPORT MODULE`
-+ `CREATE { PUBLIC | PRIVATE } [ { TABLE | AGGREGATE } ] FUNCTION`
-+ `CREATE { PUBLIC | PRIVATE } CONSTANT`
-
-Modules do not support statements that return results or have side effects.
-Modules only support defining an object once and do not support modifying an
-object after it is defined.
-
-### Declare a module
-
-The first statement in a module must be a valid `MODULE` statement which defines
-the module name:
-
-
-MODULE identifier_path [ OPTIONS (...)];
-
-
-Each module file must contain only one `MODULE` statement.
-
-By convention, an `IMPORT` statement for this module will generally include the
-`identifier_path` in the `IMPORT` statement. For clarity, the `identifier_path`
-should reflect the path to the module file that contains the `MODULE` statement.
-
-For example, if a module file is stored at `search_path/x/y/z.sqlm`,
-then the `MODULE` statement will be:
-
-```sql
-MODULE x.y.z;
-```
-
-And the `IMPORT` statement will be:
-
-```sql
-IMPORT MODULE x.y.z;
-```
-
-Caveats:
-
-+ The `IMPORT` statement should not include the `.sqlm` file extension.
-+ If you import module `x.y.z`, ZetaSQL looks for the module at
- `search_path/x/y/z.sqlm`. If the module is not found, ZetaSQL looks for
- the module at `search_path/x/y/z/z.sqlm` and you can import it with either
- `IMPORT MODULE x.y.z` or `IMPORT MODULE x.y.z.z`.
-
-### Create objects within modules
-
-Modules can contain `CREATE` statements to create objects within the module.
-
-#### Specify public vs. private objects
-
-All `CREATE` statements must indicate if the created object is available outside
-of the module in the importing session (public), or only available internally
-within the module (private). To specify these properties, use the `PUBLIC` or
-`PRIVATE` modifier in the `CREATE` statement.
-
-**Examples**
-
-The following example creates a public function, which the invoking session can
-execute.
-
-```sql
-CREATE PUBLIC FUNCTION Foo(a INT64)
-AS (
- a + 1
-);
-```
-
-The following example creates a private function, which only statements within
-the same module can execute.
-
-```sql
-CREATE PRIVATE FUNCTION Bar(b INT64)
-AS (
- b - 1
-);
-```
-
-#### Create constants
-
-Modules support the creation of [constants][create-constant].
-
-The `TEMP` keyword is not allowed when creating a constant in a module.
-`TEMP` objects in a module are not meaningful, since the lifetime of the object
-is the lifetime of the module.
-
-**Example**
-
-Create a constant, `DEFAULT_HEIGHT`:
-
-```sql
-CREATE PUBLIC CONSTANT DEFAULT_HEIGHT = 25;
-```
-
-Use it in a statement:
-
-```sql
-SELECT (DEFAULT_HEIGHT + 5) AS result;
-
-/*--------*
- | result |
- +--------+
- | 30 |
- *--------*/
-```
-
-#### Create UDFs and TVFs
-
-Modules support creation of UDFs ([user-defined
-functions][user-defined-functions]), including TVFs ([table-valued
-functions][table-valued-functions]) with scalar and templated arguments.
-
-The `TEMP` keyword is not allowed in `CREATE ( PUBLIC | PRIVATE ) FUNCTION`
-statements in modules. `TEMP` objects are not meaningful within a module since
-the lifetime of the object is the lifetime of the module.
-
-Note that SQL UDFs/TVFs defined in modules cannot directly access any database
-schema tables, and therefore cannot rely on the existence of tables in a
-database. To reference a database table in a module TVF, the table must be
-passed in as a TVF argument of type `ANY TABLE`.
-
-**Examples**
-
-The following example creates a public UDF.
-
-```sql
-CREATE PUBLIC FUNCTION SampleUdf(a INT64)
-AS (
- a + 1
-);
-```
-
-The following example creates a public templated UDF with a scalar argument.
-
-```sql
-CREATE PUBLIC FUNCTION ScalarUdf(a ANY TYPE)
-AS (
- a + 1
-);
-```
-
-The following example creates a public TVF with a scalar
-argument using a public UDF defined in the same module.
-
-```sql
-CREATE PUBLIC TABLE FUNCTION ScalarTvf(a INT64)
-AS (
- SELECT a, SampleUdf(a) AS b
-);
-```
-
-The following example creates a public TVF with a table argument.
-
-```sql
-CREATE PUBLIC TABLE FUNCTION ScalarTvf(SomeTable TABLE)
-AS (
- SELECT a, SUM(b) AS sum_b FROM SomeTable GROUP BY a
-);
-```
-
-The following example creates a public templated TVF.
-
-```sql
-CREATE PUBLIC TABLE FUNCTION TemplatedTvf(a ANY TYPE, SomeTable ANY TABLE)
-AS (
- SELECT a, b.* FROM SomeTable
-);
-```
-
-### Reference module objects from within the same module
-
-Statements in a module can reference other objects in the same module.
-Statements can reference objects whose `CREATE` statements appear before or
-after that referencing statement.
-
-**Example**
-
-The following example module declares the name of the module, and creates one
-public function and two private functions. The public function references the
-other two private functions.
-
-```sql
-MODULE a.b.c;
-
-CREATE PRIVATE FUNCTION Foo(x INT64)
-AS (
- x
-);
-
-CREATE PRIVATE FUNCTION Bar(y INT64)
-AS (
- y
-);
-
-CREATE PUBLIC FUNCTION Baz(a INT64, b INT64)
-AS (
- Foo(a) + Bar(b)
-);
-```
-
-Object references cannot be circular: if a function directly or indirectly
-references a second function, then that second function cannot reference the
-original function.
-
-## Use an existing module
-
-You can use an existing module by importing it into a session or into another
-module.
-
-### Import a module into a session
-
-To import a module into a session, use the `IMPORT MODULE` statement.
-
-**Syntax**
-
-
-IMPORT MODULE module_identifier_path [AS alias];
-
-
-This imports a module and creates a namespace visible to the importing session
-containing public objects exported from the module.
-
-The `module_identifier_path` is a unique module name that corresponds to the
-path ID in the [module declaration](#declare-a-module).
-
-The `alias` provides the namespace that the `IMPORT MODULE` statement creates.
-If `alias` is absent, then the namespace will be the last name in the
-`module_identifier_path`.
-
-**Examples**
-
-The following example statement imports the module with the identifier path
-`x.y.z` into namespace `z`:
-
-```sql
-IMPORT MODULE x.y.z;
-```
-
-The following example statement imports the same module but with the alias
-`some_module` into namespace `some_module`.
-
-```sql
-IMPORT MODULE x.y.z AS some_module;
-```
-
-### Reference module objects from a session
-
-Once you have imported a module into a session, you can reference the public
-objects in that module from the session. Use the namespace of the module or its
-alias to reference the objects in the module.
-
-**Example**
-
-In the following example, the `IMPORT` statement imports the module with the
-identifier path `x.y.z` into namespace `z`, and then executes a public function
-`Baz` from inside of that module.
-
-```sql
-IMPORT MODULE x.y.z;
-
-SELECT z.Baz(a, b);
-```
-
-If the `IMPORT` statement includes an alias, then the statement creates the
-namespace with that alias. Use that alias as the identifier path prefix for the
-referenced object.
-
-**Example**
-
-In the following example, the `IMPORT` statement assigns alias `some_module` to
-the module with the identifier path `x.y.z`, and the `SELECT` statement executes
-a public function `Baz` from inside of that module.
-
-```sql
-IMPORT MODULE x.y.z AS some_module;
-
-SELECT some_module.Baz(a, b);
-```
-
-### Import a module into another module
-
-To import a module into another module, use the same syntax as when
-[importing a module into a session](#import-a-module-into-a-session).
-
-+ Imports cannot be circular. For example, if `module1` imports `module2`,
- then `module2` cannot directly or indirectly import `module1`.
-+ A module cannot import itself.
-
-### Reference module objects from another module
-
-Once you have imported a module into another module, you can reference the
-public objects that the imported module creates. Use the same syntax as in an
-invoking session.
-
-**Example**
-
-In the following example, the `IMPORT` statement imports the module with the
-identifier path `x.y.z`, and then creates a function `Foo` which references the
-public function `Baz` from inside of the imported module.
-
-```sql
-MODULE a.b.c;
-IMPORT MODULE x.y.z;
-
-CREATE PUBLIC FUNCTION Foo(d INT64, e INT64)
-AS (
- z.Baz(d, e)
-);
-```
-
-If the `IMPORT` statement includes an alias, you can reference objects from the
-imported module inside the importing module using the alias as the identifier
-path.
-
-```sql
-MODULE a.b.c;
-IMPORT MODULE x.y.z AS some_module;
-
-CREATE PUBLIC FUNCTION Foo(d INT64, e INT64)
-AS (
- some_module.Baz(d, e)
-);
-```
-
-
-
-[unit-of-work]: https://en.wikipedia.org/w/index.php?title=Unit_of_work&redirect=no
-
-[dml]: https://github.com/google/zetasql/blob/master/docs/data-manipulation-language.md
-
-[ddl]: https://github.com/google/zetasql/blob/master/docs/data-definition-language.md
-
-[user-defined-functions]: https://github.com/google/zetasql/blob/master/docs/user-defined-functions.md
-
-[table-valued-functions]: https://github.com/google/zetasql/blob/master/docs/table-functions.md#tvfs
-
-[create-constant]: https://github.com/google/zetasql/blob/master/docs/data-definition-language.md#create_constant
-
-
-
diff --git a/docs/navigation_functions.md b/docs/navigation_functions.md
index 5f636a43c..56d68ae2c 100644
--- a/docs/navigation_functions.md
+++ b/docs/navigation_functions.md
@@ -16,7 +16,7 @@ current row. The `OVER` clause syntax varies across navigation functions.
For all navigation functions, the result data type is the same type as
`value_expression`.
-### Function list
+## Function list
@@ -28,73 +28,73 @@ For all navigation functions, the result data type is the same type as
- FIRST_VALUE
-
+ | FIRST_VALUE
|
Gets a value for the first row in the current window frame.
+
|
- LAG
-
+ | LAG
|
Gets a value for a preceding row.
+
|
- LAST_VALUE
-
+ | LAST_VALUE
|
Gets a value for the last row in the current window frame.
+
|
- LEAD
-
+ | LEAD
|
Gets a value for a subsequent row.
+
|
- NTH_VALUE
-
+ | NTH_VALUE
|
Gets a value for the Nth row of the current window frame.
+
|
- PERCENTILE_CONT
-
+ | PERCENTILE_CONT
|
Computes the specified percentile for a value, using
linear interpolation.
+
|
- PERCENTILE_DISC
-
+ | PERCENTILE_DISC
|
Computes the specified percentile for a discrete value.
+
|
-### `FIRST_VALUE`
+## `FIRST_VALUE`
```sql
FIRST_VALUE (value_expression [{RESPECT | IGNORE} NULLS])
@@ -185,7 +185,7 @@ FROM (
*-----------------+-------------+----------+--------------+------------------*/
```
-### `LAG`
+## `LAG`
```sql
LAG (value_expression[, offset [, default_expression]])
@@ -352,7 +352,7 @@ FROM finishers;
*-----------------+-------------+----------+-------------------*/
```
-### `LAST_VALUE`
+## `LAST_VALUE`
```sql
LAST_VALUE (value_expression [{RESPECT | IGNORE} NULLS])
@@ -443,7 +443,7 @@ FROM (
*-----------------+-------------+----------+--------------+------------------*/
```
-### `LEAD`
+## `LEAD`
```sql
LEAD (value_expression[, offset [, default_expression]])
@@ -610,7 +610,7 @@ FROM finishers;
*-----------------+-------------+----------+------------------*/
```
-### `NTH_VALUE`
+## `NTH_VALUE`
```sql
NTH_VALUE (value_expression, constant_integer_expression [{RESPECT | IGNORE} NULLS])
@@ -707,7 +707,7 @@ FROM (
*-----------------+-------------+----------+--------------+----------------*/
```
-### `PERCENTILE_CONT`
+## `PERCENTILE_CONT`
```sql
PERCENTILE_CONT (value_expression, percentile [{RESPECT | IGNORE} NULLS])
@@ -816,7 +816,7 @@ FROM UNNEST([0, 3, NULL, 1, 2]) AS x LIMIT 1;
[dp-functions]: https://github.com/google/zetasql/blob/master/docs/aggregate-dp-functions.md
-### `PERCENTILE_DISC`
+## `PERCENTILE_DISC`
```sql
PERCENTILE_DISC (value_expression, percentile [{RESPECT | IGNORE} NULLS])
diff --git a/docs/net_functions.md b/docs/net_functions.md
index c5bfdeba3..16f3a24df 100644
--- a/docs/net_functions.md
+++ b/docs/net_functions.md
@@ -6,7 +6,7 @@
ZetaSQL supports the following Net functions.
-### Function list
+## Function list
@@ -18,8 +18,7 @@ ZetaSQL supports the following Net functions.
- NET.FORMAT_IP
-
+ | NET.FORMAT_IP
|
(Deprecated) Converts an
@@ -29,8 +28,7 @@ ZetaSQL supports the following Net functions.
|
- NET.FORMAT_PACKED_IP
-
+ | NET.FORMAT_PACKED_IP
|
(Deprecated) Converts an
@@ -40,8 +38,7 @@ ZetaSQL supports the following Net functions.
|
- NET.HOST
-
+ | NET.HOST
|
Gets the hostname from a URL.
@@ -49,8 +46,7 @@ ZetaSQL supports the following Net functions.
|
- NET.IP_FROM_STRING
-
+ | NET.IP_FROM_STRING
|
Converts an IPv4 or IPv6 address from a STRING value to
@@ -59,8 +55,7 @@ ZetaSQL supports the following Net functions.
|
- NET.IP_IN_NET
-
+ | NET.IP_IN_NET
|
Checks if an IP address is in a subnet.
@@ -68,8 +63,7 @@ ZetaSQL supports the following Net functions.
|
- NET.IP_NET_MASK
-
+ | NET.IP_NET_MASK
|
Gets a network mask.
@@ -77,8 +71,7 @@ ZetaSQL supports the following Net functions.
|
- NET.IP_TO_STRING
-
+ | NET.IP_TO_STRING
|
Converts an IPv4 or IPv6 address from a BYTES value in
@@ -87,8 +80,7 @@ ZetaSQL supports the following Net functions.
|
- NET.IP_TRUNC
-
+ | NET.IP_TRUNC
|
Converts a BYTES IPv4 or IPv6 address in
@@ -97,8 +89,7 @@ ZetaSQL supports the following Net functions.
|
- NET.IPV4_FROM_INT64
-
+ | NET.IPV4_FROM_INT64
|
Converts an IPv4 address from an INT64 value to a
@@ -107,8 +98,7 @@ ZetaSQL supports the following Net functions.
|
- NET.IPV4_TO_INT64
-
+ | NET.IPV4_TO_INT64
|
Converts an IPv4 address from a BYTES value in network
@@ -117,8 +107,7 @@ ZetaSQL supports the following Net functions.
|
- NET.MAKE_NET
-
+ | NET.MAKE_NET
|
Takes a IPv4 or IPv6 address and the prefix length, and produces a
@@ -127,8 +116,7 @@ ZetaSQL supports the following Net functions.
|
- NET.PARSE_IP
-
+ | NET.PARSE_IP
|
(Deprecated) Converts an
@@ -138,8 +126,7 @@ ZetaSQL supports the following Net functions.
|
- NET.PARSE_PACKED_IP
-
+ | NET.PARSE_PACKED_IP
|
(Deprecated) Converts an
@@ -149,8 +136,7 @@ ZetaSQL supports the following Net functions.
|
- NET.PUBLIC_SUFFIX
-
+ | NET.PUBLIC_SUFFIX
|
Gets the public suffix from a URL.
@@ -158,8 +144,7 @@ ZetaSQL supports the following Net functions.
|
- NET.REG_DOMAIN
-
+ | NET.REG_DOMAIN
|
Gets the registered or registrable domain from a URL.
@@ -167,8 +152,7 @@ ZetaSQL supports the following Net functions.
|
- NET.SAFE_IP_FROM_STRING
-
+ | NET.SAFE_IP_FROM_STRING
|
Similar to the NET.IP_FROM_STRING , but returns
@@ -179,7 +163,7 @@ ZetaSQL supports the following Net functions.
|
-### `NET.FORMAT_IP` (DEPRECATED)
+## `NET.FORMAT_IP` (DEPRECATED)
```
@@ -200,7 +184,7 @@ STRING
[net-link-to-ipv4-from-int64]: #netipv4_from_int64
-### `NET.FORMAT_PACKED_IP` (DEPRECATED)
+## `NET.FORMAT_PACKED_IP` (DEPRECATED)
```
@@ -217,7 +201,7 @@ STRING
[net-link-to-ip-to-string]: #netip_to_string
-### `NET.HOST`
+## `NET.HOST`
```
NET.HOST(url)
@@ -272,7 +256,7 @@ FROM (
[net-link-to-rfc-3986-appendix-a]: https://tools.ietf.org/html/rfc3986#appendix-A
-### `NET.IP_FROM_STRING`
+## `NET.IP_FROM_STRING`
```
NET.IP_FROM_STRING(addr_str)
@@ -325,7 +309,7 @@ FROM UNNEST([
[net-link-to-cidr-notation]: https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
-### `NET.IP_IN_NET`
+## `NET.IP_IN_NET`
```
NET.IP_IN_NET(address, subnet)
@@ -354,7 +338,7 @@ BOOL
[net-link-to-ipv6-rfc]: http://www.ietf.org/rfc/rfc2373.txt
-### `NET.IP_NET_MASK`
+## `NET.IP_NET_MASK`
```
NET.IP_NET_MASK(num_output_bytes, prefix_length)
@@ -398,7 +382,7 @@ FROM UNNEST([
*--------------------------------------------------------------------------------*/
```
-### `NET.IP_TO_STRING`
+## `NET.IP_TO_STRING`
```
NET.IP_TO_STRING(addr_bin)
@@ -439,7 +423,7 @@ FROM UNNEST([
*---------------------------------------------------------------------------------------------------------------*/
```
-### `NET.IP_TRUNC`
+## `NET.IP_TRUNC`
```
NET.IP_TRUNC(addr_bin, prefix_length)
@@ -483,7 +467,7 @@ FROM UNNEST([
*-----------------------------------------------------------------------------*/
```
-### `NET.IPV4_FROM_INT64`
+## `NET.IPV4_FROM_INT64`
```
NET.IPV4_FROM_INT64(integer_value)
@@ -527,7 +511,7 @@ FROM (
*-----------------------------------------------*/
```
-### `NET.IPV4_TO_INT64`
+## `NET.IPV4_TO_INT64`
```
NET.IPV4_TO_INT64(addr_bin)
@@ -567,7 +551,7 @@ UNNEST([b"\x00\x00\x00\x00", b"\x00\xab\xcd\xef", b"\xff\xff\xff\xff"]) AS x;
*-------------------------------------*/
```
-### `NET.MAKE_NET`
+## `NET.MAKE_NET`
```
NET.MAKE_NET(address, prefix_length)
@@ -610,7 +594,7 @@ STRING
[net-link-to-cidr-notation]: https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
-### `NET.PARSE_IP` (DEPRECATED)
+## `NET.PARSE_IP` (DEPRECATED)
```
@@ -632,7 +616,7 @@ INT64
[net-link-to-ipv4-to-int64]: #netipv4_to_int64
-### `NET.PARSE_PACKED_IP` (DEPRECATED)
+## `NET.PARSE_PACKED_IP` (DEPRECATED)
```
@@ -652,7 +636,7 @@ BYTES
[net-link-to-ip-from-string]: #netip_from_string
-### `NET.PUBLIC_SUFFIX`
+## `NET.PUBLIC_SUFFIX`
```
NET.PUBLIC_SUFFIX(url)
@@ -735,7 +719,7 @@ FROM (
[net-link-to-rfc-3986-appendix-a]: https://tools.ietf.org/html/rfc3986#appendix-A
-### `NET.REG_DOMAIN`
+## `NET.REG_DOMAIN`
```
NET.REG_DOMAIN(url)
@@ -823,7 +807,7 @@ FROM (
[net-link-to-rfc-3986-appendix-a]: https://tools.ietf.org/html/rfc3986#appendix-A
-### `NET.SAFE_IP_FROM_STRING`
+## `NET.SAFE_IP_FROM_STRING`
```
NET.SAFE_IP_FROM_STRING(addr_str)
diff --git a/docs/numbering_functions.md b/docs/numbering_functions.md
index 83968b408..625f68c5a 100644
--- a/docs/numbering_functions.md
+++ b/docs/numbering_functions.md
@@ -13,7 +13,7 @@ Numbering functions assign integer values to each row based on their position
within the specified window. The `OVER` clause syntax varies across
numbering functions.
-### Function list
+## Function list
@@ -25,64 +25,64 @@ numbering functions.
- CUME_DIST
-
+ | CUME_DIST
|
Gets the cumulative distribution (relative position (0,1]) of each row
within a window.
+
|
- DENSE_RANK
-
+ | DENSE_RANK
|
Gets the dense rank (1-based, no gaps) of each row within a window.
+
|
- NTILE
-
+ | NTILE
|
Gets the quantile bucket number (1-based) of each row within a window.
+
|
- PERCENT_RANK
-
+ | PERCENT_RANK
|
Gets the percentile rank (from 0 to 1) of each row within a window.
+
|
- RANK
-
+ | RANK
|
Gets the rank (1-based) of each row within a window.
+
|
- ROW_NUMBER
-
+ | ROW_NUMBER
|
Gets the sequential row number (1-based) of each row within a window.
+
|
-### `CUME_DIST`
+## `CUME_DIST`
```sql
CUME_DIST()
@@ -151,7 +151,7 @@ FROM finishers;
*-----------------+------------------------+----------+-------------*/
```
-### `DENSE_RANK`
+## `DENSE_RANK`
```sql
DENSE_RANK()
@@ -247,7 +247,7 @@ FROM finishers;
*-----------------+------------------------+----------+-------------*/
```
-### `NTILE`
+## `NTILE`
```sql
NTILE(constant_integer_expression)
@@ -320,7 +320,7 @@ FROM finishers;
*-----------------+------------------------+----------+-------------*/
```
-### `PERCENT_RANK`
+## `PERCENT_RANK`
```sql
PERCENT_RANK()
@@ -389,7 +389,7 @@ FROM finishers;
*-----------------+------------------------+----------+---------------------*/
```
-### `RANK`
+## `RANK`
```sql
RANK()
@@ -486,7 +486,7 @@ FROM finishers;
*-----------------+------------------------+----------+-------------*/
```
-### `ROW_NUMBER`
+## `ROW_NUMBER`
```sql
ROW_NUMBER()
diff --git a/docs/operators.md b/docs/operators.md
index a3b0efa9d..8edf592ae 100644
--- a/docs/operators.md
+++ b/docs/operators.md
@@ -60,7 +60,6 @@ statement.
ARRAY |
Array position. Must be used with OFFSET or ORDINAL —see
Array Functions
-
. |
Binary |
@@ -317,30 +316,34 @@ statement.
+For example, the logical expression:
+
+`x OR y AND z`
+
+is interpreted as:
+
+`( x OR ( y AND z ) )`
+
Operators with the same precedence are left associative. This means that those
operators are grouped together starting from the left and moving right. For
example, the expression:
`x AND y AND z`
-is interpreted as
+is interpreted as:
`( ( x AND y ) AND z )`
The expression:
-```
-x * y / z
-```
+`x * y / z`
is interpreted as:
-```
-( ( x * y ) / z )
-```
+`( ( x * y ) / z )`
All comparison operators have the same priority, but comparison operators are
-not associative. Therefore, parentheses are required in order to resolve
+not associative. Therefore, parentheses are required to resolve
ambiguity. For example:
`(x < y) IS FALSE`
@@ -358,70 +361,60 @@ ambiguity. For example:
Field access operator
-
|
Gets the value of a field. |
Array subscript operator
-
|
Gets a value from an array at a specific position. |
Struct subscript operator
-
|
Gets the value of a field at a selected position in a struct. |
JSON subscript operator
-
|
Gets a value of an array element or field in a JSON expression. |
Protocol buffer map subscript operator
-
|
Gets the value in a protocol buffer map for a given key. |
Array elements field access operator
-
|
Traverses through the levels of a nested data type inside an array. |
Arithmetic operators
-
|
Performs arithmetic operations. |
Date arithmetics operators
-
|
Performs arithmetic operations on dates. |
Datetime subtraction
-
|
Computes the difference between two datetimes as an interval. |
Interval arithmetic operators
-
|
Adds an interval to a datetime or subtracts an interval from a datetime.
@@ -430,14 +423,12 @@ ambiguity. For example:
|
Bitwise operators
-
|
Performs bit manipulation. |
Logical operators
-
|
Tests for the truth of some condition and produces TRUE ,
@@ -446,8 +437,60 @@ ambiguity. For example:
|
- Comparison operators
+ | Graph concatenation operator
+ |
+
+ Combines multiple graph paths into one and preserves the original order of
+ the nodes and edges.
+ |
+
+
+
+ Graph logical operators
+ |
+
+ Tests for the truth of a condition in a graph and produces either
+ TRUE or FALSE .
+ |
+
+
+
+ Graph predicates
+ |
+
+ Tests for the truth of a condition for a graph element and produces
+ TRUE , FALSE , or NULL .
+ |
+
+
+
+ IS DESTINATION predicate
+ |
+ In a graph, checks to see if a node is or isn't the destination of an edge. |
+
+
+
+ IS SOURCE predicate
+ |
+ In a graph, checks to see if a node is or isn't the source of an edge. |
+
+
+
+ PROPERTY_EXISTS predicate
+ |
+ In a graph, checks to see if a property exists for an element. |
+
+
+
+ SAME predicate
+ |
+
+ In a graph, determines if all graph elements in a list bind to the same node or edge.
+ |
+
+
+ Comparison operators
|
Compares operands and produces the results of the comparison as a
@@ -457,21 +500,18 @@ ambiguity. For example:
|
EXISTS operator
-
|
Checks if a subquery produces one or more rows. |
IN operator
-
|
Checks for an equal value in a set of values. |
IS operators
-
|
Checks for the truth of a condition and produces either TRUE or
@@ -481,42 +521,36 @@ ambiguity. For example:
|
IS DISTINCT FROM operator
-
|
Checks if values are considered to be distinct from each other. |
LIKE operator
-
|
Checks if values are like or not like one another. |
Quantified LIKE operator
-
|
Checks a search value for matches against several patterns. |
NEW operator
-
|
Creates a protocol buffer. |
Concatenation operator
-
|
Combines multiple values into one. |
WITH expression
-
|
Creates variables for re-use and produces a result expression. |
@@ -541,6 +575,7 @@ Input values:
+ `STRUCT`
+ `PROTO`
+ `JSON`
++ `GRAPH_ELEMENT`
Note: If the field to access is within a `STRUCT`, you can use the
[struct subscript operator][struct-subscript-operator] to access the field by
@@ -555,6 +590,8 @@ a field by position is useful when fields are un-named or have ambiguous names.
the protocol buffer, an error is thrown.
+ For `JSON`: `JSON`. If a field is not found in a JSON value, a SQL `NULL` is
returned.
++ For `GRAPH_ELEMENT`: SQL data type of `fieldname`. If a field (property) is
+ not found in the graph element, an error is produced.
**Example**
@@ -1735,6 +1772,355 @@ SELECT entry FROM entry_table WHERE entry IS NULL
[three-valued-logic]: https://en.wikipedia.org/wiki/Three-valued_logic
+### Graph concatenation operator
+
+
+```sql
+graph_path || graph_path [ || ... ]
+```
+
+**Description**
+
+Combines multiple graph paths into one and preserves the original order of the
+nodes and edges.
+
+Arguments:
+
++ `graph_path`: A `GRAPH_PATH` value that represents a graph path to
+ concatenate.
+
+**Details**
+
+This operator produces an error if the last node in the first path isn't the
+same as the first node in the second path.
+
+```sql
+-- This successfully produces the concatenated path called `full_path`.
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid:Account),
+ q=(mid)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+```
+
+```sql
+-- This produces an error because the first node of the path to be concatenated
+-- (mid2) is not equal to the last node of the previous path (mid1).
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid1:Account),
+ q=(mid2:Account)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+```
+
+The first node in each subsequent path is removed from the
+concatenated path.
+
+```sql
+-- The concatenated path called `full_path` contains these elements:
+-- src, t1, mid, t2, dst.
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid:Account),
+ q=(mid)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+```
+
+If any `graph_path` is `NULL`, produces `NULL`.
+
+**Example**
+
+In the following query, the paths called `p` and `q` are concatenated. Notice that
+`mid` is used at the end of the first path and at the beginning of the
+second path. Also notice that the duplicate `mid` is removed from the
+concatenated path called `full_path`:
+
+```sql
+GRAPH FinGraph
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid:Account),
+ q = (mid)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+RETURN
+ JSON_QUERY(TO_JSON(full_path)[0], '$.labels') AS element_a,
+ JSON_QUERY(TO_JSON(full_path)[1], '$.labels') AS element_b,
+ JSON_QUERY(TO_JSON(full_path)[2], '$.labels') AS element_c,
+ JSON_QUERY(TO_JSON(full_path)[3], '$.labels') AS element_d,
+ JSON_QUERY(TO_JSON(full_path)[4], '$.labels') AS element_e,
+ JSON_QUERY(TO_JSON(full_path)[5], '$.labels') AS element_f
+
+/*-------------------------------------------------------------------------------------*
+ | element_a | element_b | element_c | element_d | element_e | element_f |
+ +-------------------------------------------------------------------------------------+
+ | ["Account"] | ["Transfers"] | ["Account"] | ["Transfers"] | ["Account"] | |
+ | ... | ... | ... | ... | ... | ... |
+ *-------------------------------------------------------------------------------------*/
+```
+
+The following query produces an error because the last node for `p` must
+be the first node for `q`:
+
+```sql
+-- Error: `mid1` and `mid2` are not equal.
+GRAPH FinGraph
+MATCH
+ p=(src:Account)-[t1:Transfers]->(mid1:Account),
+ q=(mid2:Account)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+RETURN TO_JSON(full_path) AS results
+```
+
+The following query produces an error because the path called `p` is `NULL`:
+
+```sql
+-- Error: a graph path is NULL.
+GRAPH FinGraph
+MATCH
+ p=NULL,
+ q=(mid:Account)-[t2:Transfers]->(dst:Account)
+LET full_path = p || q
+RETURN TO_JSON(full_path) AS results
+```
+
+### Graph logical operators
+
+
+ZetaSQL supports the following logical operators in
+[element pattern label expressions][element-pattern-definition]:
+
+
+
+
+ Name |
+ Syntax |
+ Description |
+
+
+
+
+ NOT |
+ !X |
+
+ Returns TRUE if X is not included, otherwise,
+ returns FALSE .
+ |
+
+
+ OR |
+ X | Y |
+
+ Returns TRUE if either X or Y is
+ included, otherwise, returns FALSE .
+ |
+
+
+ AND |
+ X & Y |
+
+ Returns TRUE if both X and Y are
+ included, otherwise, returns FALSE .
+ |
+
+
+
+
+[element-pattern-definition]: https://github.com/google/zetasql/blob/master/docs/graph-patterns.md#element_pattern_definition
+
+### Graph predicates
+
+
+ZetaSQL supports the following graph-specific predicates in
+graph expressions. A predicate can produce `TRUE`, `FALSE`, or `NULL`.
+
++ [`PROPERTY_EXISTS` predicate][property-exists-predicate]
++ [`IS SOURCE` predicate][is-source-predicate]
++ [`IS DESTINATION` predicate][is-destination-predicate]
++ [`SAME` predicate][same-predicate]
+
+[property-exists-predicate]: #property_exists_predicate
+
+[is-source-predicate]: #is_source_predicate
+
+[is-destination-predicate]: #is_destination_predicate
+
+[same-predicate]: #same_predicate
+
+### `IS DESTINATION` predicate
+
+
+```sql
+node IS [ NOT ] DESTINATION [ OF ] edge
+```
+
+**Description**
+
+In a graph, checks to see if a node is or isn't the destination of an edge.
+Can produce `TRUE`, `FALSE`, or `NULL`.
+
+Arguments:
+
++ `node`: The graph pattern variable for the node element.
++ `edge`: The graph pattern variable for the edge element.
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[transfer:Transfers]-(b:Account)
+WHERE a IS DESTINATION of transfer
+RETURN a.id AS a_id, b.id AS b_id
+
+/*-------------+
+ | a_id | b_id |
+ +-------------+
+ | 16 | 7 |
+ | 16 | 7 |
+ | 20 | 16 |
+ | 7 | 20 |
+ | 16 | 20 |
+ +-------------*/
+```
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[transfer:Transfers]-(b:Account)
+WHERE b IS DESTINATION of transfer
+RETURN a.id AS a_id, b.id AS b_id
+
+/*-------------+
+ | a_id | b_id |
+ +-------------+
+ | 7 | 16 |
+ | 7 | 16 |
+ | 16 | 20 |
+ | 20 | 7 |
+ | 20 | 16 |
+ +-------------*/
+```
+
+### `IS SOURCE` predicate
+
+
+```sql
+node IS [ NOT ] SOURCE [ OF ] edge
+```
+
+**Description**
+
+In a graph, checks to see if a node is or isn't the source of an edge.
+Can produce `TRUE`, `FALSE`, or `NULL`.
+
+Arguments:
+
++ `node`: The graph pattern variable for the node element.
++ `edge`: The graph pattern variable for the edge element.
+
+**Examples**
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[transfer:Transfers]-(b:Account)
+WHERE a IS SOURCE of transfer
+RETURN a.id AS a_id, b.id AS b_id
+
+/*-------------+
+ | a_id | b_id |
+ +-------------+
+ | 20 | 7 |
+ | 7 | 16 |
+ | 7 | 16 |
+ | 20 | 16 |
+ | 16 | 20 |
+ +-------------*/
+```
+
+```sql
+GRAPH FinGraph
+MATCH (a:Account)-[transfer:Transfers]-(b:Account)
+WHERE b IS SOURCE of transfer
+RETURN a.id AS a_id, b.id AS b_id
+
+/*-------------+
+ | a_id | b_id |
+ +-------------+
+ | 7 | 20 |
+ | 16 | 7 |
+ | 16 | 7 |
+ | 16 | 20 |
+ | 20 | 16 |
+ +-------------*/
+```
+
+### `PROPERTY_EXISTS` predicate
+
+
+```sql
+PROPERTY_EXISTS(element, element_property)
+```
+
+**Description**
+
+In a graph, checks to see if a property exists for an element.
+Can produce `TRUE`, `FALSE`, or `NULL`.
+
+Arguments:
+
++ `element`: The graph pattern variable for a node or edge element.
++ `element_property`: The name of the property to look for in `element`.
+ The property name must refer to a property in the graph. If the property
+ does not exist in the graph, an error is produced. The property name is
+ resolved in a case-insensitive manner.
+
+**Example**
+
+```sql
+GRAPH FinGraph
+MATCH (n:Person|Account WHERE PROPERTY_EXISTS(n, name))
+RETURN n.name
+
+/*------+
+ | name |
+ +------+
+ | Alex |
+ | Dana |
+ | Lee |
+ +------*/
+```
+
+### `SAME` predicate
+
+
+```sql
+SAME (element, element[, element])
+```
+
+**Description**
+
+In a graph, determines if all graph elements in a list bind to the same node or
+edge. Can produce `TRUE`, `FALSE`, or `NULL`.
+
+Arguments:
+
++ `element`: The graph pattern variable for a node or edge element.
+
+**Example**
+
+The following query checks to see if `src` and `dest` are not the same account.
+
+```sql
+GRAPH FinGraph
+MATCH (src:Account)<-[transfer:Transfers]-(dest:Account)
+WHERE NOT SAME(src, dest)
+RETURN src.id AS source_id, dest.id AS destination_id
+
+/*----------------------------+
+ | source_id | destination_id |
+ +----------------------------+
+ | 7 | 20 |
+ | 16 | 7 |
+ | 16 | 7 |
+ | 16 | 20 |
+ | 20 | 16 |
+ +----------------------------*/
+```
+
### Comparison operators
@@ -3021,7 +3407,7 @@ NEW Universe {
radius_miles: 432,690
age: 4,603,000,000
}
- constellations [{
+ constellations: [{
name: "Libra"
index: 0
}, {
@@ -3081,6 +3467,12 @@ The concatenation operator combines multiple values into one.
+Note: The concatenation operator is translated into a nested
+[`CONCAT`][concat] function call. For example, `'A' || 'B' || 'C'` becomes
+`CONCAT('A', CONCAT('B', 'C'))`.
+
+[concat]: https://github.com/google/zetasql/blob/master/docs/string_functions.md#concat
+
### `WITH` expression
diff --git a/docs/pipe-syntax.md b/docs/pipe-syntax.md
index d8e348d13..2aad216f0 100644
--- a/docs/pipe-syntax.md
+++ b/docs/pipe-syntax.md
@@ -9,13 +9,16 @@ concise alternative to [standard query syntax][query-syntax]. Pipe syntax
supports many of the same operators as standard syntax, and improves some areas
of SQL query functionality.
+For more background and details on the language design, see the research paper
+[SQL Has Problems. We Can Fix Them: Pipe Syntax In SQL](https://research.google/pubs/sql-has-problems-we-can-fix-them-pipe-syntax-in-sql/).
+
## Pipe syntax
Pipe syntax has the following key characteristics:
-+ Pipe syntax consists of a pipe and an angle bracket `|>`, an operator name,
- and any arguments: \
++ Each pipe operator in pipe syntax consists of the pipe symbol, `|>`,
+ an operator name, and any arguments: \
`|> operator_name argument_list`
+ Pipe operators can be added to the end of any valid query.
+ Pipe operators can be applied in any order, any number of times.
@@ -26,30 +29,70 @@ Pipe syntax has the following key characteristics:
+ A query can [start with a `FROM` clause][from-queries], and pipe
operators can optionally be added after the `FROM` clause.
-Compare the following equivalent queries that count open tickets
-assigned to a user:
+### Query comparison
+
+Consider the following table called `Produce`:
+
+```sql
+CREATE OR REPLACE TABLE Produce AS (
+ SELECT 'apples' AS item, 2 AS sales, 'fruit' AS category
+ UNION ALL
+ SELECT 'carrots' AS item, 8 AS sales, 'vegetable' AS category
+ UNION ALL
+ SELECT 'apples' AS item, 7 AS sales, 'fruit' AS category
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales, 'fruit' AS category
+);
+
+SELECT * FROM Produce;
+
+/*---------+-------+-----------+
+ | item | sales | category |
+ +---------+-------+-----------+
+ | apples | 2 | fruit |
+ | carrots | 8 | vegetable |
+ | apples | 7 | fruit |
+ | bananas | 5 | fruit |
+ +---------+-------+-----------*/
+```
+
+Compare the following equivalent queries that compute the number and total
+amount of sales for each item in the `Produce` table:
**Standard syntax**
```sql
-SELECT component_id, COUNT(*)
-FROM ticketing_system_table
+SELECT item, COUNT(*) AS num_items, SUM(sales) AS total_sales
+FROM Produce
WHERE
- assignee_user.email = 'username@email.com'
- AND status IN ('NEW', 'ASSIGNED', 'ACCEPTED')
-GROUP BY component_id
-ORDER BY component_id DESC;
+ item != 'bananas'
+ AND category IN ('fruit', 'nut')
+GROUP BY item
+ORDER BY item DESC;
+
+/*--------+-----------+-------------+
+ | item | num_items | total_sales |
+ +--------+-----------+-------------+
+ | apples | 2 | 9 |
+ +--------+-----------+-------------*/
```
**Pipe syntax**
```sql
-FROM ticketing_system_table
+FROM Produce
|> WHERE
- assignee_user.email = 'username@email.com'
- AND status IN ('NEW', 'ASSIGNED', 'ACCEPTED')
-|> AGGREGATE COUNT(*)
- GROUP AND ORDER BY component_id DESC;
+ item != 'bananas'
+ AND category IN ('fruit', 'nut')
+|> AGGREGATE COUNT(*) AS num_items, SUM(sales) AS total_sales
+ GROUP BY item
+|> ORDER BY item DESC;
+
+/*--------+-----------+-------------+
+ | item | num_items | total_sales |
+ +--------+-----------+-------------+
+ | apples | 2 | 9 |
+ +--------+-----------+-------------*/
```
## Pipe operator semantics
@@ -59,7 +102,7 @@ Pipe operators have the following semantic behavior:
+ Each pipe operator performs a self-contained operation.
+ A pipe operator consumes the input table passed to it through the pipe
- character and produces a new table as output.
+ symbol, `|>`, and produces a new table as output.
+ A pipe operator can reference only columns from its immediate input table.
Columns from earlier in the same query aren't visible. Inside subqueries,
correlated references to outer columns are still allowed.
@@ -69,7 +112,8 @@ Pipe operators have the following semantic behavior:
In pipe syntax, a query can start with a standard [`FROM` clause][from-clause]
and use any standard `FROM` syntax, including tables, joins, subqueries,
-`UNNEST` operations, and table-valued functions (TVFs). Table aliases can be
+`UNNEST` operations, and
+table-valued functions (TVFs). Table aliases can be
assigned to each input item using the [`AS alias` clause][using-aliases].
A query with only a `FROM` clause, like `FROM table_name`, is allowed in pipe
@@ -83,19 +127,36 @@ syntax.
**Examples**
-
--- Return a table row that matches a condition.
-FROM table_name
-|> WHERE value_column IS NULL
-|> LIMIT 1;
-
+The following queries use the [`Produce` table][query-comparison]:
-
+```sql
+FROM Produce;
+
+/*---------+-------+-----------+
+ | item | sales | category |
+ +---------+-------+-----------+
+ | apples | 2 | fruit |
+ | carrots | 8 | vegetable |
+ | apples | 7 | fruit |
+ | bananas | 5 | fruit |
+ +---------+-------+-----------*/
+```
+
+```sql
-- Join tables in the FROM clause and then apply pipe operators.
-FROM Table1 AS t1 JOIN Table2 AS t2 USING (key)
-|> AGGREGATE SUM(t2.value)
- GROUP BY t1.key;
-
+FROM
+ Produce AS p1
+ JOIN Produce AS p2
+ USING (item)
+|> WHERE item = "bananas"
+|> SELECT p1.item, p2.sales;
+
+/*---------+-------+
+ | item | sales |
+ +---------+-------+
+ | bananas | 5 |
+ +---------+-------*/
+```
## Pipe operators
@@ -104,6 +165,175 @@ correspond or relate to similar operations in standard syntax, the operator
descriptions highlight similarities and differences and link to more detailed
documentation on the corresponding syntax.
+### Pipe operator list
+
+
+
+
+ Name |
+ Summary |
+
+
+
+
+
+ SELECT
+ |
+ Produces a new table with the listed columns. |
+
+
+
+ EXTEND
+ |
+ Propagates the existing table and adds computed columns. |
+
+
+
+ SET
+ |
+ Replaces the values of columns in the current table. |
+
+
+
+ DROP
+ |
+ Removes listed columns from the current table. |
+
+
+
+ RENAME
+ |
+ Renames specified columns. |
+
+
+
+ AS
+ |
+ Introduces a table alias for the input table. |
+
+
+
+ WHERE
+ |
+ Filters the results of the input table. |
+
+
+
+ LIMIT
+ |
+
+ Limits the number of rows to return in a query, with an optional
+ OFFSET clause to skip over rows.
+ |
+
+
+
+ AGGREGATE
+ |
+
+ Performs aggregation on data across groups of rows or the full
+ input table.
+ |
+
+
+
+ DISTINCT
+ |
+
+ Returns distinct rows from the input table, while preserving table aliases.
+ |
+
+
+
+ ORDER BY
+ |
+ Sorts results by a list of expressions. |
+
+
+
+ UNION
+ |
+
+ Combines the results of the input queries to the left and right of the pipe operator by pairing columns from the results of each query and vertically concatenating them.
+ |
+
+
+
+ INTERSECT
+ |
+
+ Returns rows that are found in the results of both the input query to the left
+ of the pipe operator and all input queries to the right of the pipe
+ operator.
+ |
+
+
+
+ EXCEPT
+ |
+
+ Returns rows from the input query to the left of the pipe operator that
+ aren't present in any input queries to the right of the pipe operator.
+ |
+
+
+
+ JOIN
+ |
+
+ Joins rows from the input table with rows from a second table provided as an
+ argument.
+ |
+
+
+
+ CALL
+ |
+
+ Calls a table-valued function (TVF), passing the pipe input table as a
+ table argument.
+ |
+
+
+
+ WINDOW
+ |
+
+ Adds columns with the result of computing the function over some window
+ of existing rows
+ |
+
+
+
+ TABLESAMPLE
+ |
+ Selects a random sample of rows from the input table. |
+
+
+
+ PIVOT
+ |
+ Rotates rows into columns. |
+
+
+
+ UNPIVOT
+ |
+ Rotates columns into rows. |
+
+
+
+ ASSERT
+ |
+
+ Evaluates that an expression is true for all input rows, raising an error
+ if not.
+ |
+
+
+
+
+
### `SELECT` pipe operator
@@ -142,15 +372,40 @@ pipe syntax supports other operators:
**Example**
-
-|> SELECT account_id AS Account
-
+```sql
+FROM (SELECT 'apples' AS item, 2 AS sales)
+|> SELECT item AS fruit_name;
+
+/*------------+
+ | fruit_name |
+ +------------+
+ | apples |
+ +------------*/
+```
+
+[select-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#select_list
+
+[window-functions]: https://github.com/google/zetasql/blob/master/docs/window-function-calls.md
+
+[select-star]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#select_
+
+[aggregate-pipe-operator]: #aggregate_pipe_operator
+
+[extend-pipe-operator]: #extend_pipe_operator
+
+[set-pipe-operator]: #set_pipe_operator
+
+[drop-pipe-operator]: #drop_pipe_operator
+
+[rename-pipe-operator]: #rename_pipe_operator
+
+[value-tables]: https://github.com/google/zetasql/blob/master/docs/data-model.md#value_tables
### `EXTEND` pipe operator
-|> EXTEND expression [[AS] alias] [, ...]
+|> EXTEND expression [[AS] alias] [, ...]
**Description**
@@ -161,14 +416,45 @@ Propagates the existing table and adds a computed column, similar to
**Examples**
-
-|> EXTEND status IN ('NEW', 'ASSIGNED', 'ACCEPTED') AS is_open
-
+```sql
+(
+ SELECT 'apples' AS item, 2 AS sales
+ UNION ALL
+ SELECT 'carrots' AS item, 8 AS sales
+)
+|> EXTEND item IN ('carrots', 'oranges') AS is_orange;
+
+/*---------+-------+------------+
+ | item | sales | is_orange |
+ +---------+-------+------------+
+ | apples | 2 | FALSE |
+ | carrots | 8 | TRUE |
+ +---------+-------+------------*/
+```
-
--- Window function, with OVER
-|> EXTEND SUM(val) OVER (ORDER BY k) AS val_over_k
-
+```sql
+-- Window function, with `OVER`
+(
+ SELECT 'apples' AS item, 2 AS sales
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales
+ UNION ALL
+ SELECT 'carrots' AS item, 8 AS sales
+)
+|> EXTEND SUM(sales) OVER() AS total_sales;
+
+/*---------+-------+-------------+
+ | item | sales | total_sales |
+ +---------+-------+-------------+
+ | apples | 2 | 15 |
+ | bananas | 5 | 15 |
+ | carrots | 8 | 15 |
+ +---------+-------+-------------*/
+```
+
+[select-star]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#select_
+
+[window-functions]: https://github.com/google/zetasql/blob/master/docs/window-function-calls.md
### `SET` pipe operator
@@ -189,9 +475,35 @@ Therefore, `t.x` will still refer to the original value.
**Example**
-
-|> SET x = 5, y = CAST(y AS INT32)
-
+```sql
+(
+ SELECT 1 AS x, 11 AS y
+ UNION ALL
+ SELECT 2 AS x, 22 AS y
+)
+|> SET x = x * x, y = 3;
+
+/*---+---+
+ | x | y |
+ +---+---+
+ | 1 | 3 |
+ | 4 | 3 |
+ +---+---*/
+```
+
+```sql
+FROM (SELECT 2 AS x, 3 AS y) AS t
+|> SET x = x * x, y = 8
+|> SELECT t.x AS original_x, x, y;
+
+/*------------+---+---+
+ | original_x | x | y |
+ +------------+---+---+
+ | 2 | 4 | 8 |
+ +------------+---+---*/
+```
+
+[select-replace]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#select_replace
### `DROP` pipe operator
@@ -216,15 +528,38 @@ deletes persistent schema objects.
**Example**
-
-|> DROP account_id, user_id
-
+```sql
+SELECT 'apples' AS item, 2 AS sales, 'fruit' AS category
+|> DROP sales, category;
+
+/*--------+
+ | item |
+ +--------+
+ | apples |
+ +--------*/
+```
+
+```sql
+FROM (SELECT 1 AS x, 2 AS y) AS t
+|> DROP x
+|> SELECT t.x AS original_x, y;
+
+/*------------+---+
+ | original_x | y |
+ +------------+---+
+ | 1 | 2 |
+ +------------+---*/
+```
+
+[select-except]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#select_except
+
+[drop-statement]: https://github.com/google/zetasql/blob/master/docs/data-definition-language.md#drop
### `RENAME` pipe operator
-|> RENAME old_column_name [AS] new_column_name [, ...]
+|> RENAME old_column_name [AS] new_column_name [, ...]
**Description**
@@ -240,9 +575,18 @@ values. Therefore, `t.x` will still refer to the original value.
**Example**
-
-|> RENAME last_name AS surname
-
+```sql
+SELECT 1 AS x, 2 AS y, 3 AS z
+|> AS t
+|> RENAME y AS renamed_y
+|> SELECT *, t.y AS t_y;
+
+/*---+-----------+---+-----+
+ | x | renamed_y | z | t_y |
+ +---+-----------+---+-----+
+ | 1 | 2 | 3 | 2 |
+ +---+-----------+---+-----*/
+```
### `AS` pipe operator
@@ -261,15 +605,38 @@ all columns in the row.
The `AS` operator can be useful after operators like
[`SELECT`][select-pipe-operator], [`EXTEND`][extend-pipe-operator], or
[`AGGREGATE`][aggregate-pipe-operator] that add columns but can't give table
-aliases to them.
+aliases to them. You can use the table alias to disambiguate columns after the
+`JOIN` operator.
**Example**
-
-|> SELECT x, y, z
-|> AS table_alias
-|> WHERE table_alias.y = 10
-
+```sql
+(
+ SELECT "000123" AS id, "apples" AS item, 2 AS sales
+ UNION ALL
+ SELECT "000456" AS id, "bananas" AS item, 5 AS sales
+) AS sales_table
+|> AGGREGATE SUM(sales) AS total_sales GROUP BY id, item
+-- The sales_table alias is now out of scope. We must introduce a new one.
+|> AS t1
+|> JOIN (SELECT 456 AS id, "yellow" AS color) AS t2
+ ON CAST(t1.id AS INT64) = t2.id
+|> SELECT t2.id, total_sales, color;
+
+/*-----+-------------+--------+
+ | id | total_sales | color |
+ +-----+-------------+--------+
+ | 456 | 5 | yellow |
+ +-----+-------------+--------*/
+```
+
+[using-aliases]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#using_aliases
+
+[select-pipe-operator]: #select_pipe_operator
+
+[extend-pipe-operator]: #extend_pipe_operator
+
+[aggregate-pipe-operator]: #aggregate_pipe_operator
### `WHERE` pipe operator
@@ -292,15 +659,39 @@ a `QUALIFY` clause, use window functions inside a `WHERE` clause instead.
**Example**
-
-|> WHERE assignee_user.email = 'username@email.com'
-
+```sql
+(
+ SELECT 'apples' AS item, 2 AS sales
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales
+ UNION ALL
+ SELECT 'carrots' AS item, 8 AS sales
+)
+|> WHERE sales >= 3;
+
+/*---------+-------+
+ | item | sales |
+ +---------+-------+
+ | bananas | 5 |
+ | carrots | 8 |
+ +---------+-------*/
+```
+
+[where-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#where_clause
+
+[having-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#having_clause
+
+[qualify-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#qualify_clause
+
+[aggregate-pipe-operator]: #aggregate_pipe_operator
+
+[window-functions]: https://github.com/google/zetasql/blob/master/docs/window-function-calls.md
### `LIMIT` pipe operator
-|> LIMIT count [OFFSET skip_rows]
+|> LIMIT count [OFFSET skip_rows]
**Description**
@@ -311,32 +702,62 @@ to skip over rows. The `LIMIT` operator behaves the same as the
**Examples**
-
-|> LIMIT 10
-
+```sql
+(
+ SELECT 'apples' AS item, 2 AS sales
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales
+ UNION ALL
+ SELECT 'carrots' AS item, 8 AS sales
+)
+|> ORDER BY item
+|> LIMIT 1;
-
-|> LIMIT 10 OFFSET 2
-
+/*---------+-------+
+ | item | sales |
+ +---------+-------+
+ | apples | 2 |
+ +---------+-------*/
+```
+
+```sql
+(
+ SELECT 'apples' AS item, 2 AS sales
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales
+ UNION ALL
+ SELECT 'carrots' AS item, 8 AS sales
+)
+|> ORDER BY item
+|> LIMIT 1 OFFSET 2;
+
+/*---------+-------+
+ | item | sales |
+ +---------+-------+
+ | carrots | 8 |
+ +---------+-------*/
+```
+
+[limit-offset-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#limit_and_offset_clause
### `AGGREGATE` pipe operator
-- Full-table aggregation
-|> AGGREGATE aggregate_expression [[AS] alias] [, ...]
+|> AGGREGATE aggregate_expression [[AS] alias] [, ...]
-- Aggregation with grouping
-|> AGGREGATE [aggregate_expression [[AS] alias] [, ...]]
+|> AGGREGATE [aggregate_expression [[AS] alias] [, ...]]
GROUP BY groupable_items [[AS] alias] [, ...]
-- Aggregation with grouping and shorthand ordering syntax
-|> AGGREGATE [aggregate_expression [order_suffix] [[AS] alias] [, ...]]
- GROUP [AND ORDER] BY groupable_item [order_suffix] [[AS] alias] [, ...]
+|> AGGREGATE [aggregate_expression [[AS] alias] [order_suffix] [, ...]]
+ GROUP [AND ORDER] BY groupable_item [[AS] alias] [order_suffix] [, ...]
-order_suffix: {ASC | DESC} [{NULLS FIRST | NULLS LAST}]
+order_suffix: {ASC | DESC} [{NULLS FIRST | NULLS LAST}]
**Description**
@@ -381,35 +802,64 @@ Because output columns are fully specified by the `AGGREGATE` operator, the
`SELECT` operator isn't needed after the `AGGREGATE` operator unless
you want to produce a list of columns different from the default.
-**Examples**
-
-
--- Full-table aggregation
-|> AGGREGATE COUNT(*) AS row_count, SUM(num_users) AS total_users
-
-
--- Aggregation with grouping
-|> AGGREGATE COUNT(*) AS row_count, SUM(num_users) AS total_users,
- GROUP BY org_site, date
-
-
-The following examples compare aggregation in standard syntax and in pipe
-syntax:
+**Standard syntax**
-- Aggregation in standard syntax
-SELECT id, EXTRACT(MONTH FROM date) AS month, SUM(value) AS total
-FROM table
-GROUP BY id, month
+SELECT SUM(col1) AS total, col2, col3, col4...
+FROM table1
+GROUP BY col2, col3, col4...
+**Pipe syntax**
+
-- The same aggregation in pipe syntax
-FROM table
-|> AGGREGATE SUM(value) AS total
- GROUP BY id, EXTRACT(MONTH FROM date) AS month
+FROM table1
+|> AGGREGATE SUM(col1) AS total
+ GROUP BY col2, col3, col4...
+**Examples**
+
+```sql
+-- Full-table aggregation
+(
+ SELECT 'apples' AS item, 2 AS sales
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales
+ UNION ALL
+ SELECT 'apples' AS item, 7 AS sales
+)
+|> AGGREGATE COUNT(*) AS num_items, SUM(sales) AS total_sales;
+
+/*-----------+-------------+
+ | num_items | total_sales |
+ +-----------+-------------+
+ | 3 | 14 |
+ +-----------+-------------*/
+```
+
+```sql
+-- Aggregation with grouping
+(
+ SELECT 'apples' AS item, 2 AS sales
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales
+ UNION ALL
+ SELECT 'apples' AS item, 7 AS sales
+)
+|> AGGREGATE COUNT(*) AS num_items, SUM(sales) AS total_sales
+ GROUP BY item;
+
+/*---------+-----------+-------------+
+ | item | num_items | total_sales |
+ +---------+-----------+-------------+
+ | apples | 2 | 9 |
+ | bananas | 1 | 5 |
+ +---------+-----------+-------------*/
+```
+
#### Shorthand ordering syntax with `AGGREGATE`
@@ -419,8 +869,8 @@ of the `AGGREGATE` operator without repeating the column list:
-- Aggregation with grouping and shorthand ordering syntax
-|> AGGREGATE [aggregate_expression [order_suffix] [[AS] alias] [, ...]]
- GROUP [AND ORDER] BY groupable_item [order_suffix] [[AS] alias] [, ...]
+|> AGGREGATE [aggregate_expression [[AS] alias] [order_suffix] [, ...]]
+ GROUP [AND ORDER] BY groupable_item [[AS] alias] [order_suffix] [, ...]
order_suffix: {ASC | DESC} [{NULLS FIRST | NULLS LAST}]
@@ -441,23 +891,228 @@ the left-to-right output column order.
**Examples**
-
--- Order by all grouping columns.
-|> AGGREGATE COUNT(*)
- GROUP AND ORDER BY first_name, last_name DESC
-
+Consider the following table called `Produce`:
-The ordering in the previous example is equivalent to using
-`|> ORDER BY first_name, last_name DESC`.
+```sql
+/*---------+-------+-----------+
+ | item | sales | category |
+ +---------+-------+-----------+
+ | apples | 2 | fruit |
+ | carrots | 8 | vegetable |
+ | apples | 7 | fruit |
+ | bananas | 5 | fruit |
+ +---------+-------+-----------*/
+```
-
+The following two equivalent examples show you how to order by all grouping
+columns using the `GROUP AND ORDER BY` clause or a separate `ORDER BY` clause:
+
+```sql
+-- Order by all grouping columns using GROUP AND ORDER BY.
+FROM Produce
+|> AGGREGATE SUM(sales) AS total_sales
+ GROUP AND ORDER BY category, item DESC;
+
+/*-----------+---------+-------------+
+ | category | item | total_sales |
+ +-----------+---------+-------------+
+ | fruit | bananas | 5 |
+ | fruit | apples | 9 |
+ | vegetable | carrots | 8 |
+ +-----------+---------+-------------*/
+```
+
+```sql
+-- Order by columns using ORDER BY after performing aggregation.
+FROM Produce
+|> AGGREGATE SUM(sales) AS total_sales
+ GROUP BY category, item
+|> ORDER BY category, item DESC;
+```
+
+You can add an ordering suffix to a column in the `AGGREGATE` list. Although the
+`AGGREGATE` list appears before the `GROUP BY` list in the query, ordering
+suffixes on columns in the `GROUP BY` list are applied first.
+
+```sql
+FROM Produce
+|> AGGREGATE SUM(sales) AS total_sales ASC
+ GROUP BY item, category DESC;
+
+/*---------+-----------+-------------+
+ | item | category | total_sales |
+ +---------+-----------+-------------+
+ | carrots | vegetable | 8 |
+ | bananas | fruit | 5 |
+ | apples | fruit | 9 |
+ +---------+-----------+-------------*/
+```
+
+The previous query is equivalent to the following:
+
+```sql
-- Order by specified grouping and aggregate columns.
-|> AGGREGATE COUNT(*) DESC
- GROUP BY first_name, last_name ASC
+FROM Produce
+|> AGGREGATE SUM(sales) AS total_sales
+ GROUP BY item, category
+|> ORDER BY category DESC, total_sales;
+```
+
+[group-by-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#group_by_clause
+
+[aggregate-functions]: https://github.com/google/zetasql/blob/master/docs/aggregate_functions.md
+
+[as-pipe-operator]: #as_pipe_operator
+
+[extend-pipe-operator]: #extend_pipe_operator
+
+[order-by-pipe-operator]: #order_by_pipe_operator
+
+### `DISTINCT` pipe operator
+
+
+
+|> DISTINCT
-The ordering in the previous example is equivalent to using
-`|> ORDER BY last_name ASC, COUNT(*) DESC`.
+**Description**
+
+Returns distinct rows from the input table, while preserving table aliases.
+
+Using the `DISTINCT` operator after a `SELECT` or `UNION ALL` clause is similar
+to using a [`SELECT DISTINCT` clause][select-distinct] or
+[`UNION DISTINCT` clause][union-operator] in standard syntax, but the `DISTINCT`
+pipe operator can be applied anywhere. The `DISTINCT` operator computes distinct
+rows based on the values of all visible columns. Pseudo-columns are ignored
+while computing distinct rows and are dropped from the output.
+
+The `DISTINCT` operator is similar to using a `|> SELECT DISTINCT *` clause, but
+doesn't expand value table fields, and preserves table aliases from the input.
+
+**Examples**
+
+```sql
+(
+ SELECT 'apples' AS item, 2 AS sales
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales
+ UNION ALL
+ SELECT 'carrots' AS item, 8 AS sales
+)
+|> DISTINCT
+|> WHERE sales >= 3;
+
+/*---------+-------+
+ | item | sales |
+ +---------+-------+
+ | bananas | 5 |
+ | carrots | 8 |
+ +---------+-------*/
+```
+
+In the following example, the table alias `Produce` can be used in
+expressions after the `DISTINCT` pipe operator.
+
+```sql
+(
+ SELECT 'apples' AS item, 2 AS sales
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales
+ UNION ALL
+ SELECT 'carrots' AS item, 8 AS sales
+)
+|> AS Produce
+|> DISTINCT
+|> SELECT Produce.item;
+
+/*---------+
+ | item |
+ +---------+
+ | apples |
+ | bananas |
+ | carrots |
+ +---------*/
+```
+
+By contrast, the table alias isn't visible after a `|> SELECT DISTINCT *`
+clause.
+
+```sql {.bad}
+-- Error, unrecognized name: Produce
+(
+ SELECT 'apples' AS item, 2 AS sales
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales
+ UNION ALL
+ SELECT 'carrots' AS item, 8 AS sales
+)
+|> AS Produce
+|> SELECT DISTINCT *
+|> SELECT Produce.item;
+```
+
+In the following examples, the `DISTINCT` operator doesn't expand value table
+fields and retains the `STRUCT` type in the result. By contrast, the
+`|> SELECT DISTINCT *` clause expands the `STRUCT` type into two columns.
+
+```sql
+SELECT AS STRUCT 1 x, 2 y
+|> DISTINCT;
+
+/*---------+
+ | $struct |
+ +---------+
+ {
+ x: 1,
+ y: 2
+ }
+ +---------*/
+```
+
+```sql
+SELECT AS STRUCT 1 x, 2 y
+|> SELECT DISTINCT *;
+
+/*---+---+
+ | x | y |
+ +---+---+
+ | 1 | 2 |
+ +---+---*/
+```
+
+The following examples show equivalent ways to generate the same results with
+distinct values from columns `a`, `b`, and `c`.
+
+```sql
+FROM table
+|> SELECT DISTINCT a, b, c;
+
+FROM table
+|> SELECT a, b, c
+|> DISTINCT;
+
+FROM table
+|> AGGREGATE
+ GROUP BY a, b, c;
+```
+
+[select-distinct]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#select_distinct
+
+[union-operator]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#union
+
+[aggregate-pipe-operator]: https://github.com/google/zetasql/blob/master/docs/pipe-syntax.md#aggregate_pipe_operator
+
+[using-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#using_clause
+
+[full-join]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#full_join
+
+[inner-join]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#inner_join
### `ORDER BY` pipe operator
@@ -479,10 +1134,420 @@ apply `ORDER BY` behavior more concisely as part of aggregation.
**Example**
-
-|> ORDER BY last_name DESC
+```sql
+(
+ SELECT 1 AS x
+ UNION ALL
+ SELECT 3 AS x
+ UNION ALL
+ SELECT 2 AS x
+)
+|> ORDER BY x DESC;
+
+/*---+
+ | x |
+ +---+
+ | 3 |
+ | 2 |
+ | 1 |
+ +---*/
+```
+
+[order-by-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#order_by_clause
+
+[aggregate-pipe-operator]: #aggregate_pipe_operator
+
+[shorthand-order-pipe-syntax]: #shorthand_order_pipe_syntax
+
+### `UNION` pipe operator
+
+
+
+query_expression
+|> UNION {ALL | DISTINCT} (query_expression) [, (query_expression), ...]
+**Description**
+
+Combines the results of the input queries to the left and right of the pipe
+operator by pairing columns from the results of each query and vertically
+concatenating them.
+
+The `UNION` pipe operator behaves the same as the
+[`UNION` set operator][union-operator] in standard syntax. However, in pipe
+syntax, the query expressions after the `UNION` pipe operator are enclosed in
+parentheses and separated by commas instead of by the repeated operator name.
+For example, `UNION ALL SELECT 1 UNION ALL SELECT 2` in standard syntax becomes
+`UNION ALL (SELECT 1), (SELECT 2)` in pipe syntax.
+
+The `UNION` pipe operator supports the same modifiers as the
+`UNION` set operator in standard syntax, such as the
+[`CORRESPONDING` modifier][corresponding-modifier].
+
+**Examples**
+
+```sql
+SELECT * FROM UNNEST(ARRAY[1, 2, 3]) AS number
+|> UNION ALL (SELECT 1);
+
+/*--------+
+ | number |
+ +--------+
+ | 1 |
+ | 2 |
+ | 3 |
+ | 1 |
+ +--------*/
+```
+
+```sql
+SELECT * FROM UNNEST(ARRAY[1, 2, 3]) AS number
+|> UNION DISTINCT (SELECT 1);
+
+/*--------+
+ | number |
+ +--------+
+ | 1 |
+ | 2 |
+ | 3 |
+ +--------*/
+```
+
+The following example shows multiple input queries to the right of the pipe
+operator:
+
+```sql
+SELECT * FROM UNNEST(ARRAY[1, 2, 3]) AS number
+|> UNION DISTINCT (SELECT 1), (SELECT 2);
+
+/*--------+
+ | number |
+ +--------+
+ | 1 |
+ | 2 |
+ | 3 |
+ +--------*/
+```
+
+#### `CORRESPONDING` modifier
+
+
+The [`UNION`][union-pipe-operator], [`INTERSECT`][intersect-pipe-operator], and
+[`EXCEPT`][except-pipe-operator] pipe operators support the `CORRESPONDING`
+modifier, which matches columns by name instead of by position in query results:
+
+
+query_expression
+|> [{FULL | LEFT}] [OUTER] {UNION | INTERSECT | EXCEPT} {ALL | DISTINCT}
+[STRICT] CORRESPONDING [BY (column_list)] (query_expression) [, (query_expression), ...]
+
+
+The `CORRESPONDING` modifier behaves the same as the
+[`CORRESPONDING` set operation][corresponding-operation] in standard syntax.
+However, in pipe syntax, the query expressions after the `CORRESPONDING`
+modifier are enclosed in parentheses. For example, `CORRESPONDING SELECT
+...` in standard syntax becomes `CORRESPONDING (SELECT ...)` in pipe syntax.
+
+**Examples**
+
+In the following example, the input queries to the left and right of the pipe
+operator specify the same column names in different orders. With the
+`CORRESPONDING` modifier, the results are matched by column name instead of in
+the order the columns were specified in the query.
+
+```sql
+SELECT 1 AS one_digit, 10 AS two_digit
+|> UNION ALL CORRESPONDING (SELECT 20 AS two_digit, 2 AS one_digit);
+
+/*-----------+-----------+
+ | one_digit | two_digit |
+ +-----------+-----------+
+ | 1 | 10 |
+ | 2 | 20 |
+ +-----------+-----------*/
+```
+
+By contrast, the following example without the `CORRESPONDING` modifier shows
+results in the order the columns were listed in the input queries instead of by
+column name.
+
+```sql
+SELECT 1 AS one_digit, 10 AS two_digit
+|> UNION ALL (SELECT 20 AS two_digit, 2 AS one_digit);
+
+/*-----------+-----------+
+ | one_digit | two_digit |
+ +-----------+-----------+
+ | 1 | 10 |
+ | 20 | 2 |
+ +-----------+-----------*/
+```
+
+The following example adds a `three_digit` column to the input query on the left
+of the pipe operator and a `four_digit` column to the input query on the right
+of the pipe operator. Because these columns aren't present in both queries, the
+new columns are excluded from the results.
+
+```sql
+SELECT 1 AS one_digit, 10 AS two_digit, 100 AS three_digit
+|> UNION ALL CORRESPONDING (SELECT 20 AS two_digit, 2 AS one_digit, 1000 AS four_digit);
+
+/*-----------+-----------+
+ | one_digit | two_digit |
+ +-----------+-----------+
+ | 1 | 10 |
+ | 2 | 20 |
+ +-----------+-----------*/
+```
+
+To include these differing columns, the following example uses `FULL OUTER` mode
+to populate `NULL` values for the missing column in each query.
+
+```sql
+SELECT 1 AS one_digit, 10 AS two_digit, 100 AS three_digit
+|> FULL OUTER UNION ALL CORRESPONDING
+ (SELECT 20 AS two_digit, 2 AS one_digit, 1000 AS four_digit);
+
+/*-----------+-----------+-------------+------------+
+ | one_digit | two_digit | three_digit | four_digit |
+ +-----------+-----------+-------------+------------+
+ | 1 | 10 | 100 | NULL |
+ | 2 | 20 | NULL | 1000 |
+ +-----------+-----------+-------------+------------*/
+```
+
+Similarly, the following example uses `LEFT OUTER` mode to include the new
+column from only the input query on the left of the pipe operator and populate a
+`NULL` value for the missing column in the input query on the right of the pipe
+operator.
+
+```sql
+SELECT 1 AS one_digit, 10 AS two_digit, 100 AS three_digit
+|> LEFT OUTER UNION ALL CORRESPONDING
+ (SELECT 20 AS two_digit, 2 AS one_digit, 1000 AS four_digit);
+
+/*-----------+-----------+-------------+
+ | one_digit | two_digit | three_digit |
+ +-----------+-----------+-------------+
+ | 1 | 10 | 100 |
+ | 2 | 20 | NULL |
+ +-----------+-----------+-------------*/
+```
+
+The following example uses the modifier `BY (column_list)` to return only the
+specified columns in the specified order.
+
+```sql
+SELECT 1 AS one_digit, 10 AS two_digit, 100 AS three_digit
+|> FULL OUTER UNION ALL CORRESPONDING BY (three_digit, two_digit)
+ (SELECT 20 AS two_digit, 2 AS one_digit, 1000 AS four_digit);
+
+/*-------------+-----------+
+ | three_digit | two_digit |
+ +-------------+-----------+
+ | 100 | 10 |
+ | NULL | 20 |
+ +-------------+-----------*/
+```
+
+The following examples use the `CORRESPONDING` modifier with the `INTERSECT` and
+`EXCEPT` pipe operators to likewise match the results by column name. The
+`INTERSECT` pipe operator returns common rows between the input queries, and the
+`EXCEPT` pipe operator returns rows that are present only in the input query to
+the left of the pipe operator.
+
+```sql
+WITH
+ NumbersTable AS (
+ SELECT 1 AS one_digit, 10 AS two_digit
+ UNION ALL
+ SELECT 2, 20
+ UNION ALL
+ SELECT 3, 30
+ )
+SELECT one_digit, two_digit FROM NumbersTable
+|> INTERSECT ALL CORRESPONDING (SELECT 10 AS two_digit, 1 AS one_digit);
+
+/*-----------+-----------+
+ | one_digit | two_digit |
+ +-----------+-----------+
+ | 1 | 10 |
+ +-----------+-----------*/
+```
+
+```sql
+WITH
+ NumbersTable AS (
+ SELECT 1 AS one_digit, 10 AS two_digit
+ UNION ALL
+ SELECT 2, 20
+ UNION ALL
+ SELECT 3, 30
+ )
+SELECT one_digit, two_digit FROM NumbersTable
+|> EXCEPT ALL CORRESPONDING (SELECT 10 AS two_digit, 1 AS one_digit);
+
+/*-----------+-----------+
+ | one_digit | two_digit |
+ +-----------+-----------+
+ | 2 | 20 |
+ | 3 | 30 |
+ +-----------+-----------*/
+```
+
+[union-operator]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#union
+
+[union-pipe-operator]: #union_pipe_operator
+
+[intersect-pipe-operator]: #intersect_pipe_operator
+
+[except-pipe-operator]: #except_pipe_operator
+
+[corresponding-operation]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#corresponding
+
+### `INTERSECT` pipe operator
+
+
+
+query_expression
+|> INTERSECT {ALL | DISTINCT} (query_expression) [, (query_expression), ...]
+
+
+**Description**
+
+Returns rows that are found in the results of both the input query to the left
+of the pipe operator and all input queries to the right of the pipe
+operator.
+
+The `INTERSECT` pipe operator behaves the same as the
+[`INTERSECT` set operator][intersect-operator] in standard syntax. However, in
+pipe syntax, the query expressions after the `INTERSECT` pipe operator are
+enclosed in parentheses and separated by commas instead of by the repeated
+operator name. For example, `INTERSECT ALL SELECT 1 INTERSECT ALL SELECT 2` in
+standard syntax becomes `INTERSECT ALL (SELECT 1), (SELECT 2)` in pipe syntax.
+
+The `INTERSECT` pipe operator supports the same modifiers as the
+`INTERSECT` set operator in standard syntax, such as the
+[`CORRESPONDING` modifier][corresponding-modifier].
+
+**Examples**
+
+```sql
+SELECT * FROM UNNEST(ARRAY[1, 2, 3, 3, 4]) AS number
+|> INTERSECT ALL (SELECT * FROM UNNEST(ARRAY[2, 3, 3, 5]) AS number);
+
+/*--------+
+ | number |
+ +--------+
+ | 2 |
+ | 3 |
+ | 3 |
+ +--------*/
+```
+
+```sql
+SELECT * FROM UNNEST(ARRAY[1, 2, 3, 3, 4]) AS number
+|> INTERSECT DISTINCT (SELECT * FROM UNNEST(ARRAY[2, 3, 3, 5]) AS number);
+
+/*--------+
+ | number |
+ +--------+
+ | 2 |
+ | 3 |
+ +--------*/
+```
+
+The following example shows multiple input queries to the right of the pipe
+operator:
+
+```sql
+SELECT * FROM UNNEST(ARRAY[1, 2, 3, 3, 4]) AS number
+|> INTERSECT DISTINCT
+ (SELECT * FROM UNNEST(ARRAY[2, 3, 3, 5]) AS number),
+ (SELECT * FROM UNNEST(ARRAY[3, 3, 4, 5]) AS number);
+
+/*--------+
+ | number |
+ +--------+
+ | 3 |
+ +--------*/
+```
+
+[intersect-operator]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#intersect
+
+[corresponding-modifier]: #corresponding_modifier
+
+### `EXCEPT` pipe operator
+
+
+
+query_expression
+|> EXCEPT {ALL | DISTINCT} (query_expression) [, (query_expression), ...]
+
+
+**Description**
+
+Returns rows from the input query to the left of the pipe operator that aren't
+present in any input queries to the right of the pipe operator.
+
+The `EXCEPT` pipe operator behaves the same as the
+[`EXCEPT` set operator][except-operator] in standard syntax. However, in pipe
+syntax, the query expressions after the `EXCEPT` pipe operator are enclosed in
+parentheses and separated by commas instead of by the repeated operator name.
+For example, `EXCEPT ALL SELECT 1 EXCEPT ALL SELECT 2` in standard syntax
+becomes `EXCEPT ALL (SELECT 1), (SELECT 2)` in pipe syntax.
+
+The `EXCEPT` pipe operator supports the same modifiers as the
+`EXCEPT` set operator in standard syntax, such as the
+[`CORRESPONDING` modifier][corresponding-modifier].
+
+**Examples**
+
+```sql
+SELECT * FROM UNNEST(ARRAY[1, 2, 3, 3, 4]) AS number
+|> EXCEPT ALL (SELECT * FROM UNNEST(ARRAY[1, 2]) AS number);
+
+/*--------+
+ | number |
+ +--------+
+ | 3 |
+ | 3 |
+ | 4 |
+ +--------*/
+```
+
+```sql
+SELECT * FROM UNNEST(ARRAY[1, 2, 3, 3, 4]) AS number
+|> EXCEPT DISTINCT (SELECT * FROM UNNEST(ARRAY[1, 2]) AS number);
+
+/*--------+
+ | number |
+ +--------+
+ | 3 |
+ | 4 |
+ +--------*/
+```
+
+The following example shows multiple input queries to the right of the pipe
+operator:
+
+```sql
+SELECT * FROM UNNEST(ARRAY[1, 2, 3, 3, 4]) AS number
+|> EXCEPT DISTINCT
+ (SELECT * FROM UNNEST(ARRAY[1, 2]) AS number),
+ (SELECT * FROM UNNEST(ARRAY[1, 4]) AS number);
+
+/*--------+
+ | number |
+ +--------+
+ | 3 |
+ +--------*/
+```
+
+[except-operator]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#except
+
+[corresponding-modifier]: #corresponding_modifier
+
### `JOIN` pipe operator
@@ -508,16 +1573,39 @@ input table is needed, perhaps to disambiguate columns in an
**Example**
-
-|> JOIN ticketing_system_table AS components
- ON bug_table.component_id = CAST(components.component_id AS int64)
-
+```sql
+(
+ SELECT 'apples' AS item, 2 AS sales
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales
+)
+|> AS produce_sales
+|> LEFT JOIN
+ (
+ SELECT "apples" AS item, 123 AS id
+ ) AS produce_data
+ ON produce_sales.item = produce_data.item
+|> SELECT produce_sales.item, sales, id;
+
+/*---------+-------+------+
+ | item | sales | id |
+ +---------+-------+------+
+ | apples | 2 | 123 |
+ | bananas | 5 | NULL |
+ +---------+-------+------*/
+```
+
+[join-operation]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#join_types
+
+[on-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#on_clause
+
+[as-pipe-operator]: #as_pipe_operator
### `CALL` pipe operator
-|> CALL table_function (argument [, ...]) [[AS] alias]
+|> CALL table_function (argument [, ...]) [[AS] alias]
**Description**
@@ -537,29 +1625,37 @@ Multiple TVFs can be called sequentially without using nested subqueries.
**Examples**
-
-|> CALL AddSuffix('*')
-|> CALL AddSuffix2(arg1, arg2, arg3)
-
+Suppose you have TVFs with the following parameters:
-The following examples compare a TVF call in standard syntax and in pipe syntax:
++ `tvf1(inputTable1 ANY TABLE, arg1 ANY TYPE)` and
++ `tvf2(arg2 ANY TYPE, arg3 ANY TYPE, inputTable2 ANY TABLE)`.
-
--- Call a TVF in standard syntax.
-FROM tvf( (SELECT * FROM table), arg1, arg2 )
-
+The following examples compare calling both TVFs on an input table
+by using standard syntax and by using the `CALL` pipe operator:
-
--- Call the same TVF in pipe syntax.
-SELECT * FROM table
-|> CALL tvf(arg1, arg2)
-
+```sql
+-- Call the TVFs without using the CALL operator.
+SELECT *
+FROM
+ tvf2(arg2, arg3, TABLE tvf1(TABLE input_table, arg1));
+```
+
+```sql
+-- Call the same TVFs with the CALL operator.
+FROM input_table
+|> CALL tvf1(arg1)
+|> CALL tvf2(arg2, arg3);
+```
+
+[tvf]: https://github.com/google/zetasql/blob/master/docs/table-functions.md#tvfs
+
+[table-function-calls]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#table_function_calls
### `WINDOW` pipe operator
-|> WINDOW window_expression [[AS] alias] [, ...]
+|> WINDOW window_expression [[AS] alias] [, ...]
**Description**
@@ -570,15 +1666,35 @@ existing rows, similar to calling [window functions][window-functions] in a
window expression must include a window function with an
[`OVER` clause][over-clause].
-The [`EXTEND` operator][extend-pipe-operator] is recommended for window
-functions instead of the `WINDOW` operator because it also supports window
-expressions and covers the same use cases.
+Alternatively, you can use the [`EXTEND` operator][extend-pipe-operator] for
+window functions.
**Example**
-
-|> WINDOW SUM(val) OVER (ORDER BY k)
-
+```sql
+(
+ SELECT 'apples' AS item, 2 AS sales
+ UNION ALL
+ SELECT 'bananas' AS item, 5 AS sales
+ UNION ALL
+ SELECT 'carrots' AS item, 8 AS sales
+)
+|> WINDOW SUM(sales) OVER() AS total_sales;
+
+/*---------+-------+-------------+
+ | item | sales | total_sales |
+ +---------+-------+-------------+
+ | apples | 2 | 15 |
+ | bananas | 5 | 15 |
+ | carrots | 8 | 15 |
+ +---------+-------+-------------*/
+```
+
+[window-functions]: https://github.com/google/zetasql/blob/master/docs/window-function-calls.md
+
+[over-clause]: https://github.com/google/zetasql/blob/master/docs/window-function-calls.md#def_over_clause
+
+[extend-pipe-operator]: #extend_pipe_operator
### `TABLESAMPLE` pipe operator
@@ -595,15 +1711,21 @@ standard syntax.
**Example**
-
-|> TABLESAMPLE BERNOULLI (0.1 PERCENT)
-
+The following example samples approximately 1% of data from a table called
+`LargeTable`:
+
+```sql
+FROM LargeTable
+|> TABLESAMPLE SYSTEM (1 PERCENT);
+```
+
+[tablesample-operator]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#tablesample_operator
### `PIVOT` pipe operator
-|> PIVOT (aggregate_expression FOR input_column IN (pivot_column [, ...])) [[AS] alias]
+|> PIVOT (aggregate_expression FOR input_column IN (pivot_column [, ...])) [[AS] alias]
**Description**
@@ -613,16 +1735,35 @@ Rotates rows into columns. The `PIVOT` pipe operator behaves the same as the
**Example**
-
-|> SELECT year, username, num_users
-|> PIVOT (SUM(num_users) FOR username IN ('Jeff', 'Jeffrey', 'Jeffery'))
-
+```sql
+(
+ SELECT "kale" AS product, 51 AS sales, "Q1" AS quarter
+ UNION ALL
+ SELECT "kale" AS product, 4 AS sales, "Q1" AS quarter
+ UNION ALL
+ SELECT "kale" AS product, 45 AS sales, "Q2" AS quarter
+ UNION ALL
+ SELECT "apple" AS product, 8 AS sales, "Q1" AS quarter
+ UNION ALL
+ SELECT "apple" AS product, 10 AS sales, "Q2" AS quarter
+)
+|> PIVOT(SUM(sales) FOR quarter IN ('Q1', 'Q2'));
+
+/*---------+----+------+
+ | product | Q1 | Q2 |
+ +---------+----+------+
+ | kale | 55 | 45 |
+ | apple | 8 | 10 |
+ +---------+----+------*/
+```
+
+[pivot-operator]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#pivot_operator
### `UNPIVOT` pipe operator
-|> UNPIVOT (values_column FOR name_column IN (column_to_unpivot [, ...])) [[AS] alias]
+|> UNPIVOT (values_column FOR name_column IN (column_to_unpivot [, ...])) [[AS] alias]
**Description**
@@ -632,10 +1773,25 @@ Rotates columns into rows. The `UNPIVOT` pipe operator behaves the same as the
**Example**
-
-|> UNPIVOT (count FOR user_location IN (London, Bangalore, Madrid))
-|> ORDER BY year, cnt
-
+```sql
+(
+ SELECT 'kale' as product, 55 AS Q1, 45 AS Q2
+ UNION ALL
+ SELECT 'apple', 8, 10
+)
+|> UNPIVOT(sales FOR quarter IN (Q1, Q2));
+
+/*---------+-------+---------+
+ | product | sales | quarter |
+ +---------+-------+---------+
+ | kale | 55 | Q1 |
+ | kale | 45 | Q2 |
+ | apple | 8 | Q1 |
+ | apple | 10 | Q2 |
+ +---------+-------+---------*/
+```
+
+[unpivot-operator]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#unpivot_operator
### `ASSERT` pipe operator
@@ -667,13 +1823,13 @@ a related feature that verifies that a single expression is true.
**Example**
-
+```sql
FROM table
|> ASSERT count != 0, "Count is zero for user", userId
|> SELECT total / count AS average
-
+```
-
+[assert-statement]: https://github.com/google/zetasql/blob/master/docs/debugging-statements.md#assert
[query-syntax]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md
@@ -681,79 +1837,13 @@ FROM table
[from-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#from_clause
-[select-as-value]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#select_as_value
-
-[select-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#select_list
-
-[select-star]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#select_
-
-[select-replace]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#select_replace
-
-[select-except]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#select_except
-
-[set-operators]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#set_operators
-
-[drop-statement]: https://github.com/google/zetasql/blob/master/docs/data-definition-language.md#drop
-
-[where-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#where_clause
-
-[aggregate-pipe-operator]: #aggregate_pipe_operator
-
-[extend-pipe-operator]: #extend_pipe_operator
-
-[select-pipe-operator]: #select_pipe_operator
-
-[set-pipe-operator]: #set_pipe_operator
-
-[drop-pipe-operator]: #drop_pipe_operator
-
-[rename-pipe-operator]: #rename_pipe_operator
-
-[as-pipe-operator]: #as_pipe_operator
-
-[order-by-pipe-operator]: #order_by_pipe_operator
-
-[shorthand-order-pipe-syntax]: #shorthand_order_pipe_syntax
-
-[limit-offset-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#limit_and_offset_clause
-
-[having-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#having_clause
-
-[qualify-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#qualify_clause
-
-[aggregate-functions]: https://github.com/google/zetasql/blob/master/docs/aggregate_functions.md
-
-[group-by-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#group_by_clause
-
-[order-by-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#order_by_clause
-
-[join-operation]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#join_types
-
-[on-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#on_clause
-
-[window-clause]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#window_clause
-
-[window-functions]: https://github.com/google/zetasql/blob/master/docs/window-function-calls.md
-
-[over-clause]: https://github.com/google/zetasql/blob/master/docs/window-function-calls.md#def_over_clause
-
-[window-pipe-operator]: #window_pipe_operator
-
-[table-function-calls]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#table-function-calls
-
-[tvf]: https://github.com/google/zetasql/blob/master/docs/table-functions.md#tvfs
-
-[tablesample-operator]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#tablesample_operator
-
[using-aliases]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#using_aliases
-[pivot-operator]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#pivot_operator
-
-[unpivot-operator]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#unpivot_operator
+[select-star]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#select_
-[assert-statement]: https://github.com/google/zetasql/blob/master/docs/debugging-statements.md#assert
+[query-comparison]: #query_comparison
[value-tables]: https://github.com/google/zetasql/blob/master/docs/data-model.md#value_tables
-
+[select-as-value]: https://github.com/google/zetasql/blob/master/docs/query-syntax.md#select_as_value
diff --git a/docs/protocol_buffer_functions.md b/docs/protocol_buffer_functions.md
index 29d89bb19..405d38512 100644
--- a/docs/protocol_buffer_functions.md
+++ b/docs/protocol_buffer_functions.md
@@ -6,7 +6,7 @@
ZetaSQL supports the following protocol buffer functions.
-### Function list
+## Function list
@@ -18,8 +18,7 @@ ZetaSQL supports the following protocol buffer functions.
- ENUM_VALUE_DESCRIPTOR_PROTO
-
+ | ENUM_VALUE_DESCRIPTOR_PROTO
|
Gets the enum value descriptor proto
@@ -28,8 +27,7 @@ ZetaSQL supports the following protocol buffer functions.
|
- EXTRACT
-
+ | EXTRACT
|
Extracts a value or metadata from a protocol buffer.
@@ -37,8 +35,7 @@ ZetaSQL supports the following protocol buffer functions.
|
- FILTER_FIELDS
-
+ | FILTER_FIELDS
|
    Removes unwanted fields from a protocol buffer.
@@ -46,17 +43,16 @@ ZetaSQL supports the following protocol buffer functions.
|
- FROM_PROTO
-
+ | FROM_PROTO
|
    Converts a protocol buffer value into a ZetaSQL value.
+
|
- PROTO_DEFAULT_IF_NULL
-
+ | PROTO_DEFAULT_IF_NULL
|
Produces the default protocol buffer field value if the
@@ -66,8 +62,7 @@ ZetaSQL supports the following protocol buffer functions.
|
- PROTO_MAP_CONTAINS_KEY
-
+ | PROTO_MAP_CONTAINS_KEY
|
Checks if a protocol buffer map field contains a given key.
@@ -75,8 +70,7 @@ ZetaSQL supports the following protocol buffer functions.
|
- PROTO_MODIFY_MAP
-
+ | PROTO_MODIFY_MAP
|
Modifies a protocol buffer map field.
@@ -84,8 +78,7 @@ ZetaSQL supports the following protocol buffer functions.
|
- REPLACE_FIELDS
-
+ | REPLACE_FIELDS
|
Replaces the values in one or more protocol buffer fields.
@@ -93,18 +86,18 @@ ZetaSQL supports the following protocol buffer functions.
|
- TO_PROTO
-
+ | TO_PROTO
|
Converts a ZetaSQL value into a protocol buffer value.
+
|
-### `ENUM_VALUE_DESCRIPTOR_PROTO`
+## `ENUM_VALUE_DESCRIPTOR_PROTO`
```sql
ENUM_VALUE_DESCRIPTOR_PROTO(proto_enum)
@@ -155,7 +148,7 @@ FROM
*-------------------------------------------------+-----------------+----------------------------+---------------------------*/
```
-### `EXTRACT`
+## `EXTRACT`
```sql
@@ -321,7 +314,7 @@ FROM AlbumList;
[has-value]: https://github.com/google/zetasql/blob/master/docs/protocol-buffers.md#checking_if_a_field_has_a_value
-### `FILTER_FIELDS`
+## `FILTER_FIELDS`
```sql
FILTER_FIELDS(
@@ -575,7 +568,7 @@ FROM MusicAwards;
[querying-proto-extensions]: https://github.com/google/zetasql/blob/master/docs/protocol-buffers.md#extensions
-### `FROM_PROTO`
+## `FROM_PROTO`
```sql
FROM_PROTO(expression)
@@ -765,7 +758,7 @@ SELECT FROM_PROTO(DATE '2019-10-30')
*------------*/
```
-### `PROTO_DEFAULT_IF_NULL`
+## `PROTO_DEFAULT_IF_NULL`
```sql
PROTO_DEFAULT_IF_NULL(proto_field_expression)
@@ -832,7 +825,7 @@ default value for `country`.
*-----------------*/
```
-### `PROTO_MAP_CONTAINS_KEY`
+## `PROTO_MAP_CONTAINS_KEY`
```sql
PROTO_MAP_CONTAINS_KEY(proto_map_field_expression, key)
@@ -886,7 +879,7 @@ FROM
[proto-map]: https://developers.google.com/protocol-buffers/docs/proto3#maps
-### `PROTO_MODIFY_MAP`
+## `PROTO_MODIFY_MAP`
```sql
PROTO_MODIFY_MAP(proto_map_field_expression, key_value_pair[, ...])
@@ -952,7 +945,7 @@ FROM
[proto-map]: https://developers.google.com/protocol-buffers/docs/proto3#maps
-### `REPLACE_FIELDS`
+## `REPLACE_FIELDS`
```sql
REPLACE_FIELDS(proto_expression, value AS field_path [, ... ])
@@ -964,12 +957,12 @@ Returns a copy of a protocol buffer, replacing the values in one or more fields.
`field_path` is a delimited path to the protocol buffer field that is replaced.
When using `replace_fields`, the following limitations apply:
-+ If `value` is `NULL`, it un-sets `field_path` or returns an error if the last
- component of `field_path` is a required field.
-+ Replacing subfields will succeed only if the message containing the field is
- set.
-+ Replacing subfields of repeated field is not allowed.
-+ A repeated field can be replaced with an `ARRAY` value.
++ If `value` is `NULL`, it un-sets `field_path` or returns an error if the
+ last component of `field_path` is a required field.
++ Replacing subfields will succeed only if the message containing the field is
+ set.
++ Replacing subfields of a repeated field isn't allowed.
++ A repeated field can be replaced with an `ARRAY` value.
**Return type**
@@ -1045,7 +1038,7 @@ AS proto;
*-----------------------------------------------------------------------------*/
```
-### `TO_PROTO`
+## `TO_PROTO`
```
TO_PROTO(expression)
diff --git a/docs/query-syntax.md b/docs/query-syntax.md
index 5a45d5813..542223840 100644
--- a/docs/query-syntax.md
+++ b/docs/query-syntax.md
@@ -61,7 +61,7 @@ SELECT
select_all:
[ expression. ]*
[ EXCEPT ( column_name [, ...] ) ]
- [ REPLACE ( expression [ AS ] column_name [, ...] ) ]
+ [ REPLACE ( expression AS column_name [, ...] ) ]
select_expression:
expression [ [ AS ] alias ]
@@ -230,6 +230,59 @@ A `SELECT DISTINCT` statement discards duplicate rows and returns only the
remaining rows. `SELECT DISTINCT` cannot return columns of the following types:
+ `PROTO`
++ `GRAPH_ELEMENT`
+
+In the following example, `SELECT DISTINCT` is used to produce distinct arrays:
+
+```sql
+WITH PlayerStats AS (
+ SELECT ['Coolidge', 'Adams'] as Name, 3 as PointsScored UNION ALL
+ SELECT ['Adams', 'Buchanan'], 0 UNION ALL
+ SELECT ['Coolidge', 'Adams'], 1 UNION ALL
+ SELECT ['Kiran', 'Noam'], 1)
+SELECT DISTINCT Name
+FROM PlayerStats;
+
+/*------------------+
+ | Name |
+ +------------------+
+ | [Coolidge,Adams] |
+ | [Adams,Buchanan] |
+ | [Kiran,Noam] |
+ +------------------*/
+```
+
+In the following example, `SELECT DISTINCT` is used to produce distinct structs:
+
+```sql
+WITH
+ PlayerStats AS (
+ SELECT
+      STRUCT<last_name STRING, first_name STRING, age INT64>(
+ 'Adams', 'Noam', 20) AS Player,
+ 3 AS PointsScored UNION ALL
+ SELECT ('Buchanan', 'Jie', 19), 0 UNION ALL
+ SELECT ('Adams', 'Noam', 20), 4 UNION ALL
+ SELECT ('Buchanan', 'Jie', 19), 13
+ )
+SELECT DISTINCT Player
+FROM PlayerStats;
+
+/*--------------------------+
+ | player |
+ +--------------------------+
+ | { |
+ | last_name: "Adams", |
+ | first_name: "Noam", |
+ | age: 20 |
+ | } |
+ +--------------------------+
+ | { |
+ | last_name: "Buchanan", |
+ | first_name: "Jie", |
+ | age: 19 |
+ | } |
+ +---------------------------*/
+```
### `SELECT ALL`
@@ -393,6 +446,7 @@ FROM from_clause[, ...]
| field_path
| unnest_operator
| cte_name [ as_alias ]
+ | graph_table_operator [ as_alias ]
}
as_alias:
@@ -418,6 +472,11 @@ See [UNPIVOT operator][unpivot-operator].
See [TABLESAMPLE operator][tablesample-operator].
+#### `graph_table_operator`
+
+
+See [GRAPH_TABLE operator][graph-table-operator].
+
#### `table_name`
The name (optionally qualified) of an existing table.
@@ -1627,6 +1686,13 @@ As a result, the output of the second query doesn't contain the `SUM` estimate
for the group `VN`. We refer to this as the _missing-group problem_, which
can be solved with [stratified sampling][stratified-sampling].
+## `GRAPH_TABLE` operator
+
+
+To learn more about this operator, see
+[`GRAPH_TABLE` operator][graph-table-operator] in the
+Graph Query Language (GQL) reference guide.
+
## Join operation
@@ -3053,9 +3119,78 @@ GROUP BY last_name;
+--------------+-----------*/
```
-`GROUP BY` can group rows by the value of an array.
-`GROUP BY` will group two arrays if they have the same number of elements and
-all corresponding elements are in the same groups, or if both arrays are `NULL`.
+You can use the `GROUP BY` clause with arrays. The following query executes
+because the array elements being grouped are the same length and group type:
+
+```sql
+WITH PlayerStats AS (
+ SELECT ['Coolidge', 'Adams'] as Name, 3 as PointsScored UNION ALL
+ SELECT ['Adams', 'Buchanan'], 0 UNION ALL
+ SELECT ['Coolidge', 'Adams'], 1 UNION ALL
+ SELECT ['Kiran', 'Noam'], 1)
+SELECT SUM(PointsScored) AS total_points, name
+FROM PlayerStats
+GROUP BY Name;
+
+/*--------------+------------------+
+ | total_points | name |
+ +--------------+------------------+
+ | 4 | [Coolidge,Adams] |
+ | 0 | [Adams,Buchanan] |
+ | 1 | [Kiran,Noam] |
+ +--------------+------------------*/
+```
+
+You can use the `GROUP BY` clause with structs. The following query executes
+because the struct fields being grouped have the same group types:
+
+```sql
+WITH
+ TeamStats AS (
+ SELECT
+      ARRAY<STRUCT<last_name STRING, first_name STRING, age INT64>>[
+ ('Adams', 'Noam', 20), ('Buchanan', 'Jie', 19)] AS Team,
+ 3 AS PointsScored
+ UNION ALL
+ SELECT [('Coolidge', 'Kiran', 21), ('Yang', 'Jason', 22)], 4
+ UNION ALL
+ SELECT [('Adams', 'Noam', 20), ('Buchanan', 'Jie', 19)], 10
+ UNION ALL
+ SELECT [('Coolidge', 'Kiran', 21), ('Yang', 'Jason', 22)], 7
+ )
+SELECT
+ SUM(PointsScored) AS total_points,
+ Team
+FROM TeamStats
+GROUP BY Team;
+
+/*--------------+--------------------------+
+ | total_points | teams |
+ +--------------+--------------------------+
+ | 13 | [{ |
+ | | last_name: "Adams", |
+ | | first_name: "Noam", |
+ | | age: 20 |
+ | | },{ |
+ | | last_name: "Buchanan",|
+ | | first_name: "Jie", |
+ | | age: 19 |
+ | | }] |
+ +-----------------------------------------+
+ | 11 | [{ |
+ | | last_name: "Coolidge",|
+ | | first_name: "Kiran", |
+ | | age: 21 |
+ | | },{ |
+ | | last_name: "Yang", |
+ | | first_name: "Jason", |
+ | | age: 22 |
+ | | }] |
+ +--------------+--------------------------*/
+```
+
+To learn more about the data types that are supported for values in the
+`GROUP BY` clause, see [Groupable data types][data-type-properties].
#### Group rows by column ordinals
@@ -6233,7 +6368,7 @@ Common items that this expression can represent include
tables,
[value tables][value-tables],
[subqueries][subquery-concepts],
-[table value functions (TVFs)][tvf-concepts],
+[table-valued functions (TVFs)][tvf-concepts],
[joins][query-joins], and [parenthesized joins][query-joins].
In general, a range variable provides a reference to the rows of a table
@@ -6811,6 +6946,10 @@ Results:
[privacy-view]: https://github.com/google/zetasql/blob/master/docs/analysis-rules.md#privacy_view
+[graph-table-operator]: https://github.com/google/zetasql/blob/master/docs/graph-sql-queries.md#graph_table_operator
+
+[graph-hints-gql]: https://github.com/google/zetasql/blob/master/docs/graph-query-statements.md#graph_hints
+
[coalesce]: https://github.com/google/zetasql/blob/master/docs/conditional_expressions.md#coalesce
diff --git a/docs/range-functions.md b/docs/range-functions.md
index 7618114d0..8e6200673 100644
--- a/docs/range-functions.md
+++ b/docs/range-functions.md
@@ -6,7 +6,7 @@
ZetaSQL supports the following range functions.
-### Function list
+## Function list
@@ -18,15 +18,17 @@ ZetaSQL supports the following range functions.
- GENERATE_RANGE_ARRAY
-
+ | GENERATE_RANGE_ARRAY
|
- Splits a range into an array of subranges. |
+
+ Splits a range into an array of subranges.
+ For more information, see Range functions.
+
+ |
- RANGE
-
+ | RANGE
|
Constructs a range of DATE , DATETIME ,
@@ -35,8 +37,18 @@ ZetaSQL supports the following range functions.
|
- RANGE_CONTAINS
+ | RANGE_BUCKET
+ |
+
+ Scans through a sorted array and returns the 0-based position
+ of a point's upper bound.
+ For more information, see Mathematical functions.
+
+ |
+
+
+ RANGE_CONTAINS
|
Signature 1: Checks if one range is in another range.
@@ -46,36 +58,34 @@ ZetaSQL supports the following range functions.
|
- RANGE_END
-
+ | RANGE_END
|
Gets the upper bound of a range. |
- RANGE_INTERSECT
-
+ | RANGE_INTERSECT
|
Gets a segment of two ranges that intersect. |
- RANGE_OVERLAPS
-
+ | RANGE_OVERLAPS
|
Checks if two ranges overlap. |
- RANGE_SESSIONIZE
-
+ | RANGE_SESSIONIZE
|
- Produces a table of sessionized ranges. |
+
+ Produces a table of sessionized ranges.
+
+ |
- RANGE_START
-
+ | RANGE_START
|
Gets the lower bound of a range. |
@@ -83,7 +93,7 @@ ZetaSQL supports the following range functions.
-### `GENERATE_RANGE_ARRAY`
+## `GENERATE_RANGE_ARRAY`
```sql
GENERATE_RANGE_ARRAY(range_to_split, step_interval)
@@ -214,7 +224,7 @@ SELECT GENERATE_RANGE_ARRAY(
[interval-single]: https://github.com/google/zetasql/blob/master/docs/data-types.md#single_datetime_part_interval
-### `RANGE`
+## `RANGE`
```sql
RANGE(lower_bound, upper_bound)
@@ -316,7 +326,7 @@ SELECT RANGE(DATE '2022-10-01', NULL) AS results;
[datetime-type]: https://github.com/google/zetasql/blob/master/docs/data-types.md#datetime_type
-### `RANGE_CONTAINS`
+## `RANGE_CONTAINS`
+ [Signature 1][range_contains-sig1]: Checks if every value in one range is
in another range.
@@ -441,7 +451,7 @@ SELECT RANGE_CONTAINS(
[range_contains-sig2]: #signature_2
-### `RANGE_END`
+## `RANGE_END`
```sql
RANGE_END(range_to_check)
@@ -492,7 +502,7 @@ SELECT RANGE_END(RANGE '[2022-12-01, UNBOUNDED)') AS results;
+------------*/
```
-### `RANGE_INTERSECT`
+## `RANGE_INTERSECT`
```sql
RANGE_INTERSECT(range_a, range_b)
@@ -558,7 +568,7 @@ SELECT RANGE_INTERSECT(
+-------------------------*/
```
-### `RANGE_OVERLAPS`
+## `RANGE_OVERLAPS`
```sql
RANGE_OVERLAPS(range_a, range_b)
@@ -635,7 +645,7 @@ SELECT RANGE_OVERLAPS(
[range-intersect]: #range_intersect
-### `RANGE_SESSIONIZE`
+## `RANGE_SESSIONIZE`
```sql
RANGE_SESSIONIZE(
@@ -788,7 +798,7 @@ GROUP BY emp_id, normalized;
+--------+--------------------------*/
```
-### `RANGE_START`
+## `RANGE_START`
```sql
RANGE_START(range_to_check)
diff --git a/docs/resolved_ast.md b/docs/resolved_ast.md
index 7e1b087c7..9b8ca788e 100755
--- a/docs/resolved_ast.md
+++ b/docs/resolved_ast.md
@@ -23,6 +23,11 @@ The generated classes are specified in
[https://github.com/google/zetasql/blob/master/zetasql/resolved_ast/gen_resolved_ast.py](https://github.com/google/zetasql/blob/master/zetasql/resolved_ast/gen_resolved_ast.py)
See that file for comments on specific nodes and fields.
+Additional non-generated classes that are documented separately:
+
+- [ResolvedColumn](https://github.com/google/zetasql/blob/master/zetasql/resolved_ast/resolved_column.h)
+- [ResolvedCollation](https://github.com/google/zetasql/blob/master/zetasql/resolved_ast/resolved_collation.h)
+
### Java
The base class `ResolvedNode` is defined in
@@ -32,10 +37,13 @@ The generated classes are specified in
[https://github.com/google/zetasql/blob/master/zetasql/resolved_ast/gen_resolved_ast.py](https://github.com/google/zetasql/blob/master/zetasql/resolved_ast/gen_resolved_ast.py)
See that file for comments on specific nodes and fields.
+Additional non-generated classes that are documented separately:
+
+- [ResolvedColumn](https://github.com/google/zetasql/blob/master/java/com/google/zetasql/resolvedast/ResolvedColumn.java)
+
## Resolved AST Node Hierarchy
-
-ResolvedNode
+ResolvedNode
ResolvedArgument
ResolvedAggregateHavingModifier
ResolvedAlterAction
@@ -92,7 +100,23 @@ See that file for comments on specific nodes and fields.
ResolvedFilterFieldArg
ResolvedFunctionArgument
ResolvedFunctionSignatureHolder
+ ResolvedGeneralizedQuerySubpipeline
ResolvedGeneratedColumnInfo
+ ResolvedGraphElementIdentifier
+ ResolvedGraphElementLabel
+ ResolvedGraphElementProperty
+ ResolvedGraphElementTable
+ ResolvedGraphLabelExpr
+ ResolvedGraphLabel
+ ResolvedGraphLabelNaryExpr
+ ResolvedGraphWildCardLabel
+ ResolvedGraphMakeArrayVariable
+ ResolvedGraphNodeTableReference
+ ResolvedGraphPathMode
+ ResolvedGraphPathPatternQuantifier
+ ResolvedGraphPathSearchPrefix
+ ResolvedGraphPropertyDeclaration
+ ResolvedGraphPropertyDefinition
ResolvedGroupingCall
ResolvedGroupingSetBase
ResolvedCube
@@ -103,13 +127,25 @@ See that file for comments on specific nodes and fields.
ResolvedIndexItem
ResolvedInlineLambda
ResolvedInsertRow
+ ResolvedLockMode
ResolvedMakeProtoField
+ ResolvedMatchRecognizePatternExpr
+ ResolvedMatchRecognizePatternAnchor
+ ResolvedMatchRecognizePatternEmpty
+ ResolvedMatchRecognizePatternOperation
+ ResolvedMatchRecognizePatternQuantification
+ ResolvedMatchRecognizePatternVariableRef
+ ResolvedMatchRecognizeVariableDefinition
+ ResolvedMeasureGroup
ResolvedMergeWhen
ResolvedModel
ResolvedObjectUnit
+ ResolvedOnConflictClause
ResolvedOption
ResolvedOrderByItem
ResolvedOutputColumn
+ ResolvedOutputSchema
+ ResolvedPipeIfCase
ResolvedPivotColumn
ResolvedPrivilege
ResolvedRecursionDepthModifier
@@ -117,6 +153,7 @@ See that file for comments on specific nodes and fields.
ResolvedReturningClause
ResolvedSequence
ResolvedSetOperationItem
+ ResolvedSubpipeline
ResolvedTableAndColumnInfo
ResolvedUnnestItem
ResolvedUnpivotArg
@@ -130,6 +167,7 @@ See that file for comments on specific nodes and fields.
ResolvedWithPartitionColumns
ResolvedExpr
ResolvedArgumentRef
+ ResolvedArrayAggregate
ResolvedCast
ResolvedCatalogColumnRef
ResolvedColumnRef
@@ -148,6 +186,9 @@ See that file for comments on specific nodes and fields.
ResolvedGetProtoField
ResolvedGetProtoOneof
ResolvedGetStructField
+ ResolvedGraphGetElementProperty
+ ResolvedGraphIsLabeledPredicate
+ ResolvedGraphMakeElement
ResolvedLiteral
ResolvedMakeProto
ResolvedMakeStruct
@@ -168,10 +209,25 @@ See that file for comments on specific nodes and fields.
ResolvedBarrierScan
ResolvedExecuteAsRoleScan
ResolvedFilterScan
+ ResolvedGraphPathScanBase
+ ResolvedGraphElementScan
+ ResolvedGraphEdgeScan
+ ResolvedGraphNodeScan
+ ResolvedGraphPathScan
+ ResolvedGraphRefScan
+ ResolvedGraphScanBase
+ ResolvedGraphLinearScan
+ ResolvedGraphScan
+ ResolvedGraphTableScan
ResolvedGroupRowsScan
ResolvedJoinScan
ResolvedLimitOffsetScan
+ ResolvedLogScan
+ ResolvedMatchRecognizeScan
ResolvedOrderByScan
+ ResolvedPipeExportDataScan
+ ResolvedPipeForkScan
+ ResolvedPipeIfScan
ResolvedPivotScan
ResolvedProjectScan
ResolvedRecursiveRefScan
@@ -181,6 +237,7 @@ See that file for comments on specific nodes and fields.
ResolvedSetOperationScan
ResolvedSingleRowScan
ResolvedStaticDescribeScan
+ ResolvedSubpipelineInputScan
ResolvedTVFScan
ResolvedTableScan
ResolvedUnpivotScan
@@ -222,6 +279,7 @@ See that file for comments on specific nodes and fields.
ResolvedCreateModelStmt
ResolvedCreatePrivilegeRestrictionStmt
ResolvedCreateProcedureStmt
+ ResolvedCreatePropertyGraphStmt
ResolvedCreateSchemaStmtBase
ResolvedCreateExternalSchemaStmt
ResolvedCreateSchemaStmt
@@ -251,6 +309,7 @@ See that file for comments on specific nodes and fields.
ResolvedExportDataStmt
ResolvedExportMetadataStmt
ResolvedExportModelStmt
+ ResolvedGeneralizedQueryStmt
ResolvedGrantOrRevokeStmt
ResolvedGrantStmt
ResolvedRevokeStmt
@@ -279,8 +338,7 @@ the base class.
### ResolvedArgument
-
-// Argument nodes are not self-contained nodes in the tree. They exist
+// Argument nodes are not self-contained nodes in the tree. They exist
// only to describe parameters to another node (e.g. columns in an OrderBy).
// This node is here for organizational purposes only, to cluster these
// argument nodes.
@@ -291,8 +349,7 @@ class ResolvedArgument : public ResolvedNode {
### ResolvedExpr
-
-class ResolvedExpr : public ResolvedNode {
+class ResolvedExpr : public ResolvedNode {
bool IsExpression() const final { return true; }
AnnotatedType annotated_type() const {
@@ -308,8 +365,7 @@ class ResolvedExpr : public ResolvedNode {
### ResolvedLiteral
-
-// Any literal value, including NULL literals.
+// Any literal value, including NULL literals.
// There is a special-cased constructor here that gets the type from the
// Value.
class ResolvedLiteral : public ResolvedExpr {
@@ -341,8 +397,7 @@ class ResolvedLiteral : public ResolvedExpr {
### ResolvedParameter
-
-class ResolvedParameter : public ResolvedExpr {
+class ResolvedParameter : public ResolvedExpr {
static const ResolvedNodeKind TYPE = RESOLVED_PARAMETER;
// If non-empty, the name of the parameter.
@@ -368,8 +423,7 @@ class ResolvedParameter : public ResolvedExpr {
### ResolvedExpressionColumn
-
-// This represents a column when analyzing a standalone expression.
+// This represents a column when analyzing a standalone expression.
// This is only used when the analyzer was called using AnalyzeExpression.
// Expression column names and types come from
// AnalyzerOptions::AddExpressionColumn.
@@ -384,8 +438,7 @@ class ResolvedExpressionColumn : public ResolvedExpr
### ResolvedCatalogColumnRef
-
-// An expression referencing a Column from the Catalog. This is used to
+// An expression referencing a Column from the Catalog. This is used to
// represent a column reference in an expression inside a DDL statement.
// The DDL statement will normally define the Table context, and the
// referenced Column should be a Column of that Table.
@@ -399,8 +452,7 @@ class ResolvedCatalogColumnRef : public ResolvedExpr
### ResolvedColumnRef
-
-// An expression referencing the value of some column visible in the
+// An expression referencing the value of some column visible in the
// current Scan node.
//
// If <is_correlated> is false, this must be a column visible in the Scan
@@ -423,8 +475,7 @@ class ResolvedColumnRef : public ResolvedExpr {
### ResolvedGroupingSetMultiColumn
-
-// A list of ResolvedColumnRef expression references that will be batched
+// A list of ResolvedColumnRef expression references that will be batched
// together in rollup/cube when being expanded to grouping sets. For
// example, ROLLUP((a, b), c) will be expanded to 3 grouping sets [(a, b, c),
// (a, b), ()], (a, b) is a multi-column.
@@ -444,8 +495,7 @@ class ResolvedGroupingSetMultiColumn : public Resolv
### ResolvedConstant
-
-// A reference to a named constant.
+// A reference to a named constant.
class ResolvedConstant : public ResolvedExpr {
static const ResolvedNodeKind TYPE = RESOLVED_CONSTANT;
@@ -457,8 +507,7 @@ class ResolvedConstant : public ResolvedExpr {
### ResolvedSystemVariable
-
-// A reference to a system variable.
+// A reference to a system variable.
class ResolvedSystemVariable : public ResolvedExpr {
static const ResolvedNodeKind TYPE = RESOLVED_SYSTEM_VARIABLE;
@@ -472,8 +521,7 @@ class ResolvedSystemVariable : public ResolvedExpr {
### ResolvedInlineLambda
-
-// A lambda expression, used inline as a function argument.
+// A lambda expression, used inline as a function argument.
// This represents both the definition of the lambda and the resolution of
// its templated signature and body for this function call.
// Currently can only be used as an argument of a function.
@@ -514,8 +562,7 @@ class ResolvedInlineLambda : public ResolvedArgument
### ResolvedSequence
-
-// Represents a sequence as a function argument
+// Represents a sequence as a function argument
class ResolvedSequence : public ResolvedArgument {
static const ResolvedNodeKind TYPE = RESOLVED_SEQUENCE;
@@ -526,8 +573,7 @@ class ResolvedSequence : public ResolvedArgument
### ResolvedFilterFieldArg
-
-// An argument to the FILTER_FIELDS() function which specifies a sign to show
+// An argument to the FILTER_FIELDS() function which specifies a sign to show
// inclusion/exclusion status and a field path to include or exclude.
class ResolvedFilterFieldArg : public ResolvedArgument {
static const ResolvedNodeKind TYPE = RESOLVED_FILTER_FIELD_ARG;
@@ -549,8 +595,7 @@ class ResolvedFilterFieldArg : public ResolvedArgume
### ResolvedFilterField
-
-// Represents a call to the FILTER_FIELDS() function. This function can be
+// Represents a call to the FILTER_FIELDS() function. This function can be
// used to modify a proto, prune fields and output the resulting proto. The
// SQL syntax for this function is
// FILTER_FIELDS(<expr>, <filter_field_arg_list>).
@@ -601,8 +646,7 @@ class ResolvedFilterField : public ResolvedExpr {
### ResolvedFunctionCallBase
-
-// Common base class for scalar and aggregate function calls.
+// Common base class for scalar and aggregate function calls.
//
// <argument_list> contains a list of arguments of type ResolvedExpr.
//
@@ -664,8 +708,7 @@ class ResolvedFunctionCallBase : public ResolvedExpr
### ResolvedFunctionCall
-
-// A regular function call. The signature will always have mode SCALAR.
+// A regular function call. The signature will always have mode SCALAR.
// Most scalar expressions show up as FunctionCalls using builtin signatures.
class ResolvedFunctionCall : public ResolvedFunctionCallBase {
static const ResolvedNodeKind TYPE = RESOLVED_FUNCTION_CALL;
@@ -689,8 +732,7 @@ class ResolvedFunctionCall : public Resolved
### ResolvedNonScalarFunctionCallBase
-
-// Common base class for analytic and aggregate function calls.
+// Common base class for analytic and aggregate function calls.
class ResolvedNonScalarFunctionCallBase : public ResolvedFunctionCallBase {
typedef ResolvedNonScalarFunctionCallBaseEnums::NullHandlingModifier NullHandlingModifier;
static const NullHandlingModifier DEFAULT_NULL_HANDLING = ResolvedNonScalarFunctionCallBaseEnums::DEFAULT_NULL_HANDLING;
@@ -742,8 +784,7 @@ class ResolvedNonScalarFunctionCallBase : public
-
-// An aggregate function call. The signature always has mode AGGREGATE.
+// An aggregate function call. The signature always has mode AGGREGATE.
//
// FEATURE_V_1_4_MULTILEVEL_AGGREGATION enables multi-level aggregate
// expressions (e.g. 'SUM(AVG(1 + X) GROUP BY key)' ). The GROUP BY modifier
@@ -798,9 +839,9 @@ class ResolvedAggregateFunctionCall : public ResolvedComputedColumnBase>>& group_by_list() const;
+ const std::vector<std::unique_ptr<const ResolvedComputedColumn>>& group_by_list() const;
int group_by_list_size() const;
- const ResolvedComputedColumnBase* group_by_list(int i) const;
+ const ResolvedComputedColumn* group_by_list(int i) const;
// Aggregate columns to compute over the grouping keys defined in
// `group_by_list`. Used only for multi-level aggregation, when
@@ -814,8 +855,7 @@ class ResolvedAggregateFunctionCall : public
-
-// An analytic function call. The mode of the function is either AGGREGATE
+// An analytic function call. The mode of the function is either AGGREGATE
// or ANALYTIC. This node only ever shows up as a function call in a
// ResolvedAnalyticFunctionGroup::analytic_function_list. Its associated
// window is not under this node but as a sibling of its parent node.
@@ -831,8 +871,7 @@ class ResolvedAnalyticFunctionCall : public
-
-// Describes a leaf extended cast of ResolvedExtendedCast. See the comment
+// Describes a leaf extended cast of ResolvedExtendedCast. See the comment
// for element_list field of ResolvedExtendedCast for more details.
class ResolvedExtendedCastElement : public ResolvedArgument {
static const ResolvedNodeKind TYPE = RESOLVED_EXTENDED_CAST_ELEMENT;
@@ -848,8 +887,7 @@ class ResolvedExtendedCastElement : public ResolvedA
### ResolvedExtendedCast
-
-// Describes overall cast operation between two values where at least one
+// Describes overall cast operation between two values where at least one
// value's type is or contains an extended type (e.g. on a struct field).
class ResolvedExtendedCast : public ResolvedArgument {
static const ResolvedNodeKind TYPE = RESOLVED_EXTENDED_CAST;
@@ -869,8 +907,7 @@ class ResolvedExtendedCast : public ResolvedArgument
### ResolvedCast
-
-// A cast expression, casting the result of an input expression to the
+// A cast expression, casting the result of an input expression to the
// target Type.
//
// Valid casts are defined in the CastHashMap (see cast.cc), which identifies
@@ -945,8 +982,7 @@ class ResolvedCast : public ResolvedExpr {
### ResolvedMakeStruct
-
-// Construct a struct value. <type> is always a StructType.
+// Construct a struct value. <type> is always a StructType.
// <field_list> matches 1:1 with the fields in <type> position-wise.
// Each field's type will match the corresponding field in <type>.
class ResolvedMakeStruct : public ResolvedExpr {
@@ -961,8 +997,7 @@ class ResolvedMakeStruct : public ResolvedExpr {
### ResolvedMakeProto
-
-// Construct a proto value. <type> is always a ProtoType.
+// Construct a proto value. <type> is always a ProtoType.
// <field_list> is a vector of (FieldDescriptor, expr) pairs to write.
// <field_list> will contain all required fields, and no duplicate fields.
class ResolvedMakeProto : public ResolvedExpr {
@@ -977,8 +1012,7 @@ class ResolvedMakeProto : public ResolvedExpr {
### ResolvedMakeProtoField
-
-// One field assignment in a ResolvedMakeProto expression.
+// One field assignment in a ResolvedMakeProto expression.
// The type of expr will match with the zetasql type of the proto field.
// The type will be an array iff the field is repeated.
//
@@ -1003,8 +1037,7 @@ class ResolvedMakeProtoField : public ResolvedArgume
### ResolvedGetStructField
-
-// Get the field in position <field_idx> (0-based) from <expr>, which has a
+// Get the field in position <field_idx> (0-based) from <expr>, which has a
// STRUCT type.
class ResolvedGetStructField : public ResolvedExpr {
static const ResolvedNodeKind TYPE = RESOLVED_GET_STRUCT_FIELD;
@@ -1023,8 +1056,7 @@ class ResolvedGetStructField : public ResolvedExpr {
### ResolvedGetProtoField
-
-class ResolvedGetProtoField : public ResolvedExpr {
+class ResolvedGetProtoField : public ResolvedExpr {
static const ResolvedNodeKind TYPE = RESOLVED_GET_PROTO_FIELD;
const ResolvedExpr* expr() const;
@@ -1093,8 +1125,7 @@ class ResolvedGetProtoField : public ResolvedExpr {
### ResolvedGetJsonField
-
-// Get the field <field_name> from <expr>, which has a JSON type.
+// Get the field <field_name> from <expr>, which has a JSON type.
class ResolvedGetJsonField : public ResolvedExpr {
static const ResolvedNodeKind TYPE = RESOLVED_GET_JSON_FIELD;
@@ -1107,8 +1138,7 @@ class ResolvedGetJsonField : public ResolvedExpr {
### ResolvedFlatten
-
-// Constructs an initial input ARRAY<T> from expr. For each get_field_list
+// Constructs an initial input ARRAY<T> from expr. For each get_field_list
// expr, we evaluate the expression once with each array input element and
// use the output as a new array of inputs for the next get_field_list expr.
// If the result of a single expr is an array, we add each element from that
@@ -1139,8 +1169,7 @@ class ResolvedFlatten : public ResolvedExpr {
### ResolvedFlattenedArg
-
-// Argument for a child of ResolvedFlatten. This is a placeholder to indicate
+// Argument for a child of ResolvedFlatten. This is a placeholder to indicate
// that it will be invoked once for each array element from ResolvedFlatten's
// expr or previous get_field_list entry.
class ResolvedFlattenedArg : public ResolvedExpr {
@@ -1152,8 +1181,7 @@ class ResolvedFlattenedArg : public ResolvedExpr {
### ResolvedReplaceFieldItem
-
-// An argument to the REPLACE_FIELDS() function which specifies a field path
+// An argument to the REPLACE_FIELDS() function which specifies a field path
// and a value that this field will be set to. The field path to be modified
// can be constructed through the <struct_index_path> and <proto_field_path>
// fields. These vectors correspond to field paths in a STRUCT and PROTO,
@@ -1205,8 +1233,7 @@ class ResolvedReplaceFieldItem : public ResolvedArgu
### ResolvedReplaceField
-
-// Represents a call to the REPLACE_FIELDS() function. This function
+// Represents a call to the REPLACE_FIELDS() function. This function
// can be used to copy a proto or struct, modify a few fields and
// output the resulting proto or struct. The SQL syntax for this
// function is REPLACE_FIELDS(<expr>, <replace_field_item_list>).
@@ -1236,8 +1263,7 @@ class ResolvedReplaceField : public ResolvedExpr {
### ResolvedGetProtoOneof
-
-// Returns a string value indicating which field of <oneof_descriptor> is
+// Returns a string value indicating which field of <oneof_descriptor> is
// set in the containing proto <expr>. If none of the fields are set, an
// empty string is returned.
//
@@ -1257,8 +1283,7 @@ class ResolvedGetProtoOneof : public ResolvedExpr {
### ResolvedSubqueryExpr
-
-// A subquery in an expression (not a FROM clause). The subquery runs
+// A subquery in an expression (not a FROM clause). The subquery runs
// in the context of a single input row and produces a single output value.
//
// Correlated subqueries can be thought of like functions, with a parameter
@@ -1366,8 +1391,7 @@ class ResolvedSubqueryExpr : public ResolvedExpr {
### ResolvedWithExpr
-
-// ResolvedWithExpr introduces one or more columns in <assignment_list> that
+// ResolvedWithExpr introduces one or more columns in <assignment_list> that
// can then be referenced inside <expr>. Each assigned expression is
// evaluated once, and each reference to that column in <expr> sees the same
// value even if the assigned expression is volatile. Multiple assignment
@@ -1392,8 +1416,7 @@ class ResolvedWithExpr : public ResolvedExpr {
### ResolvedScan
-
-// Common superclass for all Scans, which are nodes that produce rows
+// Common superclass for all Scans, which are nodes that produce rows
// (e.g. scans, joins, table subqueries). A query's FROM clause is
// represented as a single Scan that composes all input sources into
// a single row stream.
@@ -1434,8 +1457,7 @@ class ResolvedScan : public ResolvedNode {
### ResolvedExecuteAsRoleScan
-
-// This node provides the role context for its subtree. Currently, it only
+// This node provides the role context for its subtree. Currently, it only
// handles subtrees from inlined TVFs and VIEWs created with DEFINER rights.
// Due to the lack of a ROLE catalog object, we are using the original
// catalog object (VIEW or TVF) as a proxy. The engine is expected to extract
@@ -1472,8 +1494,7 @@ class ResolvedExecuteAsRoleScan : public ResolvedScan
-
-// Represents a machine learning model as a TVF argument.
+// Represents a machine learning model as a TVF argument.
// <model> is the machine learning model object known to the resolver
// (usually through the catalog).
class ResolvedModel : public ResolvedArgument {
@@ -1486,8 +1507,7 @@ class ResolvedModel : public ResolvedArgument {
### ResolvedConnection
-
-// Represents a connection object, which encapsulates engine-specific
+// Represents a connection object, which encapsulates engine-specific
// metadata used to connect to an external data source.
class ResolvedConnection : public ResolvedArgument {
static const ResolvedNodeKind TYPE = RESOLVED_CONNECTION;
@@ -1499,8 +1519,7 @@ class ResolvedConnection : public ResolvedArgument
### ResolvedDescriptor
-
-// Represents a descriptor object as a TVF argument.
+// Represents a descriptor object as a TVF argument.
// A descriptor is basically a list of unresolved column names, written
// DESCRIPTOR(column1, column2)
//
@@ -1526,8 +1545,7 @@ class ResolvedDescriptor : public ResolvedArgument
### ResolvedSingleRowScan
-
-// Scan that produces a single row with no columns. Used for queries without
+// Scan that produces a single row with no columns. Used for queries without
// a FROM clause, where all output comes from the select list.
class ResolvedSingleRowScan : public ResolvedScan {
static const ResolvedNodeKind TYPE = RESOLVED_SINGLE_ROW_SCAN;
@@ -1538,8 +1556,7 @@ class ResolvedSingleRowScan : public ResolvedScan {
### ResolvedTableScan
-
-// Scan a Table.
+// Scan a Table.
// The <column_list>[i] should be matched to a Table column by
// <table>.GetColumn(<column_index_list>[i]).
//
@@ -1579,14 +1596,15 @@ class ResolvedTableScan : public ResolvedScan {
int column_index_list(int i) const;
const std::string& alias() const;
+
+ const ResolvedLockMode* lock_mode() const;
};
### ResolvedJoinScan
-
-// A Scan that joins two input scans.
+// A Scan that joins two input scans.
// The <column_list> will contain columns selected from the union
// of the input scan's <column_lists>.
// When the join is a LEFT/RIGHT/FULL join, ResolvedColumns that came from
@@ -1620,8 +1638,7 @@ class ResolvedJoinScan : public